1 //===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "AMDKernelCodeT.h"
11 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
12 #include "MCTargetDesc/AMDGPUTargetStreamer.h"
13 #include "SIDefines.h"
14 #include "Utils/AMDGPUBaseInfo.h"
15 #include "Utils/AMDKernelCodeTUtils.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallString.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/MC/MCInstrInfo.h"
25 #include "llvm/MC/MCParser/MCAsmLexer.h"
26 #include "llvm/MC/MCParser/MCAsmParser.h"
27 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
28 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
29 #include "llvm/MC/MCRegisterInfo.h"
30 #include "llvm/MC/MCStreamer.h"
31 #include "llvm/MC/MCSubtargetInfo.h"
32 #include "llvm/MC/MCSymbolELF.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ELF.h"
35 #include "llvm/Support/SourceMgr.h"
36 #include "llvm/Support/TargetRegistry.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 namespace {
42 
// Forward declaration; defined after the parser classes below.
struct OptionalOperand;

// Classification of a parsed register reference: vector GPR ("v..."),
// scalar GPR ("s..."), trap-handler temporary ("ttmp...") or a named
// special register such as vcc/exec/m0.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
46 
47 class AMDGPUOperand : public MCParsedAsmOperand {
48   enum KindTy {
49     Token,
50     Immediate,
51     Register,
52     Expression
53   } Kind;
54 
55   SMLoc StartLoc, EndLoc;
56 
57 public:
58   AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
59 
60   MCContext *Ctx;
61 
62   enum ImmTy {
63     ImmTyNone,
64     ImmTyDSOffset0,
65     ImmTyDSOffset1,
66     ImmTyGDS,
67     ImmTyOffset,
68     ImmTyGLC,
69     ImmTySLC,
70     ImmTyTFE,
71     ImmTyClamp,
72     ImmTyOMod,
73     ImmTyDppCtrl,
74     ImmTyDppRowMask,
75     ImmTyDppBankMask,
76     ImmTyDppBoundCtrl,
77     ImmTySdwaSel,
78     ImmTySdwaDstUnused,
79     ImmTyDMask,
80     ImmTyUNorm,
81     ImmTyDA,
82     ImmTyR128,
83     ImmTyLWE,
84     ImmTyHwreg,
85   };
86 
87   struct TokOp {
88     const char *Data;
89     unsigned Length;
90   };
91 
92   struct ImmOp {
93     bool IsFPImm;
94     ImmTy Type;
95     int64_t Val;
96     int Modifiers;
97   };
98 
99   struct RegOp {
100     unsigned RegNo;
101     int Modifiers;
102     const MCRegisterInfo *TRI;
103     const MCSubtargetInfo *STI;
104     bool IsForcedVOP3;
105   };
106 
107   union {
108     TokOp Tok;
109     ImmOp Imm;
110     RegOp Reg;
111     const MCExpr *Expr;
112   };
113 
114   void addImmOperands(MCInst &Inst, unsigned N) const {
115     Inst.addOperand(MCOperand::createImm(getImm()));
116   }
117 
118   StringRef getToken() const {
119     return StringRef(Tok.Data, Tok.Length);
120   }
121 
122   void addRegOperands(MCInst &Inst, unsigned N) const {
123     Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
124   }
125 
126   void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
127     if (isRegKind())
128       addRegOperands(Inst, N);
129     else
130       addImmOperands(Inst, N);
131   }
132 
133   void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
134     if (isRegKind()) {
135       Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
136       addRegOperands(Inst, N);
137     } else {
138       Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
139       addImmOperands(Inst, N);
140     }
141   }
142 
143   void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
144     if (isImm())
145       addImmOperands(Inst, N);
146     else {
147       assert(isExpr());
148       Inst.addOperand(MCOperand::createExpr(Expr));
149     }
150   }
151 
152   bool defaultTokenHasSuffix() const {
153     StringRef Token(Tok.Data, Tok.Length);
154 
155     return Token.endswith("_e32") || Token.endswith("_e64") ||
156       Token.endswith("_dpp");
157   }
158 
159   bool isToken() const override {
160     return Kind == Token;
161   }
162 
163   bool isImm() const override {
164     return Kind == Immediate;
165   }
166 
167   bool isInlinableImm() const {
168     if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
169       immediates are inlinable (e.g. "clamp" attribute is not) */ )
170       return false;
171     // TODO: We should avoid using host float here. It would be better to
172     // check the float bit values which is what a few other places do.
173     // We've had bot failures before due to weird NaN support on mips hosts.
174     const float F = BitsToFloat(Imm.Val);
175     // TODO: Add 1/(2*pi) for VI
176     return (Imm.Val <= 64 && Imm.Val >= -16) ||
177            (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
178            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
179   }
180 
181   bool isDSOffset0() const {
182     assert(isImm());
183     return Imm.Type == ImmTyDSOffset0;
184   }
185 
186   bool isDSOffset1() const {
187     assert(isImm());
188     return Imm.Type == ImmTyDSOffset1;
189   }
190 
191   int64_t getImm() const {
192     return Imm.Val;
193   }
194 
195   enum ImmTy getImmTy() const {
196     assert(isImm());
197     return Imm.Type;
198   }
199 
200   bool isRegKind() const {
201     return Kind == Register;
202   }
203 
204   bool isReg() const override {
205     return Kind == Register && Reg.Modifiers == 0;
206   }
207 
208   bool isRegOrImmWithInputMods() const {
209     return Kind == Register || isInlinableImm();
210   }
211 
212   bool isImmTy(ImmTy ImmT) const {
213     return isImm() && Imm.Type == ImmT;
214   }
215 
216   bool isClamp() const {
217     return isImmTy(ImmTyClamp);
218   }
219 
220   bool isOMod() const {
221     return isImmTy(ImmTyOMod);
222   }
223 
224   bool isImmModifier() const {
225     return Kind == Immediate && Imm.Type != ImmTyNone;
226   }
227 
228   bool isDMask() const {
229     return isImmTy(ImmTyDMask);
230   }
231 
232   bool isUNorm() const { return isImmTy(ImmTyUNorm); }
233   bool isDA() const { return isImmTy(ImmTyDA); }
234   bool isR128() const { return isImmTy(ImmTyUNorm); }
235   bool isLWE() const { return isImmTy(ImmTyLWE); }
236 
237   bool isMod() const {
238     return isClamp() || isOMod();
239   }
240 
241   bool isGDS() const { return isImmTy(ImmTyGDS); }
242   bool isGLC() const { return isImmTy(ImmTyGLC); }
243   bool isSLC() const { return isImmTy(ImmTySLC); }
244   bool isTFE() const { return isImmTy(ImmTyTFE); }
245 
246   bool isBankMask() const {
247     return isImmTy(ImmTyDppBankMask);
248   }
249 
250   bool isRowMask() const {
251     return isImmTy(ImmTyDppRowMask);
252   }
253 
254   bool isBoundCtrl() const {
255     return isImmTy(ImmTyDppBoundCtrl);
256   }
257 
258   bool isSDWASel() const {
259     return isImmTy(ImmTySdwaSel);
260   }
261 
262   bool isSDWADstUnused() const {
263     return isImmTy(ImmTySdwaDstUnused);
264   }
265 
266   void setModifiers(unsigned Mods) {
267     assert(isReg() || (isImm() && Imm.Modifiers == 0));
268     if (isReg())
269       Reg.Modifiers = Mods;
270     else
271       Imm.Modifiers = Mods;
272   }
273 
274   bool hasModifiers() const {
275     assert(isRegKind() || isImm());
276     return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
277   }
278 
279   unsigned getReg() const override {
280     return Reg.RegNo;
281   }
282 
283   bool isRegOrImm() const {
284     return isReg() || isImm();
285   }
286 
287   bool isRegClass(unsigned RCID) const {
288     return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
289   }
290 
291   bool isSCSrc32() const {
292     return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
293   }
294 
295   bool isSCSrc64() const {
296     return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
297   }
298 
299   bool isSSrc32() const {
300     return isImm() || isSCSrc32();
301   }
302 
303   bool isSSrc64() const {
304     // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
305     // See isVSrc64().
306     return isImm() || isSCSrc64();
307   }
308 
309   bool isVCSrc32() const {
310     return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
311   }
312 
313   bool isVCSrc64() const {
314     return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
315   }
316 
317   bool isVSrc32() const {
318     return isImm() || isVCSrc32();
319   }
320 
321   bool isVSrc64() const {
322     // TODO: Check if the 64-bit value (coming from assembly source) can be
323     // narrowed to 32 bits (in the instruction stream). That require knowledge
324     // of instruction type (unsigned/signed, floating or "untyped"/B64),
325     // see [AMD GCN3 ISA 6.3.1].
326     // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
327     return isImm() || isVCSrc64();
328   }
329 
330   bool isMem() const override {
331     return false;
332   }
333 
334   bool isExpr() const {
335     return Kind == Expression;
336   }
337 
338   bool isSoppBrTarget() const {
339     return isExpr() || isImm();
340   }
341 
342   SMLoc getStartLoc() const override {
343     return StartLoc;
344   }
345 
346   SMLoc getEndLoc() const override {
347     return EndLoc;
348   }
349 
350   void print(raw_ostream &OS) const override {
351     switch (Kind) {
352     case Register:
353       OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
354       break;
355     case Immediate:
356       if (Imm.Type != AMDGPUOperand::ImmTyNone)
357         OS << getImm();
358       else
359         OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
360       break;
361     case Token:
362       OS << '\'' << getToken() << '\'';
363       break;
364     case Expression:
365       OS << "<expr " << *Expr << '>';
366       break;
367     }
368   }
369 
370   static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
371                                                   enum ImmTy Type = ImmTyNone,
372                                                   bool IsFPImm = false) {
373     auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
374     Op->Imm.Val = Val;
375     Op->Imm.IsFPImm = IsFPImm;
376     Op->Imm.Type = Type;
377     Op->Imm.Modifiers = 0;
378     Op->StartLoc = Loc;
379     Op->EndLoc = Loc;
380     return Op;
381   }
382 
383   static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
384                                            bool HasExplicitEncodingSize = true) {
385     auto Res = llvm::make_unique<AMDGPUOperand>(Token);
386     Res->Tok.Data = Str.data();
387     Res->Tok.Length = Str.size();
388     Res->StartLoc = Loc;
389     Res->EndLoc = Loc;
390     return Res;
391   }
392 
393   static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
394                                                   SMLoc E,
395                                                   const MCRegisterInfo *TRI,
396                                                   const MCSubtargetInfo *STI,
397                                                   bool ForceVOP3) {
398     auto Op = llvm::make_unique<AMDGPUOperand>(Register);
399     Op->Reg.RegNo = RegNo;
400     Op->Reg.TRI = TRI;
401     Op->Reg.STI = STI;
402     Op->Reg.Modifiers = 0;
403     Op->Reg.IsForcedVOP3 = ForceVOP3;
404     Op->StartLoc = S;
405     Op->EndLoc = E;
406     return Op;
407   }
408 
409   static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
410     auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
411     Op->Expr = Expr;
412     Op->StartLoc = S;
413     Op->EndLoc = S;
414     return Op;
415   }
416 
417   bool isDSOffset() const;
418   bool isDSOffset01() const;
419   bool isSWaitCnt() const;
420   bool isHwreg() const;
421   bool isMubufOffset() const;
422   bool isSMRDOffset() const;
423   bool isSMRDLiteralOffset() const;
424   bool isDPPCtrl() const;
425 };
426 
/// Target assembly parser for the AMDGPU (SI/CI/VI) backend.  Implements the
/// MCTargetAsmParser hooks plus the custom-operand parse/convert helpers that
/// the tablegen'd matcher dispatches to.
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // 32 or 64 when the mnemonic carried an explicit _e32/_e64 suffix,
  // 0 when no encoding was forced.
  unsigned ForcedEncodingSize;

  // Subtarget generation checks (delegated to AMDGPUBaseInfo).
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  // SGPR102/103 exist only on pre-VI subtargets.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  // Directive parsers.  Each returns true on error (MCAsmParser convention).
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  // Register-list helpers; these return true on SUCCESS (note: opposite of
  // the directive parsers above).
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
               const MCInstrInfo &MII,
               const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  // MCTargetAsmParser overrides and the top-level operand parser.
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  // Generic helpers for "prefix:value" and bare-flag optional operands.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                   const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands);
  OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);


  // DS instruction operands.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  // SOPP operands (waitcnt, hwreg, branch targets).
  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  bool parseHwreg(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
  OperandMatchResultTy parseHwregOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // FLAT instruction operands.
  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  // MUBUF instruction operands.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  // MIMG instruction flags.
  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  // VOP3 operand conversion.
  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);

  // DPP operands.
  OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands);
  OperandMatchResultTy parseDPPOptionalOps(OperandVector &Operands);
  void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);

  // SDWA operands.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
};
587 
/// Table entry describing one optional instruction operand (e.g. "offset",
/// "glc") for the generic parseOptionalOps() driver.
struct OptionalOperand {
  const char *Name;            // Operand keyword as written in assembly.
  AMDGPUOperand::ImmTy Type;   // Immediate type tag to attach on match.
  bool IsBit;                  // True for bare flags with no ":value" part.
  int64_t Default;             // Value used when the operand is omitted.
  bool (*ConvertResult)(int64_t&); // Optional value fixup; may be null.
};
595 
596 }
597 
598 static int getRegClass(RegisterKind Is, unsigned RegWidth) {
599   if (Is == IS_VGPR) {
600     switch (RegWidth) {
601       default: return -1;
602       case 1: return AMDGPU::VGPR_32RegClassID;
603       case 2: return AMDGPU::VReg_64RegClassID;
604       case 3: return AMDGPU::VReg_96RegClassID;
605       case 4: return AMDGPU::VReg_128RegClassID;
606       case 8: return AMDGPU::VReg_256RegClassID;
607       case 16: return AMDGPU::VReg_512RegClassID;
608     }
609   } else if (Is == IS_TTMP) {
610     switch (RegWidth) {
611       default: return -1;
612       case 1: return AMDGPU::TTMP_32RegClassID;
613       case 2: return AMDGPU::TTMP_64RegClassID;
614     }
615   } else if (Is == IS_SGPR) {
616     switch (RegWidth) {
617       default: return -1;
618       case 1: return AMDGPU::SGPR_32RegClassID;
619       case 2: return AMDGPU::SGPR_64RegClassID;
620       case 4: return AMDGPU::SReg_128RegClassID;
621       case 8: return AMDGPU::SReg_256RegClassID;
622       case 16: return AMDGPU::SReg_512RegClassID;
623     }
624   }
625   return -1;
626 }
627 
628 static unsigned getSpecialRegForName(StringRef RegName) {
629   return StringSwitch<unsigned>(RegName)
630     .Case("exec", AMDGPU::EXEC)
631     .Case("vcc", AMDGPU::VCC)
632     .Case("flat_scratch", AMDGPU::FLAT_SCR)
633     .Case("m0", AMDGPU::M0)
634     .Case("scc", AMDGPU::SCC)
635     .Case("tba", AMDGPU::TBA)
636     .Case("tma", AMDGPU::TMA)
637     .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
638     .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
639     .Case("vcc_lo", AMDGPU::VCC_LO)
640     .Case("vcc_hi", AMDGPU::VCC_HI)
641     .Case("exec_lo", AMDGPU::EXEC_LO)
642     .Case("exec_hi", AMDGPU::EXEC_HI)
643     .Case("tma_lo", AMDGPU::TMA_LO)
644     .Case("tma_hi", AMDGPU::TMA_HI)
645     .Case("tba_lo", AMDGPU::TBA_LO)
646     .Case("tba_hi", AMDGPU::TBA_HI)
647     .Default(0);
648 }
649 
650 bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
651   auto R = parseRegister();
652   if (!R) return true;
653   assert(R->isReg());
654   RegNo = R->getReg();
655   StartLoc = R->getStartLoc();
656   EndLoc = R->getEndLoc();
657   return false;
658 }
659 
660 bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
661 {
662   switch (RegKind) {
663   case IS_SPECIAL:
664     if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
665     if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
666     if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
667     if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
668     if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
669     return false;
670   case IS_VGPR:
671   case IS_SGPR:
672   case IS_TTMP:
673     if (Reg1 != Reg + RegWidth) { return false; }
674     RegWidth++;
675     return true;
676   default:
677     assert(false); return false;
678   }
679 }
680 
// Parse one register reference starting at the current token.  Accepted
// forms: named special registers ("vcc", "exec", ...), a single GPR
// ("v0", "s7", "ttmp2"), a range ("v[8:11]"), or a bracketed list of
// consecutive single registers ("[s0,s1,s2,s3]").  On success, Reg holds
// the resolved target register, RegKind its classification, and RegWidth
// its width in dwords.  Returns true on success, false on any syntax or
// validity error (no diagnostic is emitted here; the caller reports).
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      // Named special register, e.g. "vcc".
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: 'v', 's' or "ttmp"; RegNumIndex is where the
      // numeric part (if any) starts.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY].
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        if (getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

        if (getLexer().isNot(AsmToken::RBrac)) { return false; }
        Parser.Lex();

        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    // Recursively parse the first element; it must be a single register.
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        // Each subsequent element must be a single register of the same
        // kind that extends the run contiguously.
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Resolve the parsed kind/number/width to a concrete MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be are aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    // Reject misaligned register numbers, then convert to an index within
    // the width-specific register class.
    if (RegNum % Size != 0) { return false; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  // Finally, reject registers that do not exist on this subtarget.
  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}
776 
777 std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
778   const auto &Tok = Parser.getTok();
779   SMLoc StartLoc = Tok.getLoc();
780   SMLoc EndLoc = Tok.getEndLoc();
781   const MCRegisterInfo *TRI = getContext().getRegisterInfo();
782 
783   RegisterKind RegKind;
784   unsigned Reg, RegNum, RegWidth;
785 
786   if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
787     return nullptr;
788   }
789   return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
790                                   TRI, &getSTI(), false);
791 }
792 
793 unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
794 
795   uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
796 
797   if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
798       (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
799     return Match_InvalidOperand;
800 
801   if ((TSFlags & SIInstrFlags::VOP3) &&
802       (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
803       getForcedEncodingSize() != 64)
804     return Match_PreferE32;
805 
806   return Match_Success;
807 }
808 
809 
810 bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
811                                               OperandVector &Operands,
812                                               MCStreamer &Out,
813                                               uint64_t &ErrorInfo,
814                                               bool MatchingInlineAsm) {
815   MCInst Inst;
816 
817   switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
818     default: break;
819     case Match_Success:
820       Inst.setLoc(IDLoc);
821       Out.EmitInstruction(Inst, getSTI());
822       return false;
823     case Match_MissingFeature:
824       return Error(IDLoc, "instruction not supported on this GPU");
825 
826     case Match_MnemonicFail:
827       return Error(IDLoc, "unrecognized instruction mnemonic");
828 
829     case Match_InvalidOperand: {
830       SMLoc ErrorLoc = IDLoc;
831       if (ErrorInfo != ~0ULL) {
832         if (ErrorInfo >= Operands.size()) {
833           return Error(IDLoc, "too few operands for instruction");
834         }
835         ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
836         if (ErrorLoc == SMLoc())
837           ErrorLoc = IDLoc;
838       }
839       return Error(ErrorLoc, "invalid operand for instruction");
840     }
841     case Match_PreferE32:
842       return Error(IDLoc, "internal error: instruction without _e64 suffix "
843                           "should be encoded as e32");
844   }
845   llvm_unreachable("Implement any new match types added!");
846 }
847 
848 bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
849                                                uint32_t &Minor) {
850   if (getLexer().isNot(AsmToken::Integer))
851     return TokError("invalid major version");
852 
853   Major = getLexer().getTok().getIntVal();
854   Lex();
855 
856   if (getLexer().isNot(AsmToken::Comma))
857     return TokError("minor version number required, comma expected");
858   Lex();
859 
860   if (getLexer().isNot(AsmToken::Integer))
861     return TokError("invalid minor version");
862 
863   Minor = getLexer().getTok().getIntVal();
864   Lex();
865 
866   return false;
867 }
868 
869 bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
870 
871   uint32_t Major;
872   uint32_t Minor;
873 
874   if (ParseDirectiveMajorMinor(Major, Minor))
875     return true;
876 
877   getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
878   return false;
879 }
880 
// Handle .hsa_code_object_isa, either with no arguments (use the targeted
// GPU's ISA version) or with the full
// "<major>, <minor>, <stepping>, "<vendor>", "<arch>"" argument list.
// Returns true (with a diagnostic) on malformed input.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  // Otherwise every component is required, in order, comma-separated.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  // getStringContents() strips the surrounding quotes.
  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
937 
938 bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
939                                                amd_kernel_code_t &Header) {
940   SmallString<40> ErrStr;
941   raw_svector_ostream Err(ErrStr);
942   if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
943     return TokError(Err.str());
944   }
945   Lex();
946   return false;
947 }
948 
949 bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
950 
951   amd_kernel_code_t Header;
952   AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
953 
954   while (true) {
955 
956     if (getLexer().isNot(AsmToken::EndOfStatement))
957       return TokError("amd_kernel_code_t values must begin on a new line");
958 
959     // Lex EndOfStatement.  This is in a while loop, because lexing a comment
960     // will set the current token to EndOfStatement.
961     while(getLexer().is(AsmToken::EndOfStatement))
962       Lex();
963 
964     if (getLexer().isNot(AsmToken::Identifier))
965       return TokError("expected value identifier or .end_amd_kernel_code_t");
966 
967     StringRef ID = getLexer().getTok().getIdentifier();
968     Lex();
969 
970     if (ID == ".end_amd_kernel_code_t")
971       break;
972 
973     if (ParseAMDKernelCodeTValue(ID, Header))
974       return true;
975   }
976 
977   getTargetStreamer().EmitAMDKernelCodeT(Header);
978 
979   return false;
980 }
981 
982 bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
983   getParser().getStreamer().SwitchSection(
984       AMDGPU::getHSATextSection(getContext()));
985   return false;
986 }
987 
988 bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
989   if (getLexer().isNot(AsmToken::Identifier))
990     return TokError("expected symbol name");
991 
992   StringRef KernelName = Parser.getTok().getString();
993 
994   getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
995                                            ELF::STT_AMDGPU_HSA_KERNEL);
996   Lex();
997   return false;
998 }
999 
1000 bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1001   if (getLexer().isNot(AsmToken::Identifier))
1002     return TokError("expected symbol name");
1003 
1004   StringRef GlobalName = Parser.getTok().getIdentifier();
1005 
1006   getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1007   Lex();
1008   return false;
1009 }
1010 
1011 bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1012   if (getLexer().isNot(AsmToken::Identifier))
1013     return TokError("expected symbol name");
1014 
1015   StringRef GlobalName = Parser.getTok().getIdentifier();
1016 
1017   getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1018   Lex();
1019   return false;
1020 }
1021 
1022 bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1023   getParser().getStreamer().SwitchSection(
1024       AMDGPU::getHSADataGlobalAgentSection(getContext()));
1025   return false;
1026 }
1027 
1028 bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1029   getParser().getStreamer().SwitchSection(
1030       AMDGPU::getHSADataGlobalProgramSection(getContext()));
1031   return false;
1032 }
1033 
1034 bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1035   getParser().getStreamer().SwitchSection(
1036       AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1037   return false;
1038 }
1039 
1040 bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
1041   StringRef IDVal = DirectiveID.getString();
1042 
1043   if (IDVal == ".hsa_code_object_version")
1044     return ParseDirectiveHSACodeObjectVersion();
1045 
1046   if (IDVal == ".hsa_code_object_isa")
1047     return ParseDirectiveHSACodeObjectISA();
1048 
1049   if (IDVal == ".amd_kernel_code_t")
1050     return ParseDirectiveAMDKernelCodeT();
1051 
1052   if (IDVal == ".hsatext" || IDVal == ".text")
1053     return ParseSectionDirectiveHSAText();
1054 
1055   if (IDVal == ".amdgpu_hsa_kernel")
1056     return ParseDirectiveAMDGPUHsaKernel();
1057 
1058   if (IDVal == ".amdgpu_hsa_module_global")
1059     return ParseDirectiveAMDGPUHsaModuleGlobal();
1060 
1061   if (IDVal == ".amdgpu_hsa_program_global")
1062     return ParseDirectiveAMDGPUHsaProgramGlobal();
1063 
1064   if (IDVal == ".hsadata_global_agent")
1065     return ParseSectionDirectiveHSADataGlobalAgent();
1066 
1067   if (IDVal == ".hsadata_global_program")
1068     return ParseSectionDirectiveHSADataGlobalProgram();
1069 
1070   if (IDVal == ".hsarodata_readonly_agent")
1071     return ParseSectionDirectiveHSARodataReadonlyAgent();
1072 
1073   return true;
1074 }
1075 
1076 bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1077                                            unsigned RegNo) const {
1078   if (isCI())
1079     return true;
1080 
1081   if (isSI()) {
1082     // No flat_scr
1083     switch (RegNo) {
1084     case AMDGPU::FLAT_SCR:
1085     case AMDGPU::FLAT_SCR_LO:
1086     case AMDGPU::FLAT_SCR_HI:
1087       return false;
1088     default:
1089       return true;
1090     }
1091   }
1092 
1093   // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1094   // SI/CI have.
1095   for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1096        R.isValid(); ++R) {
1097     if (*R == RegNo)
1098       return false;
1099   }
1100 
1101   return true;
1102 }
1103 
1104 static bool operandsHaveModifiers(const OperandVector &Operands) {
1105 
1106   for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
1107     const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
1108     if (Op.isRegKind() && Op.hasModifiers())
1109       return true;
1110     if (Op.isImm() && Op.hasModifiers())
1111       return true;
1112     if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
1113                        Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
1114       return true;
1115   }
1116   return false;
1117 }
1118 
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Source modifiers can be written as a leading '-', an "abs(...)"
  // wrapper, or a '|...|' pair; record which ones were seen.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(" form of the absolute-value modifier.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // '|' form of the absolute-value modifier.
  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      // A leading '-' on an integer literal is folded into the value.
      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precisions floating-point
      // value is used.  I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      // The lexer delivers the FP literal's bit pattern in an int64;
      // reinterpret it, narrow to single precision, and apply a leading '-'.
      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::LBrac:
    case AsmToken::Identifier: {
      if (auto R = parseRegister()) {
        unsigned Modifiers = 0;

        // Bit 0 = negate, bit 1 = abs.
        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          // Require the closing '|'.
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }
        if (Abs2) {
          // Require the closing ')' of "abs(".
          if (getLexer().isNot(AsmToken::RParen)) {
            return MatchOperand_ParseFail;
          }
          Parser.Lex();
          Modifiers |= 0x2;
        }
        assert(R->isReg());
        R->Reg.IsForcedVOP3 = isForcedVOP3();
        if (Modifiers) {
          R->setModifiers(Modifiers);
        }
        Operands.push_back(std::move(R));
      } else {
        // Not a register: try the VOP3 optional-operand parser, then fall
        // back to recording a raw token for the instruction matcher.
        ResTy = parseVOP3OptionalOps(Operands);
        if (ResTy == MatchOperand_NoMatch) {
          const auto &Tok = Parser.getTok();
          Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(),
                                                        Tok.getLoc()));
          Parser.Lex();
        }
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}
1230 
1231 bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1232                                        StringRef Name,
1233                                        SMLoc NameLoc, OperandVector &Operands) {
1234 
1235   // Clear any forced encodings from the previous instruction.
1236   setForcedEncodingSize(0);
1237 
1238   if (Name.endswith("_e64"))
1239     setForcedEncodingSize(64);
1240   else if (Name.endswith("_e32"))
1241     setForcedEncodingSize(32);
1242 
1243   // Add the instruction mnemonic
1244   Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
1245 
1246   while (!getLexer().is(AsmToken::EndOfStatement)) {
1247     AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1248 
1249     // Eat the comma or space if there is one.
1250     if (getLexer().is(AsmToken::Comma))
1251       Parser.Lex();
1252 
1253     switch (Res) {
1254       case MatchOperand_Success: break;
1255       case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
1256                                                 "failed parsing operand.");
1257       case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
1258                                               "not a valid operand.");
1259     }
1260   }
1261 
1262   return false;
1263 }
1264 
1265 //===----------------------------------------------------------------------===//
1266 // Utility functions
1267 //===----------------------------------------------------------------------===//
1268 
1269 AMDGPUAsmParser::OperandMatchResultTy
1270 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
1271                                     int64_t Default) {
1272   // We are at the end of the statement, and this is a default argument, so
1273   // use a default value.
1274   if (getLexer().is(AsmToken::EndOfStatement)) {
1275     Int = Default;
1276     return MatchOperand_Success;
1277   }
1278 
1279   switch(getLexer().getKind()) {
1280     default: return MatchOperand_NoMatch;
1281     case AsmToken::Identifier: {
1282       StringRef OffsetName = Parser.getTok().getString();
1283       if (!OffsetName.equals(Prefix))
1284         return MatchOperand_NoMatch;
1285 
1286       Parser.Lex();
1287       if (getLexer().isNot(AsmToken::Colon))
1288         return MatchOperand_ParseFail;
1289 
1290       Parser.Lex();
1291       if (getLexer().isNot(AsmToken::Integer))
1292         return MatchOperand_ParseFail;
1293 
1294       if (getParser().parseAbsoluteExpression(Int))
1295         return MatchOperand_ParseFail;
1296       break;
1297     }
1298   }
1299   return MatchOperand_Success;
1300 }
1301 
1302 AMDGPUAsmParser::OperandMatchResultTy
1303 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
1304                                     enum AMDGPUOperand::ImmTy ImmTy) {
1305 
1306   SMLoc S = Parser.getTok().getLoc();
1307   int64_t Offset = 0;
1308 
1309   AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
1310   if (Res != MatchOperand_Success)
1311     return Res;
1312 
1313   Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
1314   return MatchOperand_Success;
1315 }
1316 
1317 AMDGPUAsmParser::OperandMatchResultTy
1318 AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
1319                                enum AMDGPUOperand::ImmTy ImmTy) {
1320   int64_t Bit = 0;
1321   SMLoc S = Parser.getTok().getLoc();
1322 
1323   // We are at the end of the statement, and this is a default argument, so
1324   // use a default value.
1325   if (getLexer().isNot(AsmToken::EndOfStatement)) {
1326     switch(getLexer().getKind()) {
1327       case AsmToken::Identifier: {
1328         StringRef Tok = Parser.getTok().getString();
1329         if (Tok == Name) {
1330           Bit = 1;
1331           Parser.Lex();
1332         } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1333           Bit = 0;
1334           Parser.Lex();
1335         } else {
1336           return MatchOperand_NoMatch;
1337         }
1338         break;
1339       }
1340       default:
1341         return MatchOperand_NoMatch;
1342     }
1343   }
1344 
1345   Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1346   return MatchOperand_Success;
1347 }
1348 
1349 typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1350 
1351 void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1352                            OptionalImmIndexMap& OptionalIdx,
1353                            enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
1354   auto i = OptionalIdx.find(ImmT);
1355   if (i != OptionalIdx.end()) {
1356     unsigned Idx = i->second;
1357     ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1358   } else {
1359     Inst.addOperand(MCOperand::createImm(Default));
1360   }
1361 }
1362 
1363 static bool operandsHasOptionalOp(const OperandVector &Operands,
1364                                   const OptionalOperand &OOp) {
1365   for (unsigned i = 0; i < Operands.size(); i++) {
1366     const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1367     if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1368         (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1369       return true;
1370 
1371   }
1372   return false;
1373 }
1374 
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands) {
  // Try each optional operand in table order; at most one is parsed per
  // call (the matcher re-invokes us for subsequent operands).
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    // Skip anything already parsed for this instruction.
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      // Flag-style operand (e.g. "gds"/"nogds").
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    // Value-style operand ("<name>:<value>").
    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    // NOTE: compared against the raw parsed value, before ConvertResult
    // has a chance to rewrite it.
    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    // Default values are not recorded as operands; the cvt* routines add
    // defaults back when building the MCInst.
    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1412 
1413 AMDGPUAsmParser::OperandMatchResultTy
1414 AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1415   if (getLexer().isNot(AsmToken::Identifier)) {
1416     return MatchOperand_NoMatch;
1417   }
1418   StringRef Tok = Parser.getTok().getString();
1419   if (Tok != Prefix) {
1420     return MatchOperand_NoMatch;
1421   }
1422 
1423   Parser.Lex();
1424   if (getLexer().isNot(AsmToken::Colon)) {
1425     return MatchOperand_ParseFail;
1426   }
1427 
1428   Parser.Lex();
1429   if (getLexer().isNot(AsmToken::Identifier)) {
1430     return MatchOperand_ParseFail;
1431   }
1432 
1433   Value = Parser.getTok().getString();
1434   return MatchOperand_Success;
1435 }
1436 
1437 //===----------------------------------------------------------------------===//
1438 // ds
1439 //===----------------------------------------------------------------------===//
1440 
// Optional DS modifiers: {name, immediate type, is-flag, default, converter}
// (field roles per parseOptionalOps' use of Name/Type/IsBit/Default/
// ConvertResult).
static const OptionalOperand DSOptionalOps [] = {
  {"offset",  AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

// Variant for DS instructions that take a pair of 8-bit offsets.
static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};
1451 
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  // Parse the single-offset DS modifiers (offset, gds).
  return parseOptionalOps(DSOptionalOps, Operands);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  // Parse the dual-offset DS modifiers (offset0, offset1, gds).
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}
1460 
1461 AMDGPUAsmParser::OperandMatchResultTy
1462 AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
1463   SMLoc S = Parser.getTok().getLoc();
1464   AMDGPUAsmParser::OperandMatchResultTy Res =
1465     parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
1466   if (Res == MatchOperand_NoMatch) {
1467     Operands.push_back(AMDGPUOperand::CreateImm(0, S,
1468                        AMDGPUOperand::ImmTyOffset));
1469     Res = MatchOperand_Success;
1470   }
1471   return Res;
1472 }
1473 
bool AMDGPUOperand::isDSOffset() const {
  // Single DS offset: an unsigned 16-bit immediate.
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  // offset0/offset1 form: each offset is an unsigned 8-bit immediate.
  return isImm() && isUInt<8>(getImm());
}
1481 
1482 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1483                                     const OperandVector &Operands) {
1484 
1485   OptionalImmIndexMap OptionalIdx;
1486 
1487   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1488     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1489 
1490     // Add the register arguments
1491     if (Op.isReg()) {
1492       Op.addRegOperands(Inst, 1);
1493       continue;
1494     }
1495 
1496     // Handle optional arguments
1497     OptionalIdx[Op.getImmTy()] = i;
1498   }
1499 
1500   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
1501   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
1502   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1503 
1504   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1505 }
1506 
1507 void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1508 
1509   std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1510   bool GDSOnly = false;
1511 
1512   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1513     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1514 
1515     // Add the register arguments
1516     if (Op.isReg()) {
1517       Op.addRegOperands(Inst, 1);
1518       continue;
1519     }
1520 
1521     if (Op.isToken() && Op.getToken() == "gds") {
1522       GDSOnly = true;
1523       continue;
1524     }
1525 
1526     // Handle optional arguments
1527     OptionalIdx[Op.getImmTy()] = i;
1528   }
1529 
1530   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1531   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1532 
1533   if (!GDSOnly) {
1534     addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
1535   }
1536   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1537 }
1538 
1539 
1540 //===----------------------------------------------------------------------===//
1541 // s_waitcnt
1542 //===----------------------------------------------------------------------===//
1543 
// Parse one "name(value)" group of an s_waitcnt operand and merge the value
// into IntVal's field for that counter. Returns true on a syntax error or an
// unknown counter name.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Groups may be separated by '&' or ','; either separator is optional.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  // Field positions within the s_waitcnt simm16:
  //   vmcnt [3:0], expcnt [6:4], lgkmcnt [11:8].
  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  // Clear the counter's field, then insert the new count.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
1586 
1587 AMDGPUAsmParser::OperandMatchResultTy
1588 AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1589   // Disable all counters by default.
1590   // vmcnt   [3:0]
1591   // expcnt  [6:4]
1592   // lgkmcnt [11:8]
1593   int64_t CntVal = 0xf7f;
1594   SMLoc S = Parser.getTok().getLoc();
1595 
1596   switch(getLexer().getKind()) {
1597     default: return MatchOperand_ParseFail;
1598     case AsmToken::Integer:
1599       // The operand can be an integer value.
1600       if (getParser().parseAbsoluteExpression(CntVal))
1601         return MatchOperand_ParseFail;
1602       break;
1603 
1604     case AsmToken::Identifier:
1605       do {
1606         if (parseCnt(CntVal))
1607           return MatchOperand_ParseFail;
1608       } while(getLexer().isNot(AsmToken::EndOfStatement));
1609       break;
1610   }
1611   Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1612   return MatchOperand_Success;
1613 }
1614 
// Parse "hwreg(<reg>[, <offset>, <width>])". The register may be given
// symbolically (HW_REG_*) or as an integer code; Offset/Width keep their
// caller-supplied defaults in the short form. Returns true on a syntax
// error; range checking is left to the caller.
bool AMDGPUAsmParser::parseHwreg(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier) {
  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    IsIdentifier = true;
    // Unknown names take Default(-1); note the switch is over unsigned, so
    // the value widens to UINT32_MAX in HwRegCode — still out of the valid
    // 0..63 range, so the caller diagnoses it.
    HwRegCode = StringSwitch<unsigned>(Parser.getTok().getString())
      .Case("HW_REG_MODE"     , 1)
      .Case("HW_REG_STATUS"   , 2)
      .Case("HW_REG_TRAPSTS"  , 3)
      .Case("HW_REG_HW_ID"    , 4)
      .Case("HW_REG_GPR_ALLOC", 5)
      .Case("HW_REG_LDS_ALLOC", 6)
      .Case("HW_REG_IB_STS"   , 7)
      .Default(-1);
    Parser.Lex();
  } else {
    IsIdentifier = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwRegCode))
      return true;
  }

  // Short form: hwreg(<reg>) — offset and width keep their defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
1674 
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwregOp(OperandVector &Operands) {
  // Parse the simm16 operand of s_getreg/s_setreg: either a raw integer or
  // the symbolic hwreg(...) form handled by parseHwreg().
  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_ParseFail;
      if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        bool IsIdentifier = false;
        int64_t HwRegCode = -1;
        int64_t Offset = 0; // default
        int64_t Width = 32; // default
        if (parseHwreg(HwRegCode, Offset, Width, IsIdentifier))
          return MatchOperand_ParseFail;
        // HwRegCode (6) [5:0]
        // Offset (5) [10:6]
        // WidthMinusOne (5) [15:11]
        // As above, range errors are reported but parsing continues and an
        // operand is still created from the (possibly bogus) fields.
        if (HwRegCode < 0 || HwRegCode > 63) {
          if (IsIdentifier)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || Offset > 31)
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if (Width < 1 || Width > 32)
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        Imm16Val = HwRegCode | (Offset << 6) | ((Width-1) << 11);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
1720 
bool AMDGPUOperand::isSWaitCnt() const {
  // Any immediate is acceptable; field packing is handled at parse time.
  return isImm();
}

bool AMDGPUOperand::isHwreg() const {
  // Must be an immediate tagged ImmTyHwreg (produced by parseHwregOp).
  return isImmTy(ImmTyHwreg);
}
1728 
1729 //===----------------------------------------------------------------------===//
1730 // sopp branch targets
1731 //===----------------------------------------------------------------------===//
1732 
1733 AMDGPUAsmParser::OperandMatchResultTy
1734 AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1735   SMLoc S = Parser.getTok().getLoc();
1736 
1737   switch (getLexer().getKind()) {
1738     default: return MatchOperand_ParseFail;
1739     case AsmToken::Integer: {
1740       int64_t Imm;
1741       if (getParser().parseAbsoluteExpression(Imm))
1742         return MatchOperand_ParseFail;
1743       Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1744       return MatchOperand_Success;
1745     }
1746 
1747     case AsmToken::Identifier:
1748       Operands.push_back(AMDGPUOperand::CreateExpr(
1749           MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
1750                                   Parser.getTok().getString()), getContext()), S));
1751       Parser.Lex();
1752       return MatchOperand_Success;
1753   }
1754 }
1755 
1756 //===----------------------------------------------------------------------===//
1757 // flat
1758 //===----------------------------------------------------------------------===//
1759 
// Optional FLAT modifiers, all flag-style with default 0:
// {name, immediate type, is-flag, default, converter}.
static const OptionalOperand FlatOptionalOps [] = {
  {"glc",    AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

// Variant for FLAT atomics, which have no freely-settable glc modifier.
static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1770 
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  // Parse the optional FLAT modifiers (glc/slc/tfe).
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  // Parse the optional FLAT atomic modifiers (slc/tfe).
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}
1780 
1781 void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
1782                                const OperandVector &Operands) {
1783   OptionalImmIndexMap OptionalIdx;
1784 
1785   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1786     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1787 
1788     // Add the register arguments
1789     if (Op.isReg()) {
1790       Op.addRegOperands(Inst, 1);
1791       continue;
1792     }
1793 
1794     OptionalIdx[Op.getImmTy()] = i;
1795   }
1796   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1797   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1798   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1799 }
1800 
1801 
1802 void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
1803                                const OperandVector &Operands) {
1804   OptionalImmIndexMap OptionalIdx;
1805 
1806   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1807     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1808 
1809     // Add the register arguments
1810     if (Op.isReg()) {
1811       Op.addRegOperands(Inst, 1);
1812       continue;
1813     }
1814 
1815     // Handle 'glc' token for flat atomics.
1816     if (Op.isToken()) {
1817       continue;
1818     }
1819 
1820     // Handle optional arguments
1821     OptionalIdx[Op.getImmTy()] = i;
1822   }
1823   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1824   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1825 }
1826 
1827 //===----------------------------------------------------------------------===//
1828 // mubuf
1829 //===----------------------------------------------------------------------===//
1830 
// Optional MUBUF modifiers: a value-style offset plus the glc/slc/tfe
// flags: {name, immediate type, is-flag, default, converter}.
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc",    AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1837 
1838 AMDGPUAsmParser::OperandMatchResultTy
1839 AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
1840   return parseOptionalOps(MubufOptionalOps, Operands);
1841 }
1842 
1843 AMDGPUAsmParser::OperandMatchResultTy
1844 AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
1845   return parseIntWithPrefix("offset", Operands);
1846 }
1847 
1848 AMDGPUAsmParser::OperandMatchResultTy
1849 AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
1850   return parseNamedBit("glc", Operands);
1851 }
1852 
// Parse the optional "slc" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}
1857 
// Parse the optional "tfe" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}
1862 
// A MUBUF offset is an offset-typed immediate that fits in 12 bits.
bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}
1866 
1867 void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
1868                                const OperandVector &Operands) {
1869   OptionalImmIndexMap OptionalIdx;
1870 
1871   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1872     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1873 
1874     // Add the register arguments
1875     if (Op.isReg()) {
1876       Op.addRegOperands(Inst, 1);
1877       continue;
1878     }
1879 
1880     // Handle the case where soffset is an immediate
1881     if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
1882       Op.addImmOperands(Inst, 1);
1883       continue;
1884     }
1885 
1886     // Handle tokens like 'offen' which are sometimes hard-coded into the
1887     // asm string.  There are no MCInst operands for these.
1888     if (Op.isToken()) {
1889       continue;
1890     }
1891     assert(Op.isImm());
1892 
1893     // Handle optional arguments
1894     OptionalIdx[Op.getImmTy()] = i;
1895   }
1896 
1897   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1898   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1899   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1900   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1901 }
1902 
1903 //===----------------------------------------------------------------------===//
1904 // mimg
1905 //===----------------------------------------------------------------------===//
1906 
// Parse "dmask:<n>" as an ImmTyDMask immediate.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}
1911 
// Parse the optional "unorm" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}
1916 
// Parse the optional "da" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}
1921 
// Parse the optional "r128" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}
1926 
// Parse the optional "lwe" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
1931 
1932 //===----------------------------------------------------------------------===//
1933 // smrd
1934 //===----------------------------------------------------------------------===//
1935 
// An SMRD offset is any immediate that fits in 8 bits.
bool AMDGPUOperand::isSMRDOffset() const {

  // FIXME: Support 20-bit offsets on VI.  We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}
1942 
// An SMRD literal offset is an immediate too large for the 8-bit field
// but still representable in 32 bits.
bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
1948 
1949 //===----------------------------------------------------------------------===//
1950 // vop3
1951 //===----------------------------------------------------------------------===//
1952 
// Convert a "mul:<n>" multiplier (1, 2, or 4) into its omod encoding
// (0, 1, or 2).  Returns false, leaving \p Mul untouched, for any other
// value.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
1960 
// Convert a "div:<n>" divisor (1 or 2) into its omod encoding (0 or 3).
// Returns false, leaving \p Div untouched, for any other value.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
1974 
// Optional VOP3 operands.  "mul" and "div" both target the omod field;
// their last-field callbacks convert the parsed value into the encoding.
static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul",   AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div",   AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};
1980 
1981 static bool isVOP3(OperandVector &Operands) {
1982   if (operandsHaveModifiers(Operands))
1983     return true;
1984 
1985   if (Operands.size() >= 2) {
1986     AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
1987 
1988     if (DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
1989       return true;
1990   }
1991 
1992   if (Operands.size() >= 5)
1993     return true;
1994 
1995   if (Operands.size() > 3) {
1996     AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
1997     if (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
1998         Src1Op.isRegClass(AMDGPU::SReg_64RegClassID))
1999       return true;
2000   }
2001   return false;
2002 }
2003 
// Parse the optional VOP3 operands (clamp/mul/div), but only when the
// instruction can actually take the VOP3 encoding (modifiers present, a
// VOP3-forcing operand shape, trailing tokens remain, or the user forced
// the 64-bit encoding).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        // Give each plain register/immediate an explicit zero-modifier so
        // the operand list is uniformly modifier-bearing.
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}
2032 
2033 void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2034   unsigned I = 1;
2035   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2036   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2037     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2038   }
2039   for (unsigned E = Operands.size(); I != E; ++I)
2040     ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2041 }
2042 
2043 void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
2044   uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2045   if (TSFlags & SIInstrFlags::VOP3) {
2046     cvtVOP3(Inst, Operands);
2047   } else {
2048     cvtId(Inst, Operands);
2049   }
2050 }
2051 
2052 void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
2053   if (operandsHaveModifiers(Operands)) {
2054     cvtVOP3(Inst, Operands);
2055   } else {
2056     cvtId(Inst, Operands);
2057   }
2058 }
2059 
// Unconditionally use the VOP3 conversion (VOP3-only opcodes).
void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}
2063 
2064 void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
2065   OptionalImmIndexMap OptionalIdx;
2066   unsigned I = 1;
2067   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2068   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2069     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2070   }
2071 
2072   for (unsigned E = Operands.size(); I != E; ++I) {
2073     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2074     if (Op.isRegOrImmWithInputMods()) {
2075       Op.addRegOrImmWithInputModsOperands(Inst, 2);
2076     } else if (Op.isImm()) {
2077       OptionalIdx[Op.getImmTy()] = I;
2078     } else {
2079       assert(false);
2080     }
2081   }
2082 
2083   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
2084   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
2085 }
2086 
2087 void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2088   unsigned I = 1;
2089   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2090   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2091     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2092   }
2093 
2094   OptionalImmIndexMap OptionalIdx;
2095 
2096   for (unsigned E = Operands.size(); I != E; ++I) {
2097     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2098 
2099     // Add the register arguments
2100     if (Op.isRegOrImm()) {
2101       Op.addRegOrImmOperands(Inst, 1);
2102       continue;
2103     } else if (Op.isImmModifier()) {
2104       OptionalIdx[Op.getImmTy()] = I;
2105     } else {
2106       assert(false);
2107     }
2108   }
2109 
2110   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2111   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2112   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2113   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2114   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2115   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2116   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2117   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2118 }
2119 
// Convert parsed MIMG atomic operands into \p Inst.  Same layout as
// cvtMIMG, except the data register is also emitted as a source (atomics
// read and write the same vdata register).
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  // NOTE(review): Operands[I] is emitted here and then AGAIN by the first
  // iteration of the loop below (I is not advanced past it) — confirm this
  // double emission matches the MCInst operand layout the matcher expects
  // for MIMG atomics.
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Optional typed immediates are emitted below in fixed order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2155 
2156 //===----------------------------------------------------------------------===//
2157 // dpp
2158 //===----------------------------------------------------------------------===//
2159 
2160 bool AMDGPUOperand::isDPPCtrl() const {
2161   bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2162   if (result) {
2163     int64_t Imm = getImm();
2164     return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2165            ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2166            ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2167            ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2168            (Imm == 0x130) ||
2169            (Imm == 0x134) ||
2170            (Imm == 0x138) ||
2171            (Imm == 0x13c) ||
2172            (Imm == 0x140) ||
2173            (Imm == 0x141) ||
2174            (Imm == 0x142) ||
2175            (Imm == 0x143);
2176   }
2177   return false;
2178 }
2179 
// Parse a DPP control operand and fold it into a single immediate
// (ImmTyDppCtrl).  Encodings produced here:
//   quad_perm:[a,b,c,d] -> a | (b<<2) | (c<<4) | (d<<6)   (0x000-0x0ff)
//   row_shl:n  -> 0x100|n      row_shr:n -> 0x110|n
//   row_ror:n  -> 0x120|n
//   wave_shl   -> 0x130        wave_rol  -> 0x134
//   wave_shr   -> 0x138        wave_ror  -> 0x13c
//   row_mirror -> 0x140        row_half_mirror -> 0x141
//   row_bcast:15 -> 0x142      row_bcast:31 -> 0x143
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  // A DPP control always starts with an identifier.
  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    // All remaining forms are "<prefix>:<value>".
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      // Each selector occupies 2 bits of the final value.
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 2);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 4);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 6);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;

    } else {
      // sel:%d
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      if (Prefix == "row_shl") {
        Int |= 0x100;
      } else if (Prefix == "row_shr") {
        Int |= 0x110;
      } else if (Prefix == "row_ror") {
        Int |= 0x120;
      } else if (Prefix == "wave_shl") {
        // The wave_* controls take a fixed encoding; the parsed integer
        // is discarded.
        Int = 0x130;
      } else if (Prefix == "wave_rol") {
        Int = 0x134;
      } else if (Prefix == "wave_shr") {
        Int = 0x138;
      } else if (Prefix == "wave_ror") {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        // NOTE(review): a row_bcast value other than 15/31 falls through
        // with Int left as the raw parsed integer rather than being
        // rejected — presumably it should be a parse failure; confirm.
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }
  Parser.Lex(); // eat last token

  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2291 
// Optional DPP operands with their default values (row_mask/bank_mask
// default to 0xf, bound_ctrl to -1 meaning "not given").
static const OptionalOperand DPPOptionalOps [] = {
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, nullptr}
};
2297 
2298 AMDGPUAsmParser::OperandMatchResultTy
2299 AMDGPUAsmParser::parseDPPOptionalOps(OperandVector &Operands) {
2300   SMLoc S = Parser.getTok().getLoc();
2301   OperandMatchResultTy Res = parseOptionalOps(DPPOptionalOps, Operands);
2302   // XXX - sp3 use syntax "bound_ctrl:0" to indicate that bound_ctrl bit was set
2303   if (Res == MatchOperand_Success) {
2304     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands.back());
2305     // If last operand was parsed as bound_ctrl we should replace it with correct value (1)
2306     if (Op.isImmTy(AMDGPUOperand::ImmTyDppBoundCtrl)) {
2307       Operands.pop_back();
2308       Operands.push_back(
2309         AMDGPUOperand::CreateImm(1, S, AMDGPUOperand::ImmTyDppBoundCtrl));
2310         return MatchOperand_Success;
2311     }
2312   }
2313   return Res;
2314 }
2315 
// DPP conversion for opcodes whose sources carry input modifiers.
void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, true);
}
2319 
// DPP conversion for opcodes whose sources carry no input modifiers.
void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, false);
}
2323 
2324 void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
2325                              bool HasMods) {
2326   OptionalImmIndexMap OptionalIdx;
2327 
2328   unsigned I = 1;
2329   const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2330   for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2331     ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2332   }
2333 
2334   for (unsigned E = Operands.size(); I != E; ++I) {
2335     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2336     // Add the register arguments
2337     if (!HasMods && Op.isReg()) {
2338       Op.addRegOperands(Inst, 1);
2339     } else if (HasMods && Op.isRegOrImmWithInputMods()) {
2340       Op.addRegOrImmWithInputModsOperands(Inst, 2);
2341     } else if (Op.isDPPCtrl()) {
2342       Op.addImmOperands(Inst, 1);
2343     } else if (Op.isImm()) {
2344       // Handle optional arguments
2345       OptionalIdx[Op.getImmTy()] = I;
2346     } else {
2347       llvm_unreachable("Invalid operand type");
2348     }
2349   }
2350 
2351   // ToDo: fix default values for row_mask and bank_mask
2352   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2353   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2354   addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2355 }
2356 
2357 //===----------------------------------------------------------------------===//
2358 // sdwa
2359 //===----------------------------------------------------------------------===//
2360 
2361 AMDGPUAsmParser::OperandMatchResultTy
2362 AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2363   SMLoc S = Parser.getTok().getLoc();
2364   StringRef Value;
2365   AMDGPUAsmParser::OperandMatchResultTy res;
2366 
2367   res = parseStringWithPrefix("dst_sel", Value);
2368   if (res == MatchOperand_ParseFail) {
2369     return MatchOperand_ParseFail;
2370   } else if (res == MatchOperand_NoMatch) {
2371     res = parseStringWithPrefix("src0_sel", Value);
2372     if (res == MatchOperand_ParseFail) {
2373       return MatchOperand_ParseFail;
2374     } else if (res == MatchOperand_NoMatch) {
2375       res = parseStringWithPrefix("src1_sel", Value);
2376       if (res != MatchOperand_Success) {
2377         return res;
2378       }
2379     }
2380   }
2381 
2382   int64_t Int;
2383   Int = StringSwitch<int64_t>(Value)
2384         .Case("BYTE_0", 0)
2385         .Case("BYTE_1", 1)
2386         .Case("BYTE_2", 2)
2387         .Case("BYTE_3", 3)
2388         .Case("WORD_0", 4)
2389         .Case("WORD_1", 5)
2390         .Case("DWORD", 6)
2391         .Default(0xffffffff);
2392   Parser.Lex(); // eat last token
2393 
2394   if (Int == 0xffffffff) {
2395     return MatchOperand_ParseFail;
2396   }
2397 
2398   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2399                                               AMDGPUOperand::ImmTySdwaSel));
2400   return MatchOperand_Success;
2401 }
2402 
2403 AMDGPUAsmParser::OperandMatchResultTy
2404 AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2405   SMLoc S = Parser.getTok().getLoc();
2406   StringRef Value;
2407   AMDGPUAsmParser::OperandMatchResultTy res;
2408 
2409   res = parseStringWithPrefix("dst_unused", Value);
2410   if (res != MatchOperand_Success) {
2411     return res;
2412   }
2413 
2414   int64_t Int;
2415   Int = StringSwitch<int64_t>(Value)
2416         .Case("UNUSED_PAD", 0)
2417         .Case("UNUSED_SEXT", 1)
2418         .Case("UNUSED_PRESERVE", 2)
2419         .Default(0xffffffff);
2420   Parser.Lex(); // eat last token
2421 
2422   if (Int == 0xffffffff) {
2423     return MatchOperand_ParseFail;
2424   }
2425 
2426   Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2427                                               AMDGPUOperand::ImmTySdwaDstUnused));
2428   return MatchOperand_Success;
2429 }
2430 
2431 
/// Force static initialization.
// Registers this asm parser for both the R600 and GCN targets.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
2437 
2438 #define GET_REGISTER_MATCHER
2439 #define GET_MATCHER_IMPLEMENTATION
2440 #include "AMDGPUGenAsmMatcher.inc"
2441