//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not).
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone)
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
           F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isClamp() const {
    return isImmTy(ImmTyClamp);
  }

  bool isOMod() const {
    return isImmTy(ImmTyOMod);
  }

  bool isImmModifier() const {
    return Kind == Immediate && Imm.Type != ImmTyNone;
  }

  bool isDMask() const {
    return isImmTy(ImmTyDMask);
  }

  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }

  bool isMod() const {
    return isClamp() || isOMod();
  }

  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires
    // knowledge of the instruction type (unsigned/signed, floating or
    // "untyped"/B64), see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      if (Imm.Type != AMDGPUOperand::ImmTyNone)
        OS << getImm();
      else
        OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                           bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
               const MCInstrInfo &MII,
               const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                   const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}
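
// Illustrative examples of the mapping above (using the register syntax
// accepted by ParseRegister below):
//   v7     -> IsVgpr, RegWidth 1  -> VGPR_32RegClassID
//   s[0:1] -> !IsVgpr, RegWidth 2 -> SGPR_64RegClassID
//   v[2:5] -> IsVgpr, RegWidth 4  -> VReg_128RegClassID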

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register list like v[0:1], i.e. a register wider than
    // 32 bits.

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned.  Max alignment is 4 dwords.
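      // e.g. s[4:7] (RegLo = 4, width 4) is accepted with RegIndexInClass
      // 4 / 4 = 1, while s[2:5] is rejected because 2 % 4 != 0.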
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, getSTI());
      return false;
    case Match_MissingFeature:
      return Error(IDLoc, "instruction not supported on this GPU");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      SMLoc ErrorLoc = IDLoc;
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size()) {
          return Error(IDLoc, "too few operands for instruction");
        }
        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
    case Match_PreferE32:
      return Error(IDLoc, "internal error: instruction without _e64 suffix "
                          "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more
  // that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precision floating-point
      // value is used.  I'm not sure of the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::Identifier: {
      SMLoc S, E;
      unsigned RegNo;
      if (!ParseRegister(RegNo, S, E)) {
        unsigned Modifiers = 0;

        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }

        Operands.push_back(AMDGPUOperand::CreateReg(
            RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
            isForcedVOP3()));

        if (Modifiers) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
          RegOp.setModifiers(Modifiers);
        }
      } else {
        ResTy = parseVOP3OptionalOps(Operands);
        if (ResTy == MatchOperand_NoMatch) {
          Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                        S));
          Parser.Lex();
        }
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                                "failed parsing operand.");
      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                              "not a valid operand.");
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Identifier: {
      StringRef OffsetName = Parser.getTok().getString();
      if (!OffsetName.equals(Prefix))
        return MatchOperand_NoMatch;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Colon))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;

      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;
      break;
    }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are already at the end of the statement, this named bit was
  // omitted, so keep the default value of 0.
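  // e.g. with Name == "glc", the token "glc" parses to 1 and "noglc" to 0;
  // any other identifier is no match.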
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
      case AsmToken::Identifier: {
        StringRef Tok = Parser.getTok().getString();
        if (Tok == Name) {
          Bit = 1;
          Parser.Lex();
        } else if (Tok.startswith("no") && Tok.endswith(Name)) {
          Bit = 0;
          Parser.Lex();
        } else {
          return MatchOperand_NoMatch;
        }
        break;
      }
      default:
        return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

static void addOptionalImmOperand(MCInst &Inst, const OperandVector &Operands,
                                  OptionalImmIndexMap &OptionalIdx,
                                  enum AMDGPUOperand::ImmTy ImmT) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset",  AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
    parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                       AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);

  if (!GDSOnly) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
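
// Worked example of the arithmetic above: starting from the default 0xf7f,
// parsing "vmcnt(0)" clears bits [3:0] and ors in 0, and a subsequent
// "lgkmcnt(0)" clears bits [11:8], leaving an encoded value of 0x070.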

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt   [3:0]
  // expcnt  [6:4]
  // lgkmcnt [11:8]
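  // The default of 0xf7f sets vmcnt = 15, expcnt = 7 and lgkmcnt = 15, i.e.
  // no wait is performed on a counter unless the source names it.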
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(CntVal))
        return MatchOperand_ParseFail;
      break;

    case AsmToken::Identifier:
      do {
        if (parseCnt(CntVal))
          return MatchOperand_ParseFail;
      } while (getLexer().isNot(AsmToken::EndOfStatement));
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      Operands.push_back(AMDGPUOperand::CreateExpr(
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
                                  Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc",    AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc",    AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI.  We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is wider than 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

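// The hardware output-modifier (omod) field encodes: 0 = none, 1 = mul:2,
// 2 = mul:4, 3 = div:2. The two converters below map the value written in
// the asm source (e.g. 'v_add_f32 v0, v1, v2 mul:2') onto that encoding.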
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps[] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul",   AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div",   AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

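// Heuristically decide whether the parsed operands require the VOP3
// encoding. For example (illustrative), 'v_cmp_eq_i32 s[0:1], v0, v1'
// writes an explicit SGPR-pair dst, which is only expressible in VOP3.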
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by operandsHaveModifiers may change after parsing
  // an operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

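// Identity conversion: copy the defs and then the remaining parsed operands
// to the MCInst in order, with no modifier operands interleaved.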
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

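// Convert parsed VOP3 operands to MCInst operands. Each source that accepts
// input modifiers contributes two MCInst operands (the modifier bits, then
// the value), e.g. (illustrative) 'v_add_f32 v0, -v1, |v2| clamp'. The
// optional clamp and omod immediates are appended last.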
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
}

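// Convert parsed MIMG operands to MCInst operands, e.g. (illustrative):
//
//   image_load v[4:7], v[0:3], s[8:15] dmask:0xf unorm glc
//
// Register arguments are added first; the optional modifier immediates are
// then appended in the fixed order dmask, unorm, glc, da, r128, tfe, lwe,
// slc.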
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

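// Convert parsed MIMG atomic operands. The vdata register is both read and
// written, so the register parsed as the def is added to the MCInst a second
// time as the tied source, e.g. (illustrative):
//
//   image_atomic_add v4, v[0:3], s[8:15] dmask:0x1 unorm glc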
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"