1 //===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "RISCVMatInt.h"
10 #include "MCTargetDesc/RISCVMCTargetDesc.h"
11 #include "llvm/ADT/APInt.h"
12 #include "llvm/Support/MathExtras.h"
13 using namespace llvm;
14 
15 // Recursively generate a sequence for materializing an integer.
// Recursively generate the shortest instruction sequence materializing Val
// into a register, appending to Res. Handles the 32-bit base case directly
// and peels 12 bits per recursion step for larger RV64 constants.
static void generateInstSeqImpl(int64_t Val,
                                const FeatureBitset &ActiveFeatures,
                                RISCVMatInt::InstSeq &Res) {
  bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];

  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
    //
    // Adding 0x800 rounds the upper 20 bits so that LUI Hi20 followed by
    // adding the sign-extended Lo12 reconstructs Val exactly (the carry
    // compensates for Lo12 being negative when bit 11 is set).
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.push_back(RISCVMatInt::Inst(RISCV::LUI, Hi20));

    // Emit the ADDI when there are low bits to add, or when nothing has been
    // emitted yet (Val == 0 must still produce "ADDI rd, x0, 0").
    if (Lo12 || Hi20 == 0) {
      // On RV64, pair LUI with ADDIW so the 32-bit result stays properly
      // sign-extended in the 64-bit register.
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems to be possible by simply
  // emitting the most significant 32 bits (LUI+ADDIW) followed by as many left
  // shift (SLLI) and immediate additions (ADDI) as needed. However, due to the
  // fact that ADDI performs a sign extended addition, doing it like that would
  // only be possible when at most 11 bits of the ADDI instructions are used.
  // Using all 12 bits of the ADDI instructions, like done by GAS, actually
  // requires that the constant is processed starting with the least significant
  // bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.

  // Peel off the (rounded) low 12 bits; they are re-added by the final ADDI.
  int64_t Lo12 = SignExtend64<12>(Val);
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
  // Fold any trailing zero bits of Hi52 into the shift as well. findFirstSet
  // gives the index of the least significant set bit; Hi52 is nonzero here
  // because Val did not fit in 32 bits.
  int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
  // Drop the zeros and sign-extend the remainder to its reduced width so the
  // recursion can terminate in the 32-bit base case above.
  Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  generateInstSeqImpl(Hi52, ActiveFeatures, Res);

  // On return from the recursion, shift the materialized upper part into
  // place and add the low bits back in if they are nonzero.
  Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
  if (Lo12)
    Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
}
78 
79 namespace llvm {
80 namespace RISCVMatInt {
81 InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
82   RISCVMatInt::InstSeq Res;
83   generateInstSeqImpl(Val, ActiveFeatures, Res);
84 
85   // If the constant is positive we might be able to generate a shifted constant
86   // with no leading zeros and use a final SRLI to restore them.
87   if (Val > 0 && Res.size() > 2) {
88     assert(ActiveFeatures[RISCV::Feature64Bit] &&
89            "Expected RV32 to only need 2 instructions");
90     unsigned LeadingZeros = countLeadingZeros((uint64_t)Val);
91     uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
92     // Fill in the bits that will be shifted out with 1s. An example where this
93     // helps is trailing one masks with 32 or more ones. This will generate
94     // ADDI -1 and an SRLI.
95     ShiftedVal |= maskTrailingOnes<uint64_t>(LeadingZeros);
96 
97     RISCVMatInt::InstSeq TmpSeq;
98     generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
99     TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));
100 
101     // Keep the new sequence if it is an improvement.
102     if (TmpSeq.size() < Res.size()) {
103       Res = TmpSeq;
104       // A 2 instruction sequence is the best we can do.
105       if (Res.size() <= 2)
106         return Res;
107     }
108 
109     // Some cases can benefit from filling the lower bits with zeros instead.
110     ShiftedVal &= maskTrailingZeros<uint64_t>(LeadingZeros);
111     TmpSeq.clear();
112     generateInstSeqImpl(ShiftedVal, ActiveFeatures, TmpSeq);
113     TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, LeadingZeros));
114 
115     // Keep the new sequence if it is an improvement.
116     if (TmpSeq.size() < Res.size()) {
117       Res = TmpSeq;
118       // A 2 instruction sequence is the best we can do.
119       if (Res.size() <= 2)
120         return Res;
121     }
122 
123     // If we have exactly 32 leading zeros and Zba, we can try using zext.w at
124     // the end of the sequence.
125     if (LeadingZeros == 32 && ActiveFeatures[RISCV::FeatureExtZba]) {
126       // Try replacing upper bits with 1.
127       uint64_t LeadingOnesVal = Val | maskLeadingOnes<uint64_t>(LeadingZeros);
128       TmpSeq.clear();
129       generateInstSeqImpl(LeadingOnesVal, ActiveFeatures, TmpSeq);
130       TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDUW, 0));
131 
132       // Keep the new sequence if it is an improvement.
133       if (TmpSeq.size() < Res.size()) {
134         Res = TmpSeq;
135         // A 2 instruction sequence is the best we can do.
136         if (Res.size() <= 2)
137           return Res;
138       }
139     }
140   }
141 
142   return Res;
143 }
144 
145 int getIntMatCost(const APInt &Val, unsigned Size,
146                   const FeatureBitset &ActiveFeatures) {
147   bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
148   int PlatRegSize = IsRV64 ? 64 : 32;
149 
150   // Split the constant into platform register sized chunks, and calculate cost
151   // of each chunk.
152   int Cost = 0;
153   for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
154     APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
155     InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
156     Cost += MatSeq.size();
157   }
158   return std::max(1, Cost);
159 }
160 } // namespace RISCVMatInt
161 } // namespace llvm
162