//===- RISCVMatInt.cpp - Immediate materialisation -------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

// Recursively generate a sequence for materializing an integer.
static void generateInstSeqImpl(int64_t Val, bool IsRV64,
                                RISCVMatInt::InstSeq &Res) {
  if (isInt<32>(Val)) {
    // Depending on the active bits in the immediate Value v, the following
    // instruction sequences are emitted:
    //
    // v == 0                        : ADDI
    // v[0,12) != 0 && v[12,32) == 0 : ADDI
    // v[0,12) == 0 && v[12,32) != 0 : LUI
    // v[0,32) != 0                  : LUI+ADDI(W)
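    //
    // As an illustrative example (added commentary, not from the original
    // source): for v = 0x1FFF, Lo12 = SignExtend64<12>(0xFFF) = -1, so Hi20
    // is computed from v + 0x800 = 0x27FF, giving Hi20 = 0x2. LUI 0x2
    // followed by ADDI -1 materializes 0x2000 - 1 = 0x1FFF; the +0x800
    // rounding compensates for the sign-extended (negative) low part.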
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = SignExtend64<12>(Val);

    if (Hi20)
      Res.push_back(RISCVMatInt::Inst(RISCV::LUI, Hi20));

    if (Lo12 || Hi20 == 0) {
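      // Note (added commentary): on RV64, ADDIW is used after an LUI because
      // it adds in the low 32 bits and sign-extends the 32-bit result. For
      // example, Val = 0x7FFFF800 becomes LUI 0x80000 + ADDIW -2048; a plain
      // 64-bit ADDI would yield 0xFFFFFFFF7FFFF800 instead of 0x7FFFF800.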
      unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
      Res.push_back(RISCVMatInt::Inst(AddiOpc, Lo12));
    }
    return;
  }

  assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");

  // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
  // while the following ADDI instructions contribute up to 12 bits each.
  //
  // At first glance, implementing this seems possible by simply emitting the
  // most significant 32 bits (LUI+ADDIW) followed by as many left shifts (SLLI)
  // and immediate additions (ADDI) as needed. However, because ADDI performs a
  // sign-extended addition, that approach only works when at most 11 bits of
  // each ADDI immediate are used. Using all 12 bits of the ADDI immediates, as
  // GAS does, requires that the constant be processed starting with the least
  // significant bit.
  //
  // In the following, constants are processed from LSB to MSB but instruction
  // emission is performed from MSB to LSB by recursively calling
  // generateInstSeq. In each recursion, first the lowest 12 bits are removed
  // from the constant and the optimal shift amount, which can be greater than
  // 12 bits if the constant is sparse, is determined. Then, the shifted
  // remaining constant is processed recursively and gets emitted as soon as it
  // fits into 32 bits. The emission of the shifts and additions is subsequently
  // performed when the recursion returns.
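  //
  // As an illustrative walkthrough (added commentary): for Val = 0x100000001,
  // Lo12 = 1 and Hi52 = 0x100000, so ShiftAmount = 12 + 20 = 32 and the
  // shifted-down remainder is 1. The recursion emits ADDI 1, and on return
  // SLLI 32 and ADDI 1 are appended, yielding ADDI 1; SLLI 32; ADDI 1.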

  int64_t Lo12 = SignExtend64<12>(Val);
  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
  int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
  Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

  generateInstSeqImpl(Hi52, IsRV64, Res);

  Res.push_back(RISCVMatInt::Inst(RISCV::SLLI, ShiftAmount));
  if (Lo12)
    Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
}

namespace llvm {
namespace RISCVMatInt {
InstSeq generateInstSeq(int64_t Val, bool IsRV64) {
  RISCVMatInt::InstSeq Res;
  generateInstSeqImpl(Val, IsRV64, Res);

  // If the constant is positive we might be able to generate a shifted constant
  // with no leading zeros and use a final SRLI to restore them.
  if (Val > 0 && Res.size() > 2) {
    assert(IsRV64 && "Expected RV32 to only need 2 instructions");
    unsigned ShiftAmount = countLeadingZeros((uint64_t)Val);
    Val <<= ShiftAmount;
    // Fill in the bits that will be shifted out with 1s. An example where this
    // helps is trailing one masks with 32 or more ones. This will generate
    // ADDI -1 and an SRLI.
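    //
    // Concretely (added commentary): Val = 0xFFFFFFFF would otherwise take
    // ADDI 1; SLLI 32; ADDI -1. Shifting left by its 32 leading zeros and
    // filling the vacated bits with ones gives -1, which is a single ADDI -1;
    // the trailing SRLI 32 then restores 0xFFFFFFFF.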
    Val |= maskTrailingOnes<uint64_t>(ShiftAmount);

    RISCVMatInt::InstSeq TmpSeq;
    generateInstSeqImpl(Val, IsRV64, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, ShiftAmount));

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size())
      Res = TmpSeq;

    // Some cases can benefit from filling the lower bits with zeros instead.
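    // For instance (added commentary), Val = 0x00FFFFFFFFFF0000 needs four
    // instructions via the base recursion, but after shifting out its 8
    // leading zeros and clearing the shifted-in low bits, 0xFFFFFFFFFF000000
    // is a single LUI 0xFF000, so LUI 0xFF000; SRLI 8 suffices.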
    Val &= maskTrailingZeros<uint64_t>(ShiftAmount);
    TmpSeq.clear();
    generateInstSeqImpl(Val, IsRV64, TmpSeq);
    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, ShiftAmount));

    // Keep the new sequence if it is an improvement.
    if (TmpSeq.size() < Res.size())
      Res = TmpSeq;
  }

  return Res;
}

int getIntMatCost(const APInt &Val, unsigned Size, bool IsRV64) {
  int PlatRegSize = IsRV64 ? 64 : 32;

  // Split the constant into platform register sized chunks, and calculate cost
  // of each chunk.
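  // For example (added commentary), a 128-bit constant on RV64 is costed as
  // the sum of the sequence lengths for its low and high 64-bit halves.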
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
    APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), IsRV64);
    Cost += MatSeq.size();
  }
  return std::max(1, Cost);
}
} // namespace RISCVMatInt
} // namespace llvm