xref: /llvm-project/llvm/lib/TargetParser/RISCVTargetParser.cpp (revision 4d6d56315d4ea2ae2b8059b99e45bdfee764861a)
1 //===-- RISCVTargetParser.cpp - Parser for target features ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a target parser to recognise hardware features
10 // for RISC-V CPUs.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/TargetParser/RISCVTargetParser.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/TargetParser/RISCVISAInfo.h"
18 
19 namespace llvm {
20 namespace RISCV {
21 
// Enumeration with one CK_* value per processor known to the target parser,
// generated from the PROC and TUNE_PROC records in RISCVTargetParserDef.inc.
enum CPUKind : unsigned {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                   \
             FAST_VECTOR_UNALIGN)                                              \
  CK_##ENUM,
#define TUNE_PROC(ENUM, NAME) CK_##ENUM,
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};
29 
// Static description of one named CPU: its default -march string and whether
// unaligned scalar/vector memory accesses are fast on it.
struct CPUInfo {
  StringLiteral Name;
  StringLiteral DefaultMarch;
  bool FastScalarUnalignedAccess;
  bool FastVectorUnalignedAccess;
  // A CPU is 64-bit exactly when its default march string begins with "rv64".
  bool is64Bit() const { return DefaultMarch.starts_with("rv64"); }
};
37 
// One CPUInfo entry per PROC record in RISCVTargetParserDef.inc, in record
// order. Tune-only (TUNE_PROC) processors have no entry in this table.
constexpr CPUInfo RISCVCPUInfo[] = {
#define PROC(ENUM, NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN,                   \
             FAST_VECTOR_UNALIGN)                                              \
  {NAME, DEFAULT_MARCH, FAST_SCALAR_UNALIGN, FAST_VECTOR_UNALIGN},
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
};
44 
45 static const CPUInfo *getCPUInfoByName(StringRef CPU) {
46   for (auto &C : RISCVCPUInfo)
47     if (C.Name == CPU)
48       return &C;
49   return nullptr;
50 }
51 
52 bool hasFastScalarUnalignedAccess(StringRef CPU) {
53   const CPUInfo *Info = getCPUInfoByName(CPU);
54   return Info && Info->FastScalarUnalignedAccess;
55 }
56 
57 bool hasFastVectorUnalignedAccess(StringRef CPU) {
58   const CPUInfo *Info = getCPUInfoByName(CPU);
59   return Info && Info->FastVectorUnalignedAccess;
60 }
61 
62 bool parseCPU(StringRef CPU, bool IsRV64) {
63   const CPUInfo *Info = getCPUInfoByName(CPU);
64 
65   if (!Info)
66     return false;
67   return Info->is64Bit() == IsRV64;
68 }
69 
// Whether \p TuneCPU is a valid -mtune value: either one of the tune-only
// TUNE_PROC names (accepted regardless of XLEN) or, failing that, any full
// CPU name valid for the given XLEN.
bool parseTuneCPU(StringRef TuneCPU, bool IsRV64) {
  std::optional<CPUKind> Kind =
      llvm::StringSwitch<std::optional<CPUKind>>(TuneCPU)
#define TUNE_PROC(ENUM, NAME) .Case(NAME, CK_##ENUM)
  #include "llvm/TargetParser/RISCVTargetParserDef.inc"
      .Default(std::nullopt);

  if (Kind.has_value())
    return true;

  // Fallback to parsing as a CPU.
  return parseCPU(TuneCPU, IsRV64);
}
83 
84 StringRef getMArchFromMcpu(StringRef CPU) {
85   const CPUInfo *Info = getCPUInfoByName(CPU);
86   if (!Info)
87     return "";
88   return Info->DefaultMarch;
89 }
90 
91 void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
92   for (const auto &C : RISCVCPUInfo) {
93     if (IsRV64 == C.is64Bit())
94       Values.emplace_back(C.Name);
95   }
96 }
97 
// Append every valid -mtune name to \p Values: all full CPUs matching the
// given XLEN, plus the tune-only TUNE_PROC names (appended unconditionally).
void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64) {
  for (const auto &C : RISCVCPUInfo) {
    if (IsRV64 == C.is64Bit())
      Values.emplace_back(C.Name);
  }
#define TUNE_PROC(ENUM, NAME) Values.emplace_back(StringRef(NAME));
#include "llvm/TargetParser/RISCVTargetParserDef.inc"
}
106 
107 // This function is currently used by IREE, so it's not dead code.
108 void getFeaturesForCPU(StringRef CPU,
109                        SmallVectorImpl<std::string> &EnabledFeatures,
110                        bool NeedPlus) {
111   StringRef MarchFromCPU = llvm::RISCV::getMArchFromMcpu(CPU);
112   if (MarchFromCPU == "")
113     return;
114 
115   EnabledFeatures.clear();
116   auto RII = RISCVISAInfo::parseArchString(
117       MarchFromCPU, /* EnableExperimentalExtension */ true);
118 
119   if (llvm::errorToBool(RII.takeError()))
120     return;
121 
122   std::vector<std::string> FeatStrings =
123       (*RII)->toFeatures(/* AddAllExtensions */ false);
124   for (const auto &F : FeatStrings)
125     if (NeedPlus)
126       EnabledFeatures.push_back(F);
127     else
128       EnabledFeatures.push_back(F.substr(1));
129 }
130 
// Generated definition of the extension bitmask table; see the
// GET_RISCVExtensionBitmaskTable_IMPL section of RISCVTargetParserDef.inc
// for the record contents.
namespace RISCVExtensionBitmaskTable {
#define GET_RISCVExtensionBitmaskTable_IMPL
#include "llvm/TargetParser/RISCVTargetParserDef.inc"

} // namespace RISCVExtensionBitmaskTable
136 
137 namespace {
138 struct LessExtName {
139   bool operator()(const RISCVExtensionBitmaskTable::RISCVExtensionBitmask &LHS,
140                   StringRef RHS) {
141     return StringRef(LHS.Name) < RHS;
142   }
143 };
144 } // namespace
145 
146 } // namespace RISCV
147 
148 namespace RISCVVType {
// Encode VTYPE into the binary format used by the VSETVLI instruction which
// is used by our MC layer representation.
151 //
152 // Bits | Name       | Description
153 // -----+------------+------------------------------------------------
154 // 7    | vma        | Vector mask agnostic
155 // 6    | vta        | Vector tail agnostic
156 // 5:3  | vsew[2:0]  | Standard element width (SEW) setting
157 // 2:0  | vlmul[2:0] | Vector register group multiplier (LMUL) setting
158 unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
159                      bool MaskAgnostic) {
160   assert(isValidSEW(SEW) && "Invalid SEW");
161   unsigned VLMULBits = static_cast<unsigned>(VLMUL);
162   unsigned VSEWBits = encodeSEW(SEW);
163   unsigned VTypeI = (VSEWBits << 3) | (VLMULBits & 0x7);
164   if (TailAgnostic)
165     VTypeI |= 0x40;
166   if (MaskAgnostic)
167     VTypeI |= 0x80;
168 
169   return VTypeI;
170 }
171 
172 std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL) {
173   switch (VLMUL) {
174   default:
175     llvm_unreachable("Unexpected LMUL value!");
176   case RISCVII::VLMUL::LMUL_1:
177   case RISCVII::VLMUL::LMUL_2:
178   case RISCVII::VLMUL::LMUL_4:
179   case RISCVII::VLMUL::LMUL_8:
180     return std::make_pair(1 << static_cast<unsigned>(VLMUL), false);
181   case RISCVII::VLMUL::LMUL_F2:
182   case RISCVII::VLMUL::LMUL_F4:
183   case RISCVII::VLMUL::LMUL_F8:
184     return std::make_pair(1 << (8 - static_cast<unsigned>(VLMUL)), true);
185   }
186 }
187 
188 void printVType(unsigned VType, raw_ostream &OS) {
189   unsigned Sew = getSEW(VType);
190   OS << "e" << Sew;
191 
192   unsigned LMul;
193   bool Fractional;
194   std::tie(LMul, Fractional) = decodeVLMUL(getVLMUL(VType));
195 
196   if (Fractional)
197     OS << ", mf";
198   else
199     OS << ", m";
200   OS << LMul;
201 
202   if (isTailAgnostic(VType))
203     OS << ", ta";
204   else
205     OS << ", tu";
206 
207   if (isMaskAgnostic(VType))
208     OS << ", ma";
209   else
210     OS << ", mu";
211 }
212 
213 unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) {
214   unsigned LMul;
215   bool Fractional;
216   std::tie(LMul, Fractional) = decodeVLMUL(VLMul);
217 
218   // Convert LMul to a fixed point value with 3 fractional bits.
219   LMul = Fractional ? (8 / LMul) : (LMul * 8);
220 
221   assert(SEW >= 8 && "Unexpected SEW value");
222   return (SEW * 8) / LMul;
223 }
224 
225 std::optional<RISCVII::VLMUL>
226 getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW) {
227   unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMUL);
228   unsigned EMULFixedPoint = (EEW * 8) / Ratio;
229   bool Fractional = EMULFixedPoint < 8;
230   unsigned EMUL = Fractional ? 8 / EMULFixedPoint : EMULFixedPoint / 8;
231   if (!isValidLMUL(EMUL, Fractional))
232     return std::nullopt;
233   return RISCVVType::encodeLMUL(EMUL, Fractional);
234 }
235 
236 } // namespace RISCVVType
237 
238 } // namespace llvm
239