//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//


class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
                 list<string> altNames = []>
    : Register<n, altNames> {
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}

let Namespace = "AArch64" in {
  def sub_32 : SubRegIndex<32>;

  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  def dsub : SubRegIndex<64>;
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def zasubb  : SubRegIndex<2048>; // (16 x 16)/1 bytes  = 2048 bits
  def zasubh0 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubh1 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubs0 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubs1 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubd0 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubd1 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubq0 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
  def zasubq1 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits

  def psub : SubRegIndex<16>;
}
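
// Note: the scalar and vector register definitions below chain these indices,
// so each Hn contains Bn via bsub, each Sn contains Hn via hsub, each Dn
// contains Sn via ssub, each Qn contains Dn via dsub, and each Zn contains Qn
// via zsub.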

let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}

//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
def W0 : AArch64Reg<0, "w0" >, DwarfRegNum<[0]>;
def W1 : AArch64Reg<1, "w1" >, DwarfRegNum<[1]>;
def W2 : AArch64Reg<2, "w2" >, DwarfRegNum<[2]>;
def W3 : AArch64Reg<3, "w3" >, DwarfRegNum<[3]>;
def W4 : AArch64Reg<4, "w4" >, DwarfRegNum<[4]>;
def W5 : AArch64Reg<5, "w5" >, DwarfRegNum<[5]>;
def W6 : AArch64Reg<6, "w6" >, DwarfRegNum<[6]>;
def W7 : AArch64Reg<7, "w7" >, DwarfRegNum<[7]>;
def W8 : AArch64Reg<8, "w8" >, DwarfRegNum<[8]>;
def W9 : AArch64Reg<9, "w9" >, DwarfRegNum<[9]>;
def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
let isConstant = true in
def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;

let SubRegIndices = [sub_32] in {
def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias<W0>;
def X1 : AArch64Reg<1, "x1", [W1]>, DwarfRegAlias<W1>;
def X2 : AArch64Reg<2, "x2", [W2]>, DwarfRegAlias<W2>;
def X3 : AArch64Reg<3, "x3", [W3]>, DwarfRegAlias<W3>;
def X4 : AArch64Reg<4, "x4", [W4]>, DwarfRegAlias<W4>;
def X5 : AArch64Reg<5, "x5", [W5]>, DwarfRegAlias<W5>;
def X6 : AArch64Reg<6, "x6", [W6]>, DwarfRegAlias<W6>;
def X7 : AArch64Reg<7, "x7", [W7]>, DwarfRegAlias<W7>;
def X8 : AArch64Reg<8, "x8", [W8]>, DwarfRegAlias<W8>;
def X9 : AArch64Reg<9, "x9", [W9]>, DwarfRegAlias<W9>;
def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias<WSP>;
let isConstant = true in
def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}

// Condition code register.
def NZCV : AArch64Reg<0, "nzcv">;

// First fault status register
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;

// Purely virtual Vector Granule (VG) Dwarf register
def VG : AArch64Reg<0, "vg">, DwarfRegNum<[46]>;

// Floating-point control register
def FPCR : AArch64Reg<0, "fpcr">;

// Floating-point status register.
def FPSR : AArch64Reg<0, "fpsr">;

// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
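// The AltOrders below rotate the default allocation order by eight, so the
// allocation order starts at w8/x8 rather than w0/x0; AltOrderSelect returns 1,
// meaning the rotated order is always used.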
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::GPR64commonRegClassID, 0, 31>";
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::GPR32RegClassID, 0, 32>";
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::GPR64RegClassID, 0, 32>";
}

// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::GPR32spRegClassID, 0, 32>";
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::GPR64spRegClassID, 0, 32>";
}

def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;

def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}

// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}

// GPR argument registers.
def GPR32arg : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 7)>;
def GPR64arg : RegisterClass<"AArch64", [i64], 64, (sequence "X%u", 0, 7)>;

// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions; it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                   X22, X23, X24, X25, X26,
                                                   X27, X28, FP, LR)>;
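// In other words, tcGPR64 is x0-x18: everything in GPR64common except the
// callee-saved registers x19-x28, the frame pointer and the link register.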

// Restricted sets of tail call registers, for use when branch target
// enforcement or PAuthLR are enabled.
// For BTI, x16 and x17 are the only registers which can be used to indirectly
// branch (not call) to the "BTI c" instruction at the start of a BTI-protected
// function.
// For PAuthLR, x16 must be used in the function epilogue for other purposes,
// so cannot hold the function pointer.
def tcGPRx17 : RegisterClass<"AArch64", [i64], 64, (add X17)>;
def tcGPRx16x17 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;
def tcGPRnotx16 : RegisterClass<"AArch64", [i64], 64, (sub tcGPR64, X16)>;

// Register set that excludes registers that are reserved for procedure calls.
// This is used for pseudo-instructions that are actually implemented using a
// procedure call.
def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)> {
  let AltOrders = [(rotl GPR64noip, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// GPR register operands for the post-increment amount of vector loads/stores.
// When Rm=31 they have alternate printing: a constant immediate value equal to
// the total number of bytes transferred is printed instead of the register.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
def GPR64pi1  : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2  : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3  : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4  : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6  : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8  : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
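// For example, with GPR64pi8 a post-indexed load whose Rm operand is the zero
// register (encoding 31) is printed as "ld1 { v0.8b }, [x0], #8" rather than
// with a register name.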

// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1; // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}

//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//

def B0 : AArch64Reg<0, "b0">, DwarfRegNum<[64]>;
def B1 : AArch64Reg<1, "b1">, DwarfRegNum<[65]>;
def B2 : AArch64Reg<2, "b2">, DwarfRegNum<[66]>;
def B3 : AArch64Reg<3, "b3">, DwarfRegNum<[67]>;
def B4 : AArch64Reg<4, "b4">, DwarfRegNum<[68]>;
def B5 : AArch64Reg<5, "b5">, DwarfRegNum<[69]>;
def B6 : AArch64Reg<6, "b6">, DwarfRegNum<[70]>;
def B7 : AArch64Reg<7, "b7">, DwarfRegNum<[71]>;
def B8 : AArch64Reg<8, "b8">, DwarfRegNum<[72]>;
def B9 : AArch64Reg<9, "b9">, DwarfRegNum<[73]>;
def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;

let SubRegIndices = [bsub] in {
def H0 : AArch64Reg<0, "h0", [B0]>, DwarfRegAlias<B0>;
def H1 : AArch64Reg<1, "h1", [B1]>, DwarfRegAlias<B1>;
def H2 : AArch64Reg<2, "h2", [B2]>, DwarfRegAlias<B2>;
def H3 : AArch64Reg<3, "h3", [B3]>, DwarfRegAlias<B3>;
def H4 : AArch64Reg<4, "h4", [B4]>, DwarfRegAlias<B4>;
def H5 : AArch64Reg<5, "h5", [B5]>, DwarfRegAlias<B5>;
def H6 : AArch64Reg<6, "h6", [B6]>, DwarfRegAlias<B6>;
def H7 : AArch64Reg<7, "h7", [B7]>, DwarfRegAlias<B7>;
def H8 : AArch64Reg<8, "h8", [B8]>, DwarfRegAlias<B8>;
def H9 : AArch64Reg<9, "h9", [B9]>, DwarfRegAlias<B9>;
def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [hsub] in {
def S0 : AArch64Reg<0, "s0", [H0]>, DwarfRegAlias<B0>;
def S1 : AArch64Reg<1, "s1", [H1]>, DwarfRegAlias<B1>;
def S2 : AArch64Reg<2, "s2", [H2]>, DwarfRegAlias<B2>;
def S3 : AArch64Reg<3, "s3", [H3]>, DwarfRegAlias<B3>;
def S4 : AArch64Reg<4, "s4", [H4]>, DwarfRegAlias<B4>;
def S5 : AArch64Reg<5, "s5", [H5]>, DwarfRegAlias<B5>;
def S6 : AArch64Reg<6, "s6", [H6]>, DwarfRegAlias<B6>;
def S7 : AArch64Reg<7, "s7", [H7]>, DwarfRegAlias<B7>;
def S8 : AArch64Reg<8, "s8", [H8]>, DwarfRegAlias<B8>;
def S9 : AArch64Reg<9, "s9", [H9]>, DwarfRegAlias<B9>;
def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0 : AArch64Reg<0, "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1 : AArch64Reg<1, "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2 : AArch64Reg<2, "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3 : AArch64Reg<3, "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4 : AArch64Reg<4, "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5 : AArch64Reg<5, "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6 : AArch64Reg<6, "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7 : AArch64Reg<7, "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8 : AArch64Reg<8, "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9 : AArch64Reg<9, "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14 : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0 : AArch64Reg<0, "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1 : AArch64Reg<1, "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2 : AArch64Reg<2, "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3 : AArch64Reg<3, "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4 : AArch64Reg<4, "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5 : AArch64Reg<5, "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6 : AArch64Reg<6, "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7 : AArch64Reg<7, "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8 : AArch64Reg<8, "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9 : AArch64Reg<9, "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}

def FPR8 : RegisterClass<"AArch64", [i8], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR8RegClassID, 0, 32>";
}
def FPR16 : RegisterClass<"AArch64", [f16, bf16, i16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR16RegClassID, 0, 32>";
}

def FPR16_lo : RegisterClass<"AArch64", [f16], 16, (trunc FPR16, 16)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32, (sequence "S%u", 0, 31)> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR32RegClassID, 0, 32>";
}
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16, v4bf16],
                          64, (sequence "D%u", 0, 31)> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR64RegClassID, 0, 32>";
}
def FPR64_lo : RegisterClass<"AArch64",
                             [v8i8, v4i16, v2i32, v1i64, v4f16, v4bf16, v2f32,
                              v1f64],
                             64, (trunc FPR64, 16)>;

// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16, v8bf16],
                           128, (sequence "Q%u", 0, 31)> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR128RegClassID, 0, 32>";
}

// The lower 16 vector registers. Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                               v8bf16],
                              128, (trunc FPR128, 16)> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR128RegClassID, 0, 16>";
}

// The lower 8 vector registers. Some instructions can only take registers
// in this range.
def FPR128_0to7 : RegisterClass<"AArch64",
                                [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                                 v8bf16],
                                128, (trunc FPR128, 8)> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::FPR128RegClassID, 0, 8>";
}

// Pairs, triples, and quads of 64-bit vector registers.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::DDRegClassID, 0, 32>";
}
def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::DDDRegClassID, 0, 32>";
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::DDDDRegClassID, 0, 32>";
}
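
// Note that the rotl-based tuples above wrap around modulo 32, so pairs such
// as D31_D0 (and Q31_Q0 below) exist, matching the wrap-around register lists
// the assembler accepts, e.g. "{ v31.8b, v0.8b }".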

// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::QQRegClassID, 0, 32>";
}
def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::QQQRegClassID, 0, 32>";
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::QQQQRegClassID, 0, 32>";
}


// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}

def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}

def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V64_lo : RegisterOperand<FPR64_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}

def VectorReg0to7AsmOperand : AsmOperandClass {
  let Name = "VectorReg0to7";
  let PredicateMethod = "isNeonVectorReg0to7";
}

def V128_0to7 : RegisterOperand<FPR128_0to7, "printVRegOperand"> {
  let ParserMatchClass = VectorReg0to7AsmOperand;
}

class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
}

class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                           # eltsize # "'>">;

multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }


}

defm VecListOne   : VectorList<1, FPR64, FPR128>;
defm VecListTwo   : VectorList<2, DD,    QQ>;
defm VecListThree : VectorList<3, DDD,   QQQ>;
defm VecListFour  : VectorList<4, DDDD,  QQQQ>;

class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  let RenderMethod = "addRegOperands";
}

// Register operand versions of the scalar FP registers.
def FPR8Op : RegisterOperand<FPR8, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR8">;
}

def FPR16Op : RegisterOperand<FPR16, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16">;
}

def FPR16Op_lo : RegisterOperand<FPR16_lo, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16_lo">;
}

def FPR32Op : RegisterOperand<FPR32, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR32">;
}

def FPR64Op : RegisterOperand<FPR64, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR64">;
}

def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR128">;
}

//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands


def WSeqPairs : RegisterTuples<[sube32, subo32],
                               [(decimate (rotl GPR32, 0), 2),
                                (decimate (rotl GPR32, 1), 2)]>;
def XSeqPairs : RegisterTuples<[sube64, subo64],
                               [(decimate (rotl GPR64, 0), 2),
                                (decimate (rotl GPR64, 1), 2)]>;
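// The decimate patterns keep every second starting register, so these tuples
// are the even/odd pairs (w0_w1, w2_w3, ... and x0_x1, x2_x3, ...) required by
// the CASP instructions.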

def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32,
                                   (add WSeqPairs)>{
  let Size = 64;
}
def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64,
                                   (add XSeqPairs)>{
  let Size = 128;
}


let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}

def WSeqPairClassOperand :
    RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
// Reuse the parsing and register numbers from XSeqPairs, but the encoding is
// different.
def MrrsMssrPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
def SyspXzrPairOperandMatcherClass : AsmOperandClass {
  let Name = "SyspXzrPair";
  let RenderMethod = "addSyspXzrPairOperand";
  let ParserMethod = "tryParseSyspXzrPair";
}
def SyspXzrPairOperand :
    RegisterOperand<GPR64, "printSyspXzrPair"> { // needed to allow alias with XZR operand
  let ParserMatchClass = SyspXzrPairOperandMatcherClass;
}



//===----- END: v8.1a atomic CASP register operands -----------------------===//

//===----------------------------------------------------------------------===//
// Armv8.7a accelerator extension register operands: 8 consecutive GPRs
// starting with an even one

let Namespace = "AArch64" in {
  foreach i = 0-7 in
    def "x8sub_"#i : SubRegIndex<64, !mul(64, i)>;
}

def Tuples8X : RegisterTuples<
  !foreach(i, [0,1,2,3,4,5,6,7], !cast<SubRegIndex>("x8sub_"#i)),
  !foreach(i, [0,1,2,3,4,5,6,7], (trunc (decimate (rotl GPR64, i), 2), 12))>;

def GPR64x8Class : RegisterClass<"AArch64", [i64x8], 512, (trunc Tuples8X, 12)> {
  let Size = 512;
}
def GPR64x8AsmOp : AsmOperandClass {
  let Name = "GPR64x8";
  let ParserMethod = "tryParseGPR64x8";
  let RenderMethod = "addRegOperands";
}
def GPR64x8 : RegisterOperand<GPR64x8Class, "printGPR64x8"> {
  let ParserMatchClass = GPR64x8AsmOp;
  let PrintMethod = "printGPR64x8";
}

//===----- END: v8.7a accelerator extension register operands -------------===//

// SVE predicate-as-counter registers
  def PN0  : AArch64Reg<0, "pn0">, DwarfRegNum<[48]>;
  def PN1  : AArch64Reg<1, "pn1">, DwarfRegNum<[49]>;
  def PN2  : AArch64Reg<2, "pn2">, DwarfRegNum<[50]>;
  def PN3  : AArch64Reg<3, "pn3">, DwarfRegNum<[51]>;
  def PN4  : AArch64Reg<4, "pn4">, DwarfRegNum<[52]>;
  def PN5  : AArch64Reg<5, "pn5">, DwarfRegNum<[53]>;
  def PN6  : AArch64Reg<6, "pn6">, DwarfRegNum<[54]>;
  def PN7  : AArch64Reg<7, "pn7">, DwarfRegNum<[55]>;
  def PN8  : AArch64Reg<8, "pn8">, DwarfRegNum<[56]>;
  def PN9  : AArch64Reg<9, "pn9">, DwarfRegNum<[57]>;
  def PN10 : AArch64Reg<10, "pn10">, DwarfRegNum<[58]>;
  def PN11 : AArch64Reg<11, "pn11">, DwarfRegNum<[59]>;
  def PN12 : AArch64Reg<12, "pn12">, DwarfRegNum<[60]>;
  def PN13 : AArch64Reg<13, "pn13">, DwarfRegNum<[61]>;
  def PN14 : AArch64Reg<14, "pn14">, DwarfRegNum<[62]>;
  def PN15 : AArch64Reg<15, "pn15">, DwarfRegNum<[63]>;

// SVE predicate registers
let SubRegIndices = [psub] in {
  def P0  : AArch64Reg<0, "p0", [PN0]>, DwarfRegAlias<PN0>;
  def P1  : AArch64Reg<1, "p1", [PN1]>, DwarfRegAlias<PN1>;
  def P2  : AArch64Reg<2, "p2", [PN2]>, DwarfRegAlias<PN2>;
  def P3  : AArch64Reg<3, "p3", [PN3]>, DwarfRegAlias<PN3>;
  def P4  : AArch64Reg<4, "p4", [PN4]>, DwarfRegAlias<PN4>;
  def P5  : AArch64Reg<5, "p5", [PN5]>, DwarfRegAlias<PN5>;
  def P6  : AArch64Reg<6, "p6", [PN6]>, DwarfRegAlias<PN6>;
  def P7  : AArch64Reg<7, "p7", [PN7]>, DwarfRegAlias<PN7>;
  def P8  : AArch64Reg<8, "p8", [PN8]>, DwarfRegAlias<PN8>;
  def P9  : AArch64Reg<9, "p9", [PN9]>, DwarfRegAlias<PN9>;
  def P10 : AArch64Reg<10, "p10", [PN10]>, DwarfRegAlias<PN10>;
  def P11 : AArch64Reg<11, "p11", [PN11]>, DwarfRegAlias<PN11>;
  def P12 : AArch64Reg<12, "p12", [PN12]>, DwarfRegAlias<PN12>;
  def P13 : AArch64Reg<13, "p13", [PN13]>, DwarfRegAlias<PN13>;
  def P14 : AArch64Reg<14, "p14", [PN14]>, DwarfRegAlias<PN14>;
  def P15 : AArch64Reg<15, "p15", [PN15]>, DwarfRegAlias<PN15>;
}

// SVE variable-size vector registers
let SubRegIndices = [zsub] in {
def Z0 : AArch64Reg<0, "z0", [Q0]>, DwarfRegNum<[96]>;
def Z1 : AArch64Reg<1, "z1", [Q1]>, DwarfRegNum<[97]>;
def Z2 : AArch64Reg<2, "z2", [Q2]>, DwarfRegNum<[98]>;
def Z3 : AArch64Reg<3, "z3", [Q3]>, DwarfRegNum<[99]>;
def Z4 : AArch64Reg<4, "z4", [Q4]>, DwarfRegNum<[100]>;
def Z5 : AArch64Reg<5, "z5", [Q5]>, DwarfRegNum<[101]>;
def Z6 : AArch64Reg<6, "z6", [Q6]>, DwarfRegNum<[102]>;
def Z7 : AArch64Reg<7, "z7", [Q7]>, DwarfRegNum<[103]>;
def Z8 : AArch64Reg<8, "z8", [Q8]>, DwarfRegNum<[104]>;
def Z9 : AArch64Reg<9, "z9", [Q9]>, DwarfRegNum<[105]>;
def Z10 : AArch64Reg<10, "z10", [Q10]>, DwarfRegNum<[106]>;
def Z11 : AArch64Reg<11, "z11", [Q11]>, DwarfRegNum<[107]>;
def Z12 : AArch64Reg<12, "z12", [Q12]>, DwarfRegNum<[108]>;
def Z13 : AArch64Reg<13, "z13", [Q13]>, DwarfRegNum<[109]>;
def Z14 : AArch64Reg<14, "z14", [Q14]>, DwarfRegNum<[110]>;
def Z15 : AArch64Reg<15, "z15", [Q15]>, DwarfRegNum<[111]>;
def Z16 : AArch64Reg<16, "z16", [Q16]>, DwarfRegNum<[112]>;
def Z17 : AArch64Reg<17, "z17", [Q17]>, DwarfRegNum<[113]>;
def Z18 : AArch64Reg<18, "z18", [Q18]>, DwarfRegNum<[114]>;
def Z19 : AArch64Reg<19, "z19", [Q19]>, DwarfRegNum<[115]>;
def Z20 : AArch64Reg<20, "z20", [Q20]>, DwarfRegNum<[116]>;
def Z21 : AArch64Reg<21, "z21", [Q21]>, DwarfRegNum<[117]>;
def Z22 : AArch64Reg<22, "z22", [Q22]>, DwarfRegNum<[118]>;
def Z23 : AArch64Reg<23, "z23", [Q23]>, DwarfRegNum<[119]>;
def Z24 : AArch64Reg<24, "z24", [Q24]>, DwarfRegNum<[120]>;
def Z25 : AArch64Reg<25, "z25", [Q25]>, DwarfRegNum<[121]>;
def Z26 : AArch64Reg<26, "z26", [Q26]>, DwarfRegNum<[122]>;
def Z27 : AArch64Reg<27, "z27", [Q27]>, DwarfRegNum<[123]>;
def Z28 : AArch64Reg<28, "z28", [Q28]>, DwarfRegNum<[124]>;
def Z29 : AArch64Reg<29, "z29", [Q29]>, DwarfRegNum<[125]>;
def Z30 : AArch64Reg<30, "z30", [Q30]>, DwarfRegNum<[126]>;
def Z31 : AArch64Reg<31, "z31", [Q31]>, DwarfRegNum<[127]>;
}

// Enum describing the element size for destructive
// operations.
class ElementSizeEnum<bits<3> val> {
  bits<3> Value = val;
}

def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB : ElementSizeEnum<1>;
def ElementSizeH : ElementSizeEnum<2>;
def ElementSizeS : ElementSizeEnum<3>;
def ElementSizeD : ElementSizeEnum<4>;
def ElementSizeQ : ElementSizeEnum<5>; // Unused

class SVERegOp <string Suffix, AsmOperandClass C,
                ElementSizeEnum Size,
                RegisterClass RC> : RegisterOperand<RC> {
  ElementSizeEnum ElementSize;

  let ElementSize = Size;
  let PrintMethod = !if(!eq(Suffix, ""),
                        "printSVERegOp<>",
                        "printSVERegOp<'" # Suffix # "'>");
  let ParserMatchClass = C;
}

class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

//******************************************************************************

// SVE predicate register classes.
class PPRClass<int firstreg, int lastreg> : RegisterClass<
                                              "AArch64",
                                              [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16,
                                              (sequence "P%u", firstreg, lastreg)> {
  let Size = 16;
}

def PPR : PPRClass<0, 15> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PPRRegClassID, 0, 16>";
}
def PPR_3b : PPRClass<0, 7> { // Restricted 3 bit SVE predicate register class.
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PPRRegClassID, 0, 8>";
}
def PPR_p8to15 : PPRClass<8, 15> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PNRRegClassID, 8, 8>";
}

class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                              # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateVector>";
}

def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
def PPRAsmOp8   : PPRAsmOperand<"PredicateB", "PPR", 8>;
def PPRAsmOp16  : PPRAsmOperand<"PredicateH", "PPR", 16>;
def PPRAsmOp32  : PPRAsmOperand<"PredicateS", "PPR", 32>;
def PPRAsmOp64  : PPRAsmOperand<"PredicateD", "PPR", 64>;
def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;

class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8   : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
def PPR16  : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
def PPR32  : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
def PPR64  : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;
def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;

class PNRClass<int firstreg, int lastreg> : RegisterClass<
                                              "AArch64",
                                              [ aarch64svcount ], 16,
                                              (sequence "PN%u", firstreg, lastreg)> {
  let Size = 16;
}

def PNR : PNRClass<0, 15> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PNRRegClassID, 0, 16>";
}
def PNR_3b : PNRClass<0, 7>;
def PNR_p8to15 : PNRClass<8, 15>;

// SVE predicate-as-counter operand
class PNRAsmOperand<string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateAsCounterRegOfWidth<"
                              # Width # ", " # "AArch64::"
                              # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>";
}

def PNRAsmOpAny: PNRAsmOperand<"PNPredicateAny", "PNR", 0>;
def PNRAsmOp8  : PNRAsmOperand<"PNPredicateB", "PNR", 8>;
def PNRAsmOp16 : PNRAsmOperand<"PNPredicateH", "PNR", 16>;
def PNRAsmOp32 : PNRAsmOperand<"PNPredicateS", "PNR", 32>;
def PNRAsmOp64 : PNRAsmOperand<"PNPredicateD", "PNR", 64>;

class PNRRegOp<string Suffix, AsmOperandClass C, int Size, RegisterClass RC>
    : SVERegOp<Suffix, C, ElementSizeNone, RC> {
  let PrintMethod = "printPredicateAsCounter<" # Size # ">";
}
def PNRAny : PNRRegOp<"", PNRAsmOpAny, 0, PNR>;
def PNR8   : PNRRegOp<"b", PNRAsmOp8, 8, PNR>;
def PNR16  : PNRRegOp<"h", PNRAsmOp16, 16, PNR>;
def PNR32  : PNRRegOp<"s", PNRAsmOp32, 32, PNR>;
def PNR64  : PNRRegOp<"d", PNRAsmOp64, 64, PNR>;

def PNRAsmAny_p8to15  : PNRAsmOperand<"PNPredicateAny_p8to15", "PNR_p8to15", 0>;
def PNRAsmOp8_p8to15  : PNRAsmOperand<"PNPredicateB_p8to15", "PNR_p8to15", 8>;
def PNRAsmOp16_p8to15 : PNRAsmOperand<"PNPredicateH_p8to15", "PNR_p8to15", 16>;
def PNRAsmOp32_p8to15 : PNRAsmOperand<"PNPredicateS_p8to15", "PNR_p8to15", 32>;
def PNRAsmOp64_p8to15 : PNRAsmOperand<"PNPredicateD_p8to15", "PNR_p8to15", 64>;

class PNRP8to15RegOp<string Suffix, AsmOperandClass C, int Width, RegisterClass RC>
    : SVERegOp<Suffix, C, ElementSizeNone, RC> {
  let PrintMethod = "printPredicateAsCounter<" # Width # ">";
  let EncoderMethod = "EncodePNR_p8to15";
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PNRRegClassID, 8, 8>";
}

def PNRAny_p8to15 : PNRP8to15RegOp<"", PNRAsmAny_p8to15, 0, PNR_p8to15>;
def PNR8_p8to15   : PNRP8to15RegOp<"b", PNRAsmOp8_p8to15, 8, PNR_p8to15>;
def PNR16_p8to15  : PNRP8to15RegOp<"h", PNRAsmOp16_p8to15, 16, PNR_p8to15>;
def PNR32_p8to15  : PNRP8to15RegOp<"s", PNRAsmOp32_p8to15, 32, PNR_p8to15>;
def PNR64_p8to15  : PNRP8to15RegOp<"d", PNRAsmOp64_p8to15, 64, PNR_p8to15>;

let Namespace = "AArch64" in {
  def psub0 : SubRegIndex<16, -1>;
  def psub1 : SubRegIndex<16, -1>;
}

class PPRorPNRClass : RegisterClass<
                        "AArch64",
                        [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1, aarch64svcount ], 16,
                        (add PPR, PNR)> {
  let Size = 16;
}

class PPRorPNRAsmOperand<string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateOrPredicateAsCounterRegOfWidth<"
                              # Width # ", " # "AArch64::"
                              # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addPPRorPNRRegOperands";
  let ParserMethod = "tryParseSVEPredicateOrPredicateAsCounterVector";
}

def PPRorPNR : PPRorPNRClass {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PPRorPNRRegClassID, 0, 16>";
}
def PPRorPNRAsmOp8   : PPRorPNRAsmOperand<"PPRorPNRB", "PPRorPNR", 8>;
def PPRorPNRAsmOpAny : PPRorPNRAsmOperand<"PPRorPNRAny", "PPRorPNR", 0>;
def PPRorPNRAny : PPRRegOp<"", PPRorPNRAsmOpAny, ElementSizeNone, PPRorPNR>;
def PPRorPNR8   : PPRRegOp<"b", PPRorPNRAsmOp8, ElementSizeB, PPRorPNR>;

// Pairs of SVE predicate vector registers.
def PSeqPairs : RegisterTuples<[psub0, psub1], [(rotl PPR, 0), (rotl PPR, 1)]>;

def PPR2 : RegisterClass<"AArch64", [untyped], 16, (add PSeqPairs)> {
  let Size = 32;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::PPR2RegClassID, 0, 16>";
}

class PPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEPredicateList" # NumRegs # "x" # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEPredicateVector>";
  let PredicateMethod = "isTypedVectorList<RegKind::SVEPredicateVector, "
                        # NumRegs #", 0, "#ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_PReg, "
                        # NumRegs #">";
}

def PP_b : RegisterOperand<PPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = PPRVectorList<8, 2>;
}

def PP_h : RegisterOperand<PPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = PPRVectorList<16, 2>;
}

def PP_s : RegisterOperand<PPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = PPRVectorList<32, 2>;
}

def PP_d : RegisterOperand<PPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = PPRVectorList<64, 2>;
}

// SVE2 multiple-of-2 multi-predicate-vector operands
def PPR2Mul2 : RegisterClass<"AArch64", [untyped], 16, (add (decimate PSeqPairs, 2))> {
  let Size = 32;
}

class PPRVectorListMul<int ElementWidth, int NumRegs> : PPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEPredicateListMul" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod =
      "isTypedVectorListMultiple<RegKind::SVEPredicateVector, " # NumRegs # ", 0, "
            # ElementWidth # ">";
}

let EncoderMethod = "EncodeRegAsMultipleOf<2>",
    DecoderMethod = "DecodePPR2Mul2RegisterClass" in {
  def PP_b_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = PPRVectorListMul<8, 2>;
  }

  def PP_h_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = PPRVectorListMul<16, 2>;
  }

  def PP_s_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = PPRVectorListMul<32, 2>;
  }

  def PP_d_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = PPRVectorListMul<64, 2>;
  }
} // end let EncoderMethod/DecoderMethod


//******************************************************************************

// SVE vector register classes
class ZPRClass<int lastreg> : RegisterClass<"AArch64",
                                            [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                                             nxv2f16, nxv4f16, nxv8f16,
                                             nxv2bf16, nxv4bf16, nxv8bf16,
                                             nxv2f32, nxv4f32,
                                             nxv2f64],
                                            128, (sequence "Z%u", 0, lastreg)> {
  let Size = 128;
}

def ZPR : ZPRClass<31> {
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPRRegClassID, 0, 32>";
}
def ZPR_4b : ZPRClass<15> { // Restricted 4 bit SVE vector register class.
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPRRegClassID, 0, 16>";
}
def ZPR_3b : ZPRClass<7> { // Restricted 3 bit SVE vector register class.
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPRRegClassID, 0, 8>";
}

class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
    : AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEDataVectorRegOfWidth<"
                              # Width # ", AArch64::ZPR"
                              # RegClassSuffix # "RegClassID>";
  let RenderMethod = "addRegOperands";
  let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
  let ParserMethod = "tryParseSVEDataVector<false, "
                              # !if(!eq(Width, 0), "false", "true") # ">";
}

def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8   : ZPRAsmOperand<"VectorB", 8>;
def ZPRAsmOp16  : ZPRAsmOperand<"VectorH", 16>;
def ZPRAsmOp32  : ZPRAsmOperand<"VectorS", 32>;
def ZPRAsmOp64  : ZPRAsmOperand<"VectorD", 64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;

def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8   : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
def ZPR16  : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
def ZPR32  : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
def ZPR64  : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;

def ZPRAsmOp3b8  : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;

def ZPR3b8  : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;

def ZPRAsmOp4b8  : ZPRAsmOperand<"Vector4bB", 8, "_4b">;
def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;

def ZPR4b8  : ZPRRegOp<"b", ZPRAsmOp4b8, ElementSizeB, ZPR_4b>;
def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;

class FPRasZPR<int Width> : AsmOperandClass{
  let Name = "FPR" # Width # "asZPR";
  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
}

class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  let ParserMatchClass = FPRasZPR<Width>;
  let PrintMethod = "printZPRasFPR<" # Width # ">";
}

def FPR8asZPR   : FPRasZPROperand<8>;
def FPR16asZPR  : FPRasZPROperand<16>;
def FPR32asZPR  : FPRasZPROperand<32>;
def FPR64asZPR  : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;

let Namespace = "AArch64" in {
  def zsub0 : SubRegIndex<128, -1>;
  def zsub1 : SubRegIndex<128, -1>;
  def zsub2 : SubRegIndex<128, -1>;
  def zsub3 : SubRegIndex<128, -1>;
}

// Pairs, triples, and quads of SVE vector registers.
def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;

def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
  let Size = 256;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR2RegClassID, 0, 32>";
}
def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
  let Size = 384;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR3RegClassID, 0, 32>";
}
def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
  let Size = 512;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR4RegClassID, 0, 32>";
}

class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEVectorList" # NumRegs # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
  let PredicateMethod =
      "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
}

def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 1>;
}

def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 1>;
}

def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 1>;
}

def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 1>;
}

def Z_q : RegisterOperand<ZPR, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 1>;
}

def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 2>;
}

def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 2>;
}

def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 2>;
}

def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 2>;
}

def ZZ_q : RegisterOperand<ZPR2, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 2>;
}

def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 3>;
}

def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 3>;
}

def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 3>;
}

def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 3>;
}

def ZZZ_q : RegisterOperand<ZPR3, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 3>;
}

def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 4>;
}

def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 4>;
}

def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}

def ZZZZ_q : RegisterOperand<ZPR4, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 4>;
}

// SME2 multiple-of-2 or 4 multi-vector operands
def ZPR2Mul2 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqPairs, 2))> {
  let Size = 256;
}

def ZPR4Mul4 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqQuads, 4))> {
  let Size = 512;
}
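
// The decimate patterns above restrict the tuples to those whose starting
// register number is a multiple of the tuple size: z0_z1, z2_z3, ..., z30_z31
// for pairs and z0-z3, z4-z7, ..., z28-z31 for quads, as required by the SME2
// multi-vector encodings (see EncodeRegAsMultipleOf below).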
def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}

def ZZZZ_q : RegisterOperand<ZPR4, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 4>;
}

// SME2 multiple-of-2 or 4 multi-vector operands
def ZPR2Mul2 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqPairs, 2))> {
  let Size = 256;
}

def ZPR4Mul4 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqQuads, 4))> {
  let Size = 512;
}

class ZPRVectorListMul<int ElementWidth, int NumRegs> : ZPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEVectorListMul" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod =
      "isTypedVectorListMultiple<RegKind::SVEDataVector, " # NumRegs # ", 0, "
          # ElementWidth # ">";
}

let EncoderMethod = "EncodeRegAsMultipleOf<2>",
    DecoderMethod = "DecodeZPR2Mul2RegisterClass" in {
  def ZZ_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,0>"> {
    let ParserMatchClass = ZPRVectorListMul<0, 2>;
  }

  def ZZ_b_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListMul<8, 2>;
  }

  def ZZ_h_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListMul<16, 2>;
  }

  def ZZ_s_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListMul<32, 2>;
  }

  def ZZ_d_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListMul<64, 2>;
  }

  def ZZ_q_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'q'>"> {
    let ParserMatchClass = ZPRVectorListMul<128, 2>;
  }
} // end let EncoderMethod/DecoderMethod

let EncoderMethod = "EncodeRegAsMultipleOf<4>",
    DecoderMethod = "DecodeZPR4Mul4RegisterClass" in {
  def ZZZZ_b_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListMul<8, 4>;
  }

  def ZZZZ_h_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListMul<16, 4>;
  }

  def ZZZZ_s_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListMul<32, 4>;
  }

  def ZZZZ_d_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListMul<64, 4>;
  }

  def ZZZZ_q_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'q'>"> {
    let ParserMatchClass = ZPRVectorListMul<128, 4>;
  }
} // end let EncoderMethod/DecoderMethod

// SME2 strided multi-vector operands

// ZStridedPairs
//
// A group of two Z vectors with strided numbering consisting of:
//   Zn+0.T and Zn+8.T
// where n is in the range 0 to 7 and 16 to 23 inclusive, and T is one of B, H,
// S, or D.
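//
// The tuples are defined in two halves below: ZStridedPairsLo takes its
// leading register from Z0-Z7 and ZStridedPairsHi from Z16-Z23, so together
// they cover all sixteen strided pairs.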

// Z0_Z8, Z1_Z9, Z2_Z10, Z3_Z11, Z4_Z12, Z5_Z13, Z6_Z14, Z7_Z15
def ZStridedPairsLo : RegisterTuples<[zsub0, zsub1], [
  (trunc (rotl ZPR, 0), 8), (trunc (rotl ZPR, 8), 8)
]>;

// Z16_Z24, Z17_Z25, Z18_Z26, Z19_Z27, Z20_Z28, Z21_Z29, Z22_Z30, Z23_Z31
def ZStridedPairsHi : RegisterTuples<[zsub0, zsub1], [
  (trunc (rotl ZPR, 16), 8), (trunc (rotl ZPR, 24), 8)
]>;

// ZStridedQuads
//
// A group of four Z vectors with strided numbering consisting of:
//   Zn+0.T, Zn+4.T, Zn+8.T and Zn+12.T
// where n is in the range 0 to 3 and 16 to 19 inclusive, and T is one of B, H,
// S, or D.

// Z0_Z4_Z8_Z12, Z1_Z5_Z9_Z13, Z2_Z6_Z10_Z14, Z3_Z7_Z11_Z15
def ZStridedQuadsLo : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  (trunc (rotl ZPR, 0), 4), (trunc (rotl ZPR, 4), 4),
  (trunc (rotl ZPR, 8), 4), (trunc (rotl ZPR, 12), 4)
]>;
// Z16_Z20_Z24_Z28, Z17_Z21_Z25_Z29, Z18_Z22_Z26_Z30, Z19_Z23_Z27_Z31
def ZStridedQuadsHi : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  (trunc (rotl ZPR, 16), 4), (trunc (rotl ZPR, 20), 4),
  (trunc (rotl ZPR, 24), 4), (trunc (rotl ZPR, 28), 4)
]>;

def ZPR2Strided : RegisterClass<"AArch64", [untyped], 128,
                                (add ZStridedPairsLo, ZStridedPairsHi)> {
  let Size = 256;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR2StridedRegClassID, 0, 16>";
}
def ZPR4Strided : RegisterClass<"AArch64", [untyped], 128,
                                (add ZStridedQuadsLo, ZStridedQuadsHi)> {
  let Size = 512;
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR4StridedRegClassID, 0, 8>";
}

def ZPR2StridedOrContiguous : RegisterClass<"AArch64", [untyped], 128,
                                            (add ZStridedPairsLo, ZStridedPairsHi,
                                                 (decimate ZSeqPairs, 2))> {
  let Size = 256;
}

class ZPRVectorListStrided<int ElementWidth, int NumRegs, int Stride>
    : ZPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEVectorListStrided" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod = "isTypedVectorListStrided<RegKind::SVEDataVector, "
                            # NumRegs # "," # Stride # "," # ElementWidth # ">";
  let RenderMethod = "addStridedVectorListOperands<" # NumRegs # ">";
}

let EncoderMethod = "EncodeZPR2StridedRegisterClass",
    DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR2StridedRegClassID, 0, 16>" in {
  def ZZ_b_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'b'>"> {
    let ParserMatchClass = ZPRVectorListStrided<8, 2, 8>;
  }

  def ZZ_h_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'h'>"> {
    let ParserMatchClass = ZPRVectorListStrided<16, 2, 8>;
  }

  def ZZ_s_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListStrided<32, 2, 8>;
  }

  def ZZ_d_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListStrided<64, 2, 8>;
  }

  def ZZ_b_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'b'>">;
  def ZZ_h_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'h'>">;
  def ZZ_s_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'s'>">;
  def ZZ_d_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'d'>">;
"printTypedVectorList<0,'d'>">; 1478} 1479 1480def ZPR4StridedOrContiguous : RegisterClass<"AArch64", [untyped], 128, 1481 (add ZStridedQuadsLo, ZStridedQuadsHi, 1482 (decimate ZSeqQuads, 4))> { 1483 let Size = 512; 1484} 1485 1486let EncoderMethod = "EncodeZPR4StridedRegisterClass", 1487 DecoderMethod = "DecodeSimpleRegisterClass<AArch64::ZPR4StridedRegClassID, 0, 16>" in { 1488 def ZZZZ_b_strided 1489 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'b'>"> { 1490 let ParserMatchClass = ZPRVectorListStrided<8, 4, 4>; 1491 } 1492 1493 def ZZZZ_h_strided 1494 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'h'>"> { 1495 let ParserMatchClass = ZPRVectorListStrided<16, 4, 4>; 1496 } 1497 1498 def ZZZZ_s_strided 1499 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'s'>"> { 1500 let ParserMatchClass = ZPRVectorListStrided<32, 4, 4>; 1501 } 1502 1503 def ZZZZ_d_strided 1504 : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'d'>"> { 1505 let ParserMatchClass = ZPRVectorListStrided<64, 4, 4>; 1506 } 1507 1508 def ZZZZ_b_strided_and_contiguous 1509 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'b'>">; 1510 def ZZZZ_h_strided_and_contiguous 1511 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'h'>">; 1512 def ZZZZ_s_strided_and_contiguous 1513 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'s'>">; 1514 def ZZZZ_d_strided_and_contiguous 1515 : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'d'>">; 1516} 1517 1518class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale, 1519 bit ScaleAlwaysSame = 0b0> : AsmOperandClass { 1520 let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale 1521 # !if(ScaleAlwaysSame, "Only", ""); 1522 1523 let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<" 1524 # RegWidth # ", AArch64::ZPRRegClassID, " 1525 # "AArch64_AM::" # ShiftExtend # ", " 1526 # Scale # ", " 1527 # !if(ScaleAlwaysSame, "true", "false") 1528 # ">"; 1529 let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale; 1530 let RenderMethod = "addRegOperands"; 1531 let ParserMethod = "tryParseSVEDataVector<true, true>"; 1532} 1533 1534class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr, 1535 int RegWidth, int Scale, string Suffix = ""> 1536 : RegisterOperand<ZPR> { 1537 let ParserMatchClass = 1538 !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix); 1539 let PrintMethod = "printRegWithShiftExtend<" 1540 # !if(SignExtend, "true", "false") # ", " 1541 # Scale # ", " 1542 # !if(IsLSL, "'x'", "'w'") # ", " 1543 # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">"; 1544} 1545 1546foreach RegWidth = [32, 64] in { 1547 // UXTW(8|16|32|64) 1548 def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>; 1549 def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>; 1550 def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>; 1551 def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>; 1552 def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>; 1553 1554 def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">; 1555 def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>; 1556 def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>; 1557 def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>; 1558 def 
  def ZPR#RegWidth#ExtUXTW64    : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;

  // SXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtSXTW8     : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtSXTW16    : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtSXTW32    : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtSXTW64    : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtSXTW8     : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtSXTW16    : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtSXTW32    : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtSXTW64    : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;

  // LSL(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtLSL8  : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  def ZPR#RegWidth#ExtLSL8         : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  def ZPR#RegWidth#ExtLSL16        : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  def ZPR#RegWidth#ExtLSL32        : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  def ZPR#RegWidth#ExtLSL64        : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
}

class GPR64ShiftExtendAsmOperand<string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  let Name = AsmOperandName # Scale;
  let PredicateMethod = "isGPR64WithShiftExtend<AArch64::" # RegClass # "RegClassID, " # Scale # ">";
  let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseGPROperand<true>";
}

class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass> {
  let ParserMatchClass = !cast<AsmOperandClass>(Name);
  let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}

foreach Scale = [8, 16, 32, 64, 128] in {
  def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
  def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;

  def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
  def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
}

// Accumulator array tiles.
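// The SME ZA storage is modelled as a hierarchy of overlapping tile registers:
// sixteen ZAQ (.q) tiles form the leaves, which combine pairwise into the
// ZAD (.d), ZAS (.s) and ZAH (.h) tiles and finally the single ZAB0 (.b) tile,
// itself the sole sub-register of ZA.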
def ZAQ0  : AArch64Reg<0,  "za0.q">;
def ZAQ1  : AArch64Reg<1,  "za1.q">;
def ZAQ2  : AArch64Reg<2,  "za2.q">;
def ZAQ3  : AArch64Reg<3,  "za3.q">;
def ZAQ4  : AArch64Reg<4,  "za4.q">;
def ZAQ5  : AArch64Reg<5,  "za5.q">;
def ZAQ6  : AArch64Reg<6,  "za6.q">;
def ZAQ7  : AArch64Reg<7,  "za7.q">;
def ZAQ8  : AArch64Reg<8,  "za8.q">;
def ZAQ9  : AArch64Reg<9,  "za9.q">;
def ZAQ10 : AArch64Reg<10, "za10.q">;
def ZAQ11 : AArch64Reg<11, "za11.q">;
def ZAQ12 : AArch64Reg<12, "za12.q">;
def ZAQ13 : AArch64Reg<13, "za13.q">;
def ZAQ14 : AArch64Reg<14, "za14.q">;
def ZAQ15 : AArch64Reg<15, "za15.q">;

let SubRegIndices = [zasubq0, zasubq1] in {
  def ZAD0 : AArch64Reg<0, "za0.d", [ZAQ0, ZAQ8]>;
  def ZAD1 : AArch64Reg<1, "za1.d", [ZAQ1, ZAQ9]>;
  def ZAD2 : AArch64Reg<2, "za2.d", [ZAQ2, ZAQ10]>;
  def ZAD3 : AArch64Reg<3, "za3.d", [ZAQ3, ZAQ11]>;
  def ZAD4 : AArch64Reg<4, "za4.d", [ZAQ4, ZAQ12]>;
  def ZAD5 : AArch64Reg<5, "za5.d", [ZAQ5, ZAQ13]>;
  def ZAD6 : AArch64Reg<6, "za6.d", [ZAQ6, ZAQ14]>;
  def ZAD7 : AArch64Reg<7, "za7.d", [ZAQ7, ZAQ15]>;
}

let SubRegIndices = [zasubd0, zasubd1] in {
  def ZAS0 : AArch64Reg<0, "za0.s", [ZAD0, ZAD4]>;
  def ZAS1 : AArch64Reg<1, "za1.s", [ZAD1, ZAD5]>;
  def ZAS2 : AArch64Reg<2, "za2.s", [ZAD2, ZAD6]>;
  def ZAS3 : AArch64Reg<3, "za3.s", [ZAD3, ZAD7]>;
}

let SubRegIndices = [zasubs0, zasubs1] in {
  def ZAH0 : AArch64Reg<0, "za0.h", [ZAS0, ZAS2]>;
  def ZAH1 : AArch64Reg<1, "za1.h", [ZAS1, ZAS3]>;
}

let SubRegIndices = [zasubh0, zasubh1] in {
  def ZAB0 : AArch64Reg<0, "za0.b", [ZAH0, ZAH1]>;
}

let SubRegIndices = [zasubb] in {
  def ZA : AArch64Reg<0, "za", [ZAB0]>;
}

def ZT0 : AArch64Reg<0, "zt0">;

// SME Register Classes

let isAllocatable = 0 in {
  // Accumulator array
  def MPR : RegisterClass<"AArch64", [untyped], 2048, (add ZA)> {
    let Size = 2048;
  }

  // Accumulator array as single tiles
  def MPR8   : RegisterClass<"AArch64", [untyped], 2048, (add (sequence "ZAB%u", 0, 0))> {
    let Size = 2048;
  }
  def MPR16  : RegisterClass<"AArch64", [untyped], 1024, (add (sequence "ZAH%u", 0, 1))> {
    let Size = 1024;
  }
  def MPR32  : RegisterClass<"AArch64", [untyped], 512, (add (sequence "ZAS%u", 0, 3))> {
    let Size = 512;
  }
  def MPR64  : RegisterClass<"AArch64", [untyped], 256, (add (sequence "ZAD%u", 0, 7))> {
    let Size = 256;
  }
  def MPR128 : RegisterClass<"AArch64", [untyped], 128, (add (sequence "ZAQ%u", 0, 15))> {
    let Size = 128;
  }
}

def ZTR : RegisterClass<"AArch64", [untyped], 512, (add ZT0)> {
  let Size = 512;
  let DiagnosticType = "InvalidLookupTable";
}

// SME Register Operands
// There are three types of SME matrix register operands:
// * Tiles:
//
//   These tiles make up the larger accumulator matrix. The tile representation
//   has an element type suffix, e.g. za0.b or za15.q, and can be any of the
//   registers:
//          ZAQ0..ZAQ15
//          ZAD0..ZAD7
//          ZAS0..ZAS3
//          ZAH0..ZAH1
//          or ZAB0
//
// * Tile vectors:
//
//   Their representation is similar to regular tiles, but they have an extra
//   'h' or 'v' to tell how the vector at [reg+offset] is laid out in the tile,
//   horizontally or vertically.
//
//   e.g. za1h.h or za15v.q, which correspond to vectors in registers ZAH1 and
//   ZAQ15, respectively. The horizontal/vertical layout is more a property of
//   the instruction than of the asm-operand itself or its register. The
//   distinction is required for parsing/printing the operand, as from the
//   compiler's perspective the whole tile is read/written.
//
// * Accumulator matrix:
//
//   This is the entire matrix accumulator register ZA (<=> ZAB0), printed as
//   'za'.

//
// Tiles
//

class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "MatrixTile" # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Tile" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTile";
}

def TileOp16 : MatrixTileOperand<16, 1, MPR16>;
def TileOp32 : MatrixTileOperand<32, 2, MPR32>;
def TileOp64 : MatrixTileOperand<64, 3, MPR64>;

//
// Tile vectors (horizontal and vertical)
//

class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
    : AsmOperandClass {
  let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::"
                          # !if(IsVertical, "Col", "Row") # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
                              RegisterClass RC, int IsVertical>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
                                                    IsVertical>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTileVector<" # IsVertical # ">";
}

def TileVectorOpH8   : MatrixTileVectorOperand<  8, 0, MPR8,   0>;
def TileVectorOpH16  : MatrixTileVectorOperand< 16, 1, MPR16,  0>;
def TileVectorOpH32  : MatrixTileVectorOperand< 32, 2, MPR32,  0>;
def TileVectorOpH64  : MatrixTileVectorOperand< 64, 3, MPR64,  0>;
def TileVectorOpH128 : MatrixTileVectorOperand<128, 4, MPR128, 0>;

def TileVectorOpV8   : MatrixTileVectorOperand<  8, 0, MPR8,   1>;
def TileVectorOpV16  : MatrixTileVectorOperand< 16, 1, MPR16,  1>;
def TileVectorOpV32  : MatrixTileVectorOperand< 32, 2, MPR32,  1>;
def TileVectorOpV64  : MatrixTileVectorOperand< 64, 3, MPR64,  1>;
def TileVectorOpV128 : MatrixTileVectorOperand<128, 4, MPR128, 1>;

//
// Accumulator matrix
//

class MatrixAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "Matrix" # !if(EltSize, !cast<string>(EltSize), "");
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Array" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixOperand<RegisterClass RC, int EltSize> : RegisterOperand<RC> {
  let ParserMatchClass = MatrixAsmOperand<!cast<string>(RC), EltSize>;
  let PrintMethod = "printMatrix<" # EltSize # ">";
}

def MatrixOp : MatrixOperand<MPR, 0>;
// SME2 register operands and classes
def MatrixOp8  : MatrixOperand<MPR, 8>;
def MatrixOp16 : MatrixOperand<MPR, 16>;
def MatrixOp32 : MatrixOperand<MPR, 32>;
def MatrixOp64 : MatrixOperand<MPR, 64>;

class MatrixTileListAsmOperand : AsmOperandClass {
  let Name = "MatrixTileList";
  let ParserMethod = "tryParseMatrixTileList";
  let RenderMethod = "addMatrixTileListOperands";
  let PredicateMethod = "isMatrixTileList";
}

class MatrixTileListOperand : Operand<i8> {
  let ParserMatchClass = MatrixTileListAsmOperand<>;
  let DecoderMethod = "DecodeMatrixTileListRegisterClass";
  let EncoderMethod = "EncodeMatrixTileListRegisterClass";
  let PrintMethod = "printMatrixTileList";
}

def MatrixTileList : MatrixTileListOperand<>;

def MatrixIndexGPR32_8_11 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 8, 11)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_8_11";
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::MatrixIndexGPR32_8_11RegClassID, 0, 4>";
}
def MatrixIndexGPR32_12_15 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 12, 15)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_12_15";
  let DecoderMethod = "DecodeSimpleRegisterClass<AArch64::MatrixIndexGPR32_12_15RegClassID, 0, 4>";
}
def MatrixIndexGPR32Op8_11 : RegisterOperand<MatrixIndexGPR32_8_11> {
  let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W8>";
}
def MatrixIndexGPR32Op12_15 : RegisterOperand<MatrixIndexGPR32_12_15> {
  let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W12>";
}

def SVCROperand : AsmOperandClass {
  let Name = "SVCR";
  let ParserMethod = "tryParseSVCR";
  let DiagnosticType = "Invalid" # Name;
}

def svcr_op : Operand<i32>, TImmLeaf<i32, [{
    return AArch64SVCR::lookupSVCRByEncoding(Imm) != nullptr;
  }]> {
  let ParserMatchClass = SVCROperand;
  let PrintMethod = "printSVCROp";
  let DecoderMethod = "DecodeSVCROp";
  let MCOperandPredicate = [{
    if (!MCOp.isImm())
      return false;
    return AArch64SVCR::lookupSVCRByEncoding(MCOp.getImm()) != nullptr;
  }];
}

//===----------------------------------------------------------------------===//
// Register categories.
//

def GeneralPurposeRegisters : RegisterCategory<[GPR64, GPR32]>;

def FIXED_REGS : RegisterClass<"AArch64", [i64], 64, (add FP, SP, VG, FFR)>;
def FixedRegisters : RegisterCategory<[CCR, FIXED_REGS]>;
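
// Note: the categories above appear to exist only to back register-category
// queries in TargetRegisterInfo (e.g. isGeneralPurposeRegister/isFixedRegister);
// in particular, FIXED_REGS seems to be defined solely so that the
// FixedRegisters category can name FP, SP, VG and FFR alongside CCR.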