xref: /llvm-project/clang/include/clang/Basic/arm_neon.td (revision db6fa74dfea30c025e5d4c30ca4e31e20b69b04d)
1//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9//  This file defines the TableGen definitions from which the ARM NEON header
10//  file will be generated.  See ARM document DUI0348B.
11//
12//===----------------------------------------------------------------------===//
13
14include "arm_neon_incl.td"
15
// Expansion templates ("Op" records) used by NeonEmitter. $pN is the Nth
// argument of the generated intrinsic; (op "x", ...) emits a C operator
// expression and (call "name", ...) emits a call to another NEON intrinsic.
// Widening forms go through vmovl / vmovl_high before the arithmetic.
def OP_ADD      : Op<(op "+", $p0, $p1)>;
def OP_ADDL     : Op<(op "+", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_ADDLHi   : Op<(op "+", (call "vmovl_high", $p0),
                              (call "vmovl_high", $p1))>;
def OP_ADDW     : Op<(op "+", $p0, (call "vmovl", $p1))>;
def OP_ADDWHi   : Op<(op "+", $p0, (call "vmovl_high", $p1))>;
def OP_SUB      : Op<(op "-", $p0, $p1)>;
def OP_SUBL     : Op<(op "-", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_SUBLHi   : Op<(op "-", (call "vmovl_high", $p0),
                              (call "vmovl_high", $p1))>;
def OP_SUBW     : Op<(op "-", $p0, (call "vmovl", $p1))>;
def OP_SUBWHi   : Op<(op "-", $p0, (call "vmovl_high", $p1))>;
// Multiply and multiply-accumulate expansions. "_N" variants splat a scalar
// operand with (dup ...); "Hi" variants operate on the upper halves obtained
// via vget_high.
def OP_MUL      : Op<(op "*", $p0, $p1)>;
def OP_MLA      : Op<(op "+", $p0, (op "*", $p1, $p2))>;
def OP_MLAL     : Op<(op "+", $p0, (call "vmull", $p1, $p2))>;
def OP_MULLHi   : Op<(call "vmull", (call "vget_high", $p0),
                                    (call "vget_high", $p1))>;
// poly64 needs an explicit cast before the widening multiply.
def OP_MULLHi_P64 : Op<(call "vmull",
                         (cast "poly64_t", (call "vget_high", $p0)),
                         (cast "poly64_t", (call "vget_high", $p1)))>;
def OP_MULLHi_N : Op<(call "vmull_n", (call "vget_high", $p0), $p1)>;
def OP_MLALHi   : Op<(call "vmlal", $p0, (call "vget_high", $p1),
                                         (call "vget_high", $p2))>;
def OP_MLALHi_N : Op<(call "vmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MLS      : Op<(op "-", $p0, (op "*", $p1, $p2))>;
// Fused multiply-subtract: implemented as vfma with a negated multiplicand.
def OP_FMLS     : Op<(call "vfma", $p0, (op "-", $p1), $p2)>;
def OP_MLSL     : Op<(op "-", $p0, (call "vmull", $p1, $p2))>;
def OP_MLSLHi   : Op<(call "vmlsl", $p0, (call "vget_high", $p1),
                                         (call "vget_high", $p2))>;
def OP_MLSLHi_N : Op<(call "vmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MUL_N    : Op<(op "*", $p0, (dup $p1))>;
def OP_MULX_N   : Op<(call "vmulx", $p0, (dup $p1))>;
def OP_MLA_N    : Op<(op "+", $p0, (op "*", $p1, (dup $p2)))>;
def OP_MLS_N    : Op<(op "-", $p0, (op "*", $p1, (dup $p2)))>;
def OP_FMLA_N   : Op<(call "vfma", $p0, $p1, (dup $p2))>;
def OP_FMLS_N   : Op<(call "vfma", $p0, (op "-", $p1), (dup $p2))>;
def OP_MLAL_N   : Op<(op "+", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MLSL_N   : Op<(op "-", $p0, (call "vmull", $p1, (dup $p2)))>;
// "_LN" variants multiply by a single lane, broadcast via the mangled
// splat_lane helper ($p2/$p3 is the lane index operand).
def OP_MUL_LN   : Op<(op "*", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULX_LN  : Op<(call "vmulx", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULL_N  : Op<(call "vmull", $p0, (dup $p1))>;
def OP_MULL_LN  : Op<(call "vmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (call_mangled "splat_lane", $p1, $p2))>;
def OP_MLA_LN   : Op<(op "+", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLS_LN   : Op<(op "-", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLAL_LN  : Op<(op "+", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLALHi_LN: Op<(op "+", $p0, (call "vmull", (call "vget_high", $p1),
                                                  (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSL_LN  : Op<(op "-", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSLHi_LN : Op<(op "-", $p0, (call "vmull", (call "vget_high", $p1),
                                                   (call_mangled "splat_lane", $p2, $p3)))>;
// Saturating doubling multiply family (vqdmull/vqdmlal/vqdmlsl/vqdmulh/
// vqrdmulh) in scalar-splat ("_N") and by-lane ("_LN") forms, plus the
// fused-multiply-subtract-by-lane expansions.
def OP_QDMULL_N : Op<(call "vqdmull", $p0, (dup $p1))>;
def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMULLHi_LN : Op<(call "vqdmull", (call "vget_high", $p0),
                                         (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMLAL_N : Op<(call "vqdmlal", $p0, $p1, (dup $p2))>;
def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLALHi_LN : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                              (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSL_N : Op<(call "vqdmlsl", $p0, $p1, (dup $p2))>;
def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSLHi_LN : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                              (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
def OP_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
// FMS by lane: negate the multiplicand and reuse the vfma lane intrinsics.
def OP_FMS_LN   : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ  : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
// Permutes built from shuffle masks. mask0/mask1 are the identity lane masks
// of the two inputs; decimate/interleave/rotl/lowhalf/highhalf combine them
// to produce the TRN/ZIP/UZP lane orderings.
def OP_TRN1     : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
                                                    (decimate mask1, 2)))>;
def OP_ZIP1     : Op<(shuffle $p0, $p1, (lowhalf (interleave mask0, mask1)))>;
def OP_UZP1     : Op<(shuffle $p0, $p1, (add (decimate mask0, 2),
                                             (decimate mask1, 2)))>;
def OP_TRN2     : Op<(shuffle $p0, $p1, (interleave
                                          (decimate (rotl mask0, 1), 2),
                                          (decimate (rotl mask1, 1), 2)))>;
def OP_ZIP2     : Op<(shuffle $p0, $p1, (highhalf (interleave mask0, mask1)))>;
def OP_UZP2     : Op<(shuffle $p0, $p1, (add (decimate (rotl mask0, 1), 2),
                                             (decimate (rotl mask1, 1), 2)))>;
// Comparisons: the C comparison result is cast to the intrinsic's return
// type "R" (the unsigned mask type). Unary/bitwise ops map directly to the
// corresponding C operators.
def OP_EQ       : Op<(cast "R", (op "==", $p0, $p1))>;
def OP_GE       : Op<(cast "R", (op ">=", $p0, $p1))>;
def OP_LE       : Op<(cast "R", (op "<=", $p0, $p1))>;
def OP_GT       : Op<(cast "R", (op ">", $p0, $p1))>;
def OP_LT       : Op<(cast "R", (op "<", $p0, $p1))>;
def OP_NEG      : Op<(op "-", $p0)>;
def OP_NOT      : Op<(op "~", $p0)>;
def OP_AND      : Op<(op "&", $p0, $p1)>;
def OP_OR       : Op<(op "|", $p0, $p1)>;
def OP_XOR      : Op<(op "^", $p0, $p1)>;
def OP_ANDN     : Op<(op "&", $p0, (op "~", $p1))>;
def OP_ORN      : Op<(op "|", $p0, (op "~", $p1))>;
// LOp is a multi-statement expansion; save_temp names an intermediate value.
def OP_CAST     : LOp<[(save_temp $promote, $p0),
                       (cast "R", $promote)]>;
// Half extraction / concatenation via self-shuffles.
def OP_HI       : Op<(shuffle $p0, $p0, (highhalf mask0))>;
def OP_LO       : Op<(shuffle $p0, $p0, (lowhalf mask0))>;
def OP_CONC     : Op<(shuffle $p0, $p1, (add mask0, mask1))>;
def OP_DUP      : Op<(dup $p0)>;
def OP_DUP_LN   : Op<(call_mangled "splat_lane", $p0, $p1)>;
// Bitwise select: (mask & a) | (~mask & b), with operands cast to the mask
// type and the result cast back to "R".
def OP_SEL      : Op<(cast "R", (op "|",
                                    (op "&", $p0, (cast $p0, $p1)),
                                    (op "&", (op "~", $p0), (cast $p0, $p2))))>;
def OP_REV16    : Op<(shuffle $p0, $p0, (rev 16, mask0))>;
def OP_REV32    : Op<(shuffle $p0, $p0, (rev 32, mask0))>;
def OP_REV64    : Op<(shuffle $p0, $p0, (rev 64, mask0))>;
// "_high" narrowing: narrow $p1 and append it to the existing low half $p0.
def OP_XTN      : Op<(call "vcombine", $p0, (call "vmovn", $p1))>;
def OP_SQXTUN   : Op<(call "vcombine", (cast $p0, "U", $p0),
                                       (call "vqmovun", $p1))>;
def OP_QXTN     : Op<(call "vcombine", $p0, (call "vqmovn", $p1))>;
def OP_VCVT_NA_HI_F16 : Op<(call "vcombine", $p0, (call "vcvt_f16_f32", $p1))>;
def OP_VCVT_NA_HI_F32 : Op<(call "vcombine", $p0, (call "vcvt_f32_f64", $p1))>;
def OP_VCVT_EX_HI_F32 : Op<(call "vcvt_f32_f16", (call "vget_high", $p0))>;
def OP_VCVT_EX_HI_F64 : Op<(call "vcvt_f64_f32", (call "vget_high", $p0))>;
def OP_VCVTX_HI : Op<(call "vcombine", $p0, (call "vcvtx_f32", $p1))>;
def OP_REINT    : Op<(cast "R", $p0)>;
def OP_ADDHNHi  : Op<(call "vcombine", $p0, (call "vaddhn", $p1, $p2))>;
def OP_RADDHNHi : Op<(call "vcombine", $p0, (call "vraddhn", $p1, $p2))>;
def OP_SUBHNHi  : Op<(call "vcombine", $p0, (call "vsubhn", $p1, $p2))>;
def OP_RSUBHNHi : Op<(call "vcombine", $p0, (call "vrsubhn", $p1, $p2))>;
// Absolute difference: vabd's result is reinterpreted as unsigned before
// widening so that vmovl zero-extends rather than sign-extends.
def OP_ABDL     : Op<(cast "R", (call "vmovl", (cast $p0, "U",
                                                     (call "vabd", $p0, $p1))))>;
def OP_ABDLHi   : Op<(call "vabdl", (call "vget_high", $p0),
                                    (call "vget_high", $p1))>;
def OP_ABA      : Op<(op "+", $p0, (call "vabd", $p1, $p2))>;
def OP_ABAL     : Op<(op "+", $p0, (call "vabdl", $p1, $p2))>;
def OP_ABALHi   : Op<(call "vabal", $p0, (call "vget_high", $p1),
                                       (call "vget_high", $p2))>;
def OP_QDMULLHi : Op<(call "vqdmull", (call "vget_high", $p0),
                                      (call "vget_high", $p1))>;
def OP_QDMULLHi_N : Op<(call "vqdmull_n", (call "vget_high", $p0), $p1)>;
def OP_QDMLALHi : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLALHi_N : Op<(call "vqdmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_QDMLSLHi : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLSLHi_N : Op<(call "vqdmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_DIV  : Op<(op "/", $p0, $p1)>;
// Generic "_high_" rewrites: name_replace derives the base (non-high)
// intrinsic name from the one being defined.
def OP_LONG_HI : Op<(cast "R", (call (name_replace "_high_", "_"),
                                                (call "vget_high", $p0), $p1))>;
def OP_NARROW_HI : Op<(cast "R", (call "vcombine",
                                       (cast "R", "H", $p0),
                                       (cast "R", "H",
                                           (call (name_replace "_high_", "_"),
                                                 $p1, $p2))))>;
// vmovl_high equivalent: shift-left-long by 0 on the upper half.
def OP_MOVL_HI  : LOp<[(save_temp $a1, (call "vget_high", $p0)),
                       (cast "R",
                            (call "vshll_n", $a1, (literal "int32_t", "0")))]>;
def OP_COPY_LN : Op<(call "vset_lane", (call "vget_lane", $p2, $p3), $p0, $p1)>;
def OP_SCALAR_MUL_LN : Op<(op "*", $p0, (call "vget_lane", $p1, $p2))>;
def OP_SCALAR_MULX_LN : Op<(call "vmulx", $p0, (call "vget_lane", $p1, $p2))>;
// Scalar vmulx by lane: extract lane 0 of $p0 and lane $p2 of $p1, multiply,
// then re-insert the product. The _LNQ form writes the result back to lane 0.
def OP_SCALAR_VMULX_LN : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                                    (literal "int32_t", "0"))),
                              (save_temp $y, (call "vget_lane", $p1, $p2)),
                              (save_temp $z, (call "vmulx", $x, $y)),
                              (call "vset_lane", $z, $p0, $p2)]>;
def OP_SCALAR_VMULX_LNQ : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                                     (literal "int32_t", "0"))),
                               (save_temp $y, (call "vget_lane", $p1, $p2)),
                               (save_temp $z, (call "vmulx", $x, $y)),
                               (call "vset_lane", $z, $p0, (literal "int32_t",
                                                                     "0"))]>;
// Helper class: scalar-by-lane multiply using the named intrinsic.
class ScalarMulOp<string opname> :
  Op<(call opname, $p0, (call "vget_lane", $p1, $p2))>;

def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">;
def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">;
def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">;

def OP_SCALAR_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1,
                                (call "vget_lane", $p2, $p3))>;
def OP_SCALAR_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1,
                                (call "vget_lane", $p2, $p3))>;
190
// fp16 lane access: bitcast through the same-sized integer vector type so
// the generic int16 get/set-lane intrinsics can be reused.
def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
                                   (call "vget_lane",
                                         (bitcast "int16x4_t", $p0), $p1))>;
def OP_SCALAR_HALF_GET_LNQ : Op<(bitcast "float16_t",
                                    (call "vget_lane",
                                          (bitcast "int16x8_t", $p0), $p1))>;
def OP_SCALAR_HALF_SET_LN : Op<(bitcast "float16x4_t",
                                   (call "vset_lane",
                                         (bitcast "int16_t", $p0),
                                         (bitcast "int16x4_t", $p1), $p2))>;
def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t",
                                    (call "vset_lane",
                                          (bitcast "int16_t", $p0),
                                          (bitcast "int16x8_t", $p1), $p2))>;
205
// Dot-product by lane: the indexed operand is viewed as 32-bit lanes,
// splatted, then bitcast back to $p1's element type.
def OP_DOT_LN
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;
def OP_DOT_LNQ
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;

// fp16 FML(A/S)L by lane: broadcast the selected lane to $p1's type.
def OP_FMLAL_LN     : Op<(call "vfmlal_low", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN     : Op<(call "vfmlsl_low", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLAL_LN_Hi  : Op<(call "vfmlal_high", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN_Hi  : Op<(call "vfmlsl_high", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

// Mixed-sign dot product; the splatted operand is cast to signed 8-bit.
def OP_USDOT_LN
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)))>;
def OP_USDOT_LNQ
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)))>;
228
// sudot splats the second vector and then calls vusdot; note the operand
// order is swapped relative to OP_USDOT_LN ($p1 becomes the last argument).
def OP_SUDOT_LN
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)), $p1)>;
def OP_SUDOT_LNQ
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)), $p1)>;

// bfloat16 dot product by lane: index as float32 lanes, splat, bitcast back.
def OP_BFDOT_LN
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x2_t", $p2), $p3)))>;

def OP_BFDOT_LNQ
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x4_t", $p2), $p3)))>;

// bfloat16 widening multiply-accumulate (bottom/top) by lane.
def OP_BFMLALB_LN
    : Op<(call "vbfmlalb", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_BFMLALT_LN
    : Op<(call "vbfmlalt", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
252
// bf16 -> f32: widen each 16-bit element into the top half of a 32-bit lane
// (shift left by 16), which is exactly the f32 representation of the bf16.
def OP_VCVT_F32_BF16
    : Op<(bitcast "R",
          (call "vshll_n", (bitcast "uint16x4_t", $p0),
                           (literal "int32_t", "16")))>;
def OP_VCVT_F32_BF16_LO
    : Op<(call "vcvt_f32_bf16", (call "vget_low", $p0))>;
def OP_VCVT_F32_BF16_HI
    : Op<(call "vcvt_f32_bf16", (call "vget_high", $p0))>;

// f32 -> bf16 on AArch32 goes through the __a32_vcvt_bf16 helper.
def OP_VCVT_BF16_F32_A32
    : Op<(call "__a32_vcvt_bf16", $p0)>;

def OP_VCVT_BF16_F32_LO_A32
    : Op<(call "vcombine", (cast "bfloat16x4_t", (literal "uint64_t", "0ULL")),
                           (call "__a32_vcvt_bf16", $p0))>;
def OP_VCVT_BF16_F32_HI_A32
    : Op<(call "vcombine", (call "__a32_vcvt_bf16", $p1),
                           (call "vget_low", $p0))>;

// Scalar bf16 -> f32 via a 16-bit left shift of the raw bits.
def OP_CVT_F32_BF16
    : Op<(bitcast "R", (op "<<", (cast "uint32_t", (bitcast "uint16_t", $p0)),
                                 (literal "uint32_t", "16")))>;
275
276//===----------------------------------------------------------------------===//
277// Auxiliary Instructions
278//===----------------------------------------------------------------------===//
279
// Splat operation - performs a range-checked splat over a vector.
// ImmCheck<1, ImmCheckLaneIndex, 0> validates operand 1 (the lane index)
// against the lane count of operand 0.
def SPLAT  : WInst<"splat_lane", ".(!q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl",
                    [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl",
                   [ImmCheck<1, ImmCheckLaneIndex, 0>]>;

// bfloat16 splats are only available when the bf16 extension is present.
let TargetGuard = "bf16,neon" in {
  def SPLAT_BF  : WInst<"splat_lane", ".(!q)I", "bQb",
                      [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
  def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb",
                      [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
}
294
295//===----------------------------------------------------------------------===//
296// Intrinsics
297//===----------------------------------------------------------------------===//
298
////////////////////////////////////////////////////////////////////////////////
// E.3.1 Addition
// The second string is the prototype (return/argument shape codes); the third
// lists the type codes for which the intrinsic is instantiated.
def VADD    : IOpInst<"vadd", "...",
                      "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
def VADDL   : SOpInst<"vaddl", "(>Q)..", "csiUcUsUi", OP_ADDL>;
def VADDW   : SOpInst<"vaddw", "(>Q)(>Q).", "csiUcUsUi", OP_ADDW>;
def VHADD   : SInst<"vhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VRHADD  : SInst<"vrhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VQADD   : SInst<"vqadd", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VADDHN  : IInst<"vaddhn", "<QQ", "silUsUiUl">;
def VRADDHN : IInst<"vraddhn", "<QQ", "silUsUiUl">;
310
////////////////////////////////////////////////////////////////////////////////
// E.3.2 Multiplication
def VMUL     : IOpInst<"vmul", "...", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>;
def VMULP    : SInst<"vmul", "...", "PcQPc">;
def VMLA     : IOpInst<"vmla", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
def VMLAL    : SOpInst<"vmlal", "(>Q)(>Q)..", "csiUcUsUi", OP_MLAL>;
def VMLS     : IOpInst<"vmls", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
def VMLSL    : SOpInst<"vmlsl", "(>Q)(>Q)..", "csiUcUsUi", OP_MLSL>;
def VQDMULH  : SInst<"vqdmulh", "...", "siQsQi">;
def VQRDMULH : SInst<"vqrdmulh", "...", "siQsQi">;

// Rounding doubling multiply-accumulate requires the v8.1a extension.
let TargetGuard = "v8.1a,neon" in {
def VQRDMLAH : SInst<"vqrdmlah", "....", "siQsQi">;
def VQRDMLSH : SInst<"vqrdmlsh", "....", "siQsQi">;
}

def VQDMLAL  : SInst<"vqdmlal", "(>Q)(>Q)..", "si">;
def VQDMLSL  : SInst<"vqdmlsl", "(>Q)(>Q)..", "si">;
def VMULL    : SInst<"vmull", "(>Q)..", "csiUcUsUiPc">;
def VQDMULL  : SInst<"vqdmull", "(>Q)..", "si">;
331
////////////////////////////////////////////////////////////////////////////////
// E.3.3 Subtraction
def VSUB    : IOpInst<"vsub", "...",
                      "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
def VSUBL   : SOpInst<"vsubl", "(>Q)..", "csiUcUsUi", OP_SUBL>;
def VSUBW   : SOpInst<"vsubw", "(>Q)(>Q).", "csiUcUsUi", OP_SUBW>;
def VQSUB   : SInst<"vqsub", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VHSUB   : SInst<"vhsub", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VSUBHN  : IInst<"vsubhn", "<QQ", "silUsUiUl">;
def VRSUBHN : IInst<"vrsubhn", "<QQ", "silUsUiUl">;
342
////////////////////////////////////////////////////////////////////////////////
// E.3.4 Comparison
// vcle/vclt reuse the vcge/vcgt instruction names (operands swapped by the
// OP_LE/OP_LT expansions), hence the InstName overrides for test matching.
def VCEQ  : IOpInst<"vceq", "U..", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
def VCGE  : SOpInst<"vcge", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
let InstName = "vcge" in
def VCLE  : SOpInst<"vcle", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
def VCGT  : SOpInst<"vcgt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
let InstName = "vcgt" in
def VCLT  : SOpInst<"vclt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
let InstName = "vacge" in {
def VCAGE : IInst<"vcage", "U..", "fQf">;
def VCALE : IInst<"vcale", "U..", "fQf">;
}
let InstName = "vacgt" in {
def VCAGT : IInst<"vcagt", "U..", "fQf">;
def VCALT : IInst<"vcalt", "U..", "fQf">;
}
def VTST  : WInst<"vtst", "U..", "csiUcUsUiPcPsQcQsQiQUcQUsQUiQPcQPs">;
361
////////////////////////////////////////////////////////////////////////////////
// E.3.5 Absolute Difference
def VABD  : SInst<"vabd", "...",  "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VABDL : SOpInst<"vabdl", "(>Q)..",  "csiUcUsUi", OP_ABDL>;
def VABA  : SOpInst<"vaba", "....", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>;
def VABAL : SOpInst<"vabal", "(>Q)(>Q)..", "csiUcUsUi", OP_ABAL>;
368
////////////////////////////////////////////////////////////////////////////////
// E.3.6 Max/Min
def VMAX : SInst<"vmax", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VMIN : SInst<"vmin", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.7 Pairwise Addition
def VPADD  : IInst<"vpadd", "...", "csiUcUsUif">;
def VPADDL : SInst<"vpaddl", ">.",  "csiUcUsUiQcQsQiQUcQUsQUi">;
def VPADAL : SInst<"vpadal", ">>.", "csiUcUsUiQcQsQiQUcQUsQUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.8-9 Folding Max/Min
def VPMAX : SInst<"vpmax", "...", "csiUcUsUif">;
def VPMIN : SInst<"vpmin", "...", "csiUcUsUif">;

////////////////////////////////////////////////////////////////////////////////
// E.3.10 Reciprocal/Sqrt
def VRECPS  : IInst<"vrecps", "...", "fQf">;
def VRSQRTS : IInst<"vrsqrts", "...", "fQf">;
389
////////////////////////////////////////////////////////////////////////////////
// E.3.11 Shifts by signed variable
def VSHL   : SInst<"vshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL  : SInst<"vqshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHL  : SInst<"vrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQRSHL : SInst<"vqrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
396
////////////////////////////////////////////////////////////////////////////////
// E.3.12 Shifts by constant
let isShift = 1 in {


def VSHR_N     : SInst<"vshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VSHL_N     : IInst<"vshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl",
                      [ImmCheck<1, ImmCheckShiftLeft>]>;
def VRSHR_N    : SInst<"vrshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VSRA_N     : SInst<"vsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl",
                      [ImmCheck<2, ImmCheckShiftRight>]>;
def VRSRA_N    : SInst<"vrsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl",
                      [ImmCheck<2, ImmCheckShiftRight>]>;
def VQSHL_N    : SInst<"vqshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl",
                      [ImmCheck<1, ImmCheckShiftLeft>]>;
def VQSHLU_N   : SInst<"vqshlu_n", "U.I", "csilQcQsQiQl",
                      [ImmCheck<1, ImmCheckShiftLeft>]>;

// Narrowing right shifts should have an immediate range of 1..(sizeinbits(arg)/2).
// However, as the overloaded type code that is supplied to a polymorphic builtin
// is that of the return type (half as wide as the argument in this case), using
// ImmCheckShiftRightNarrow would result in an upper bound of (sizeinbits(arg)/2)/2.
// ImmCheckShiftRight produces the correct behavior here.
def VSHRN_N    : IInst<"vshrn_n", "<QI", "silUsUiUl",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VQSHRUN_N  : SInst<"vqshrun_n", "(<U)QI", "sil",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VQRSHRUN_N : SInst<"vqrshrun_n", "(<U)QI", "sil",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VQSHRN_N   : SInst<"vqshrn_n", "<QI", "silUsUiUl",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VRSHRN_N   : IInst<"vrshrn_n", "<QI", "silUsUiUl",
                      [ImmCheck<1, ImmCheckShiftRight>]>;
def VQRSHRN_N  : SInst<"vqrshrn_n", "<QI", "silUsUiUl",
                      [ImmCheck<1, ImmCheckShiftRight>]>;

// Widening left-shifts should have a range of 0..(sizeinbits(arg)-1).
// This polymorphic builtin is supplied the wider return type as its overloaded
// base type, so the range here is actually 0..(sizeinbits(arg)*2).
// This cannot be rectified currently due to a use of vshll_n_s16 with an
// out-of-bounds immediate in the definition of vcvt_f32_bf16.
def VSHLL_N    : SInst<"vshll_n", "(>Q).I", "csiUcUsUi",
                      [ImmCheck<1, ImmCheckShiftLeft>]>;

////////////////////////////////////////////////////////////////////////////////
// E.3.13 Shifts with insert
def VSRI_N : WInst<"vsri_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs",
                    [ImmCheck<2, ImmCheckShiftRight, 0>]>;
def VSLI_N : WInst<"vsli_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs",
                   [ImmCheck<2, ImmCheckShiftLeft, 0>]>;
}
452
////////////////////////////////////////////////////////////////////////////////
// E.3.14 Loads and stores of a single vector
// Prototype codes: '*' pointer, 'c' const, '!' no-mangling, '2'/'3'/'4'
// multi-vector structs, 'I' immediate; lane variants range-check the lane
// index against the vector operand.
def VLD1      : WInst<"vld1", ".(c*!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_X2   : WInst<"vld1_x2", "2(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X3   : WInst<"vld1_x3", "3(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X4   : WInst<"vld1_x4", "4(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_LANE : WInst<"vld1_lane", ".(c*!).I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs",
                      [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
def VLD1_DUP  : WInst<"vld1_dup", ".(c*!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1      : WInst<"vst1", "v*(.!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1_X2   : WInst<"vst1_x2", "v*(2!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X3   : WInst<"vst1_x3", "v*(3!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X4   : WInst<"vst1_x4", "v*(4!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_LANE : WInst<"vst1_lane", "v*(.!)I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs",
                      [ImmCheck<2, ImmCheckLaneIndex, 1>]>;

// float16 variants require half-precision FP support (__ARM_FP bit 1).
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD1_F16      : WInst<"vld1", ".(c*!)", "hQh">;
def VLD1_X2_F16   : WInst<"vld1_x2", "2(c*!)", "hQh">;
def VLD1_X3_F16   : WInst<"vld1_x3", "3(c*!)", "hQh">;
def VLD1_X4_F16   : WInst<"vld1_x4", "4(c*!)", "hQh">;
def VLD1_LANE_F16 : WInst<"vld1_lane", ".(c*!).I", "hQh",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
def VLD1_DUP_F16  : WInst<"vld1_dup", ".(c*!)", "hQh">;
def VST1_F16      : WInst<"vst1", "v*(.!)", "hQh">;
def VST1_X2_F16   : WInst<"vst1_x2", "v*(2!)", "hQh">;
def VST1_X3_F16   : WInst<"vst1_x3", "v*(3!)", "hQh">;
def VST1_X4_F16   : WInst<"vst1_x4", "v*(4!)", "hQh">;
def VST1_LANE_F16 : WInst<"vst1_lane", "v*(.!)I", "hQh",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
}
495
////////////////////////////////////////////////////////////////////////////////
// E.3.15 Loads and stores of an N-element structure
def VLD2 : WInst<"vld2", "2(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD3 : WInst<"vld3", "3(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD4 : WInst<"vld4", "4(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD2_DUP  : WInst<"vld2_dup", "2(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD3_DUP  : WInst<"vld3_dup", "3(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD4_DUP  : WInst<"vld4_dup", "4(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
// The lane-index operand position differs per arity, hence the varying
// ImmCheck argument indices below.
def VLD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs",
                      [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
def VLD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs",
                      [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
def VLD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs",
                      [ImmCheck<6, ImmCheckLaneIndex, 1>]>;
def VST2 : WInst<"vst2", "v*(2!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST3 : WInst<"vst3", "v*(3!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST4 : WInst<"vst4", "v*(4!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST2_LANE : WInst<"vst2_lane", "v*(2!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs",
                      [ImmCheck<3, ImmCheckLaneIndex, 1>]>;
def VST3_LANE : WInst<"vst3_lane", "v*(3!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs",
                      [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
def VST4_LANE : WInst<"vst4_lane", "v*(4!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs",
                      [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
// float16 variants require half-precision FP support (__ARM_FP bit 1).
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD2_F16      : WInst<"vld2", "2(c*!)", "hQh">;
def VLD3_F16      : WInst<"vld3", "3(c*!)", "hQh">;
def VLD4_F16      : WInst<"vld4", "4(c*!)", "hQh">;
def VLD2_DUP_F16  : WInst<"vld2_dup", "2(c*!)", "hQh">;
def VLD3_DUP_F16  : WInst<"vld3_dup", "3(c*!)", "hQh">;
def VLD4_DUP_F16  : WInst<"vld4_dup", "4(c*!)", "hQh">;
def VLD2_LANE_F16 : WInst<"vld2_lane", "2(c*!)2I", "hQh",
                          [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
def VLD3_LANE_F16 : WInst<"vld3_lane", "3(c*!)3I", "hQh",
                          [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
def VLD4_LANE_F16 : WInst<"vld4_lane", "4(c*!)4I", "hQh",
                          [ImmCheck<6, ImmCheckLaneIndex, 1>]>;
def VST2_F16      : WInst<"vst2", "v*(2!)", "hQh">;
def VST3_F16      : WInst<"vst3", "v*(3!)", "hQh">;
def VST4_F16      : WInst<"vst4", "v*(4!)", "hQh">;
def VST2_LANE_F16 : WInst<"vst2_lane", "v*(2!)I", "hQh",
                          [ImmCheck<3, ImmCheckLaneIndex, 1>]>;
def VST3_LANE_F16 : WInst<"vst3_lane", "v*(3!)I", "hQh",
                         [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
def VST4_LANE_F16 : WInst<"vst4_lane", "v*(4!)I", "hQh",
                          [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
}
545
546////////////////////////////////////////////////////////////////////////////////
547// E.3.16 Extract lanes from a vector
548let InstName = "vmov" in
549def VGET_LANE : IInst<"vget_lane", "1.I",
550                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl",
551                      [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
552
553////////////////////////////////////////////////////////////////////////////////
554// E.3.17 Set lanes within a vector
555let InstName = "vmov" in
556def VSET_LANE : IInst<"vset_lane", ".1.I",
557                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl",
558                      [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
559
560////////////////////////////////////////////////////////////////////////////////
561// E.3.18 Initialize a vector from bit pattern
562def VCREATE : NoTestOpInst<"vcreate", ".(IU>)", "csihfUcUsUiUlPcPsl", OP_CAST> {
563  let BigEndianSafe = 1;
564}
565
566////////////////////////////////////////////////////////////////////////////////
567// E.3.19 Set all lanes to same value
568let InstName = "vmov" in {
569def VDUP_N   : WOpInst<"vdup_n", ".1",
570                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
571                       OP_DUP>;
572def VMOV_N   : WOpInst<"vmov_n", ".1",
573                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
574                       OP_DUP>;
575}
576let InstName = "" in
577def VDUP_LANE: WOpInst<"vdup_lane", ".qI",
578                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
579                       OP_DUP_LN>;
580
581////////////////////////////////////////////////////////////////////////////////
582// E.3.20 Combining vectors
583def VCOMBINE : NoTestOpInst<"vcombine", "Q..", "csilhfUcUsUiUlPcPs", OP_CONC>;
584
585////////////////////////////////////////////////////////////////////////////////
586// E.3.21 Splitting vectors
587// Note that the ARM NEON Reference 2.0 mistakenly document the vget_high_f16()
588// and vget_low_f16() intrinsics as AArch64-only. We (and GCC) support all
589// versions of these intrinsics in both AArch32 and AArch64 architectures. See
590// D45668 for more details.
591let InstName = "vmov" in {
592def VGET_HIGH : NoTestOpInst<"vget_high", ".Q", "csilhfUcUsUiUlPcPs", OP_HI>;
593def VGET_LOW  : NoTestOpInst<"vget_low", ".Q", "csilhfUcUsUiUlPcPs", OP_LO>;
594}
595
596////////////////////////////////////////////////////////////////////////////////
597// E.3.22 Converting vectors
598
599let ArchGuard = "(__ARM_FP & 2)" in {
600  def VCVT_F16_F32 : SInst<"vcvt_f16_f32", "(<q)(.!)", "Hf">;
601  def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "(>Q)(.!)", "h">;
602}
603
604def VCVT_S32     : SInst<"vcvt_s32", "S.",  "fQf">;
605def VCVT_U32     : SInst<"vcvt_u32", "U.",  "fQf">;
606def VCVT_F32     : SInst<"vcvt_f32", "F(.!)",  "iUiQiQUi">;
607def VCVT_N_S32   : SInst<"vcvt_n_s32", "S.I", "fQf",
608                        [ImmCheck<1, ImmCheck1_32>]>;
609def VCVT_N_U32   : SInst<"vcvt_n_u32", "U.I", "fQf",
610                        [ImmCheck<1, ImmCheck1_32>]>;
611def VCVT_N_F32   : SInst<"vcvt_n_f32", "F(.!)I", "iUiQiQUi",
612                        [ImmCheck<1, ImmCheck1_32>]>;
613
614def VMOVN        : IInst<"vmovn", "<Q",  "silUsUiUl">;
615def VMOVL        : SInst<"vmovl", "(>Q).",  "csiUcUsUi">;
616def VQMOVN       : SInst<"vqmovn", "<Q",  "silUsUiUl">;
617def VQMOVUN      : SInst<"vqmovun", "(<U)Q",  "sil">;
618
619////////////////////////////////////////////////////////////////////////////////
620// E.3.23-24 Table lookup, Extended table lookup
621let InstName = "vtbl" in {
622def VTBL1 : WInst<"vtbl1", "..p",  "UccPc">;
623def VTBL2 : WInst<"vtbl2", ".2p",  "UccPc">;
624def VTBL3 : WInst<"vtbl3", ".3p",  "UccPc">;
625def VTBL4 : WInst<"vtbl4", ".4p",  "UccPc">;
626}
627let InstName = "vtbx" in {
628def VTBX1 : WInst<"vtbx1", "...p", "UccPc">;
629def VTBX2 : WInst<"vtbx2", "..2p", "UccPc">;
630def VTBX3 : WInst<"vtbx3", "..3p", "UccPc">;
631def VTBX4 : WInst<"vtbx4", "..4p", "UccPc">;
632}
633
634////////////////////////////////////////////////////////////////////////////////
635// E.3.25 Operations with a scalar value
636def VMLA_LANE     : IOpInst<"vmla_lane", "...qI",
637                            "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
638def VMLAL_LANE    : SOpInst<"vmlal_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLAL_LN>;
639def VQDMLAL_LANE  : SOpInst<"vqdmlal_lane", "(>Q)(>Q)..I", "si", OP_QDMLAL_LN>;
640def VMLS_LANE     : IOpInst<"vmls_lane", "...qI",
641                            "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
642def VMLSL_LANE    : SOpInst<"vmlsl_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLSL_LN>;
643def VQDMLSL_LANE  : SOpInst<"vqdmlsl_lane", "(>Q)(>Q)..I", "si", OP_QDMLSL_LN>;
644def VMUL_N        : IOpInst<"vmul_n", "..1", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
645def VMUL_LANE     : IOpInst<"vmul_lane", "..qI",
646                            "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>;
647def VMULL_N       : SOpInst<"vmull_n", "(>Q).1", "siUsUi", OP_MULL_N>;
648def VMULL_LANE    : SOpInst<"vmull_lane", "(>Q)..I", "siUsUi", OP_MULL_LN>;
649def VQDMULL_N     : SOpInst<"vqdmull_n", "(>Q).1", "si", OP_QDMULL_N>;
650def VQDMULL_LANE  : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>;
651def VQDMULH_N     : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>;
652def VQRDMULH_N    : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>;
653
654let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)" in {
655def VQDMULH_LANE  : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
656def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>;
657}
658let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
659def A64_VQDMULH_LANE  : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi",
660                              [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
661def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi",
662                              [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
663}
664
665let TargetGuard = "v8.1a,neon" in {
666def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "...qI", "siQsQi", OP_QRDMLAH_LN>;
667def VQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "...qI", "siQsQi", OP_QRDMLSH_LN>;
668}
669
670def VMLA_N        : IOpInst<"vmla_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
671def VMLAL_N       : SOpInst<"vmlal_n", "(>Q)(>Q).1", "siUsUi", OP_MLAL_N>;
672def VQDMLAL_N     : SOpInst<"vqdmlal_n", "(>Q)(>Q).1", "si", OP_QDMLAL_N>;
673def VMLS_N        : IOpInst<"vmls_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
674def VMLSL_N       : SOpInst<"vmlsl_n", "(>Q)(>Q).1", "siUsUi", OP_MLSL_N>;
675def VQDMLSL_N     : SOpInst<"vqdmlsl_n", "(>Q)(>Q).1", "si", OP_QDMLSL_N>;
676
677////////////////////////////////////////////////////////////////////////////////
678// E.3.26 Vector Extract
679def VEXT : WInst<"vext", "...I",
680                 "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf",
681                 [ImmCheck<2, ImmCheckLaneIndex, 0>]>;
682
683////////////////////////////////////////////////////////////////////////////////
684// E.3.27 Reverse vector elements
685def VREV64 : WOpInst<"vrev64", "..", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf",
686                  OP_REV64>;
687def VREV32 : WOpInst<"vrev32", "..", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>;
688def VREV16 : WOpInst<"vrev16", "..", "cUcPcQcQUcQPc", OP_REV16>;
689
690////////////////////////////////////////////////////////////////////////////////
691// E.3.28 Other single operand arithmetic
692def VABS    : SInst<"vabs", "..", "csifQcQsQiQf">;
693def VQABS   : SInst<"vqabs", "..", "csiQcQsQi">;
694def VNEG    : SOpInst<"vneg", "..", "csifQcQsQiQf", OP_NEG>;
695def VQNEG   : SInst<"vqneg", "..", "csiQcQsQi">;
696def VCLS    : SInst<"vcls", "S.", "csiUcUsUiQcQsQiQUcQUsQUi">;
697def VCLZ    : IInst<"vclz", "..", "csiUcUsUiQcQsQiQUcQUsQUi">;
698def VCNT    : WInst<"vcnt", "..", "UccPcQUcQcQPc">;
699def VRECPE  : SInst<"vrecpe", "..", "fUiQfQUi">;
700def VRSQRTE : SInst<"vrsqrte", "..", "fUiQfQUi">;
701
702////////////////////////////////////////////////////////////////////////////////
703// E.3.29 Logical operations
704def VMVN : LOpInst<"vmvn", "..", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
705def VAND : LOpInst<"vand", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
706def VORR : LOpInst<"vorr", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
707def VEOR : LOpInst<"veor", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
708def VBIC : LOpInst<"vbic", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
709def VORN : LOpInst<"vorn", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
710let isHiddenLInst = 1 in
711def VBSL : SInst<"vbsl", ".U..",
712                "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs">;
713
714////////////////////////////////////////////////////////////////////////////////
715// E.3.30 Transposition operations
716def VTRN : WInst<"vtrn", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
717def VZIP : WInst<"vzip", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
718def VUZP : WInst<"vuzp", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
719
720////////////////////////////////////////////////////////////////////////////////
721
// Generates vreinterpret intrinsics for every ordered pair drawn from Types
// (Cartesian product of the type list with itself).
class REINTERPRET_CROSS_SELF<string Types> :
  NoTestOpInst<"vreinterpret", "..", Types, OP_REINT> {
    let CartesianProductWith = Types;
}

// Generates vreinterpret intrinsics for every pair crossing the two disjoint
// type lists, in both directions (A-to-B and B-to-A).
multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {
  def AXB: NoTestOpInst<"vreinterpret", "..", TypesA, OP_REINT> {
    let CartesianProductWith = TypesB;
  }
  def BXA: NoTestOpInst<"vreinterpret", "..", TypesB, OP_REINT> {
    let CartesianProductWith = TypesA;
  }
}
735
// E.3.31 Vector reinterpret cast operations
// AArch32-only instantiation; the AArch64 set (which adds f64/poly64/poly128
// types) is defined later in the file.
def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> {
  let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)";
  let BigEndianSafe = 1;
}
741
742////////////////////////////////////////////////////////////////////////////////
743// Vector fused multiply-add operations
744
745let ArchGuard = "defined(__ARM_FEATURE_FMA)" in {
746  def VFMA : SInst<"vfma", "....", "fQf">;
747  def VFMS : SOpInst<"vfms", "....", "fQf", OP_FMLS>;
748  def FMLA_N_F32 : SOpInst<"vfma_n", "...1", "fQf", OP_FMLA_N>;
749}
750
751////////////////////////////////////////////////////////////////////////////////
752// fp16 vector operations
753def SCALAR_HALF_GET_LANE : IOpInst<"vget_lane", "1.I", "h", OP_SCALAR_HALF_GET_LN>;
754def SCALAR_HALF_SET_LANE : IOpInst<"vset_lane", ".1.I", "h", OP_SCALAR_HALF_SET_LN>;
755def SCALAR_HALF_GET_LANEQ : IOpInst<"vget_lane", "1.I", "Qh", OP_SCALAR_HALF_GET_LNQ>;
756def SCALAR_HALF_SET_LANEQ : IOpInst<"vset_lane", ".1.I", "Qh", OP_SCALAR_HALF_SET_LNQ>;
757
758////////////////////////////////////////////////////////////////////////////////
759// Non poly128_t vaddp for Arm and AArch64
760// TODO: poly128_t not implemented on arm32
761def VADDP   : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">;
762
763////////////////////////////////////////////////////////////////////////////////
764// AArch64 Intrinsics
765
766let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
767
768////////////////////////////////////////////////////////////////////////////////
769// Load/Store
770def LD1 : WInst<"vld1", ".(c*!)", "dQdPlQPl">;
771def LD2 : WInst<"vld2", "2(c*!)", "QUlQldQdPlQPl">;
772def LD3 : WInst<"vld3", "3(c*!)", "QUlQldQdPlQPl">;
773def LD4 : WInst<"vld4", "4(c*!)", "QUlQldQdPlQPl">;
774def ST1 : WInst<"vst1", "v*(.!)", "dQdPlQPl">;
775def ST2 : WInst<"vst2", "v*(2!)", "QUlQldQdPlQPl">;
776def ST3 : WInst<"vst3", "v*(3!)", "QUlQldQdPlQPl">;
777def ST4 : WInst<"vst4", "v*(4!)", "QUlQldQdPlQPl">;
778
779def LD1_X2 : WInst<"vld1_x2", "2(c*!)",
780                   "dQdPlQPl">;
781def LD1_X3 : WInst<"vld1_x3", "3(c*!)",
782                   "dQdPlQPl">;
783def LD1_X4 : WInst<"vld1_x4", "4(c*!)",
784                   "dQdPlQPl">;
785
786def ST1_X2 : WInst<"vst1_x2", "v*(2!)", "dQdPlQPl">;
787def ST1_X3 : WInst<"vst1_x3", "v*(3!)", "dQdPlQPl">;
788def ST1_X4 : WInst<"vst1_x4", "v*(4!)", "dQdPlQPl">;
789
790def LD1_LANE : WInst<"vld1_lane", ".(c*!).I", "dQdPlQPl",
791                    [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
792def LD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "lUlQcQUcQPcQlQUldQdPlQPl",
793                    [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
794def LD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "lUlQcQUcQPcQlQUldQdPlQPl",
795                    [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
796def LD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "lUlQcQUcQPcQlQUldQdPlQPl",
797                    [ImmCheck<6, ImmCheckLaneIndex, 1>]>;
798def ST1_LANE : WInst<"vst1_lane", "v*(.!)I", "dQdPlQPl",
799                    [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
800def ST2_LANE : WInst<"vst2_lane", "v*(2!)I", "lUlQcQUcQPcQlQUldQdPlQPl",
801                    [ImmCheck<3, ImmCheckLaneIndex, 1>]>;
802def ST3_LANE : WInst<"vst3_lane", "v*(3!)I", "lUlQcQUcQPcQlQUldQdPlQPl",
803                    [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
804def ST4_LANE : WInst<"vst4_lane", "v*(4!)I", "lUlQcQUcQPcQlQUldQdPlQPl",
805                    [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
806
807def LD1_DUP  : WInst<"vld1_dup", ".(c*!)", "dQdPlQPl">;
808def LD2_DUP  : WInst<"vld2_dup", "2(c*!)", "dQdPlQPl">;
809def LD3_DUP  : WInst<"vld3_dup", "3(c*!)", "dQdPlQPl">;
810def LD4_DUP  : WInst<"vld4_dup", "4(c*!)", "dQdPlQPl">;
811
812def VLDRQ : WInst<"vldrq", "1(c*!)", "Pk">;
813def VSTRQ : WInst<"vstrq", "v*(1!)", "Pk">;
814
815////////////////////////////////////////////////////////////////////////////////
816// Addition
817def ADD : IOpInst<"vadd", "...", "dQd", OP_ADD>;
818
819////////////////////////////////////////////////////////////////////////////////
820// Subtraction
821def SUB : IOpInst<"vsub", "...", "dQd", OP_SUB>;
822
823////////////////////////////////////////////////////////////////////////////////
824// Multiplication
825def MUL     : IOpInst<"vmul", "...", "dQd", OP_MUL>;
826def MLA     : IOpInst<"vmla", "....", "dQd", OP_MLA>;
827def MLS     : IOpInst<"vmls", "....", "dQd", OP_MLS>;
828
829////////////////////////////////////////////////////////////////////////////////
830// Multiplication Extended
831def MULX : SInst<"vmulx", "...", "fdQfQd">;
832
833////////////////////////////////////////////////////////////////////////////////
834// Division
835def FDIV : IOpInst<"vdiv", "...",  "fdQfQd", OP_DIV>;
836
837////////////////////////////////////////////////////////////////////////////////
838// Vector fused multiply-add operations
839def FMLA : SInst<"vfma", "....", "dQd">;
840def FMLS : SOpInst<"vfms", "....", "dQd", OP_FMLS>;
841
842////////////////////////////////////////////////////////////////////////////////
843// MUL, MLA, MLS, FMA, FMS definitions with scalar argument
844def VMUL_N_A64 : IOpInst<"vmul_n", "..1", "Qd", OP_MUL_N>;
845
846def FMLA_N : SOpInst<"vfma_n", "...1", "dQd", OP_FMLA_N>;
847def FMLS_N : SOpInst<"vfms_n", "...1", "fdQfQd", OP_FMLS_N>;
848
849////////////////////////////////////////////////////////////////////////////////
850// Logical operations
851def BSL : SInst<"vbsl", ".U..", "dPlQdQPl">;
852
853////////////////////////////////////////////////////////////////////////////////
854// Absolute Difference
855def ABD  : SInst<"vabd", "...",  "dQd">;
856
857////////////////////////////////////////////////////////////////////////////////
858// saturating absolute/negate
859def ABS    : SInst<"vabs", "..", "dQdlQl">;
860def QABS   : SInst<"vqabs", "..", "lQl">;
861def NEG    : SOpInst<"vneg", "..", "dlQdQl", OP_NEG>;
862def QNEG   : SInst<"vqneg", "..", "lQl">;
863
864////////////////////////////////////////////////////////////////////////////////
865// Signed Saturating Accumulated of Unsigned Value
866def SUQADD : SInst<"vuqadd", "..U", "csilQcQsQiQl">;
867
868////////////////////////////////////////////////////////////////////////////////
869// Unsigned Saturating Accumulated of Signed Value
870def USQADD : SInst<"vsqadd", "..S", "UcUsUiUlQUcQUsQUiQUl">;
871
872////////////////////////////////////////////////////////////////////////////////
873// Reciprocal/Sqrt
874def FRECPS  : IInst<"vrecps", "...", "dQd">;
875def FRSQRTS : IInst<"vrsqrts", "...", "dQd">;
876def FRECPE  : SInst<"vrecpe", "..", "dQd">;
877def FRSQRTE : SInst<"vrsqrte", "..", "dQd">;
878def FSQRT   : SInst<"vsqrt", "..", "fdQfQd">;
879
880////////////////////////////////////////////////////////////////////////////////
881// bitwise reverse
882def RBIT : IInst<"vrbit", "..", "cUcPcQcQUcQPc">;
883
884////////////////////////////////////////////////////////////////////////////////
885// Integer extract and narrow to high
886def XTN2 : SOpInst<"vmovn_high", "(<Q)<Q", "silUsUiUl", OP_XTN>;
887
888////////////////////////////////////////////////////////////////////////////////
889// Signed integer saturating extract and unsigned narrow to high
890def SQXTUN2 : SOpInst<"vqmovun_high", "(<U)(<Uq).", "HsHiHl", OP_SQXTUN>;
891
892////////////////////////////////////////////////////////////////////////////////
893// Integer saturating extract and narrow to high
894def QXTN2 : SOpInst<"vqmovn_high", "(<Q)<Q", "silUsUiUl", OP_QXTN>;
895
896////////////////////////////////////////////////////////////////////////////////
897// Converting vectors
898
899def VCVT_F32_F64 : SInst<"vcvt_f32_f64", "(<q).", "Qd">;
900def VCVT_F64_F32 : SInst<"vcvt_f64_f32", "(>Q).", "f">;
901
902def VCVT_S64 : SInst<"vcvt_s64", "S.",  "dQd">;
903def VCVT_U64 : SInst<"vcvt_u64", "U.",  "dQd">;
904def VCVT_F64 : SInst<"vcvt_f64", "F(.!)",  "lUlQlQUl">;
905
906def VCVT_HIGH_F16_F32 : SOpInst<"vcvt_high_f16", "<(<q!)Q", "Hf", OP_VCVT_NA_HI_F16>;
907def VCVT_HIGH_F32_F16 : SOpInst<"vcvt_high_f32", "(>Q)(Q!)", "h", OP_VCVT_EX_HI_F32>;
908def VCVT_HIGH_F32_F64 : SOpInst<"vcvt_high_f32", "(<Q)(F<!)Q", "d", OP_VCVT_NA_HI_F32>;
909def VCVT_HIGH_F64_F32 : SOpInst<"vcvt_high_f64", "(>Q)(Q!)", "f", OP_VCVT_EX_HI_F64>;
910
911def VCVTX_F32_F64      : SInst<"vcvtx_f32", "(F<)(Q!)",  "d">;
912def VCVTX_HIGH_F32_F64 : SOpInst<"vcvtx_high_f32", "(<Q)(F<!)Q", "d", OP_VCVTX_HI>;
913
914////////////////////////////////////////////////////////////////////////////////
915// Comparison
916def FCAGE : IInst<"vcage", "U..", "dQd">;
917def FCAGT : IInst<"vcagt", "U..", "dQd">;
918def FCALE : IInst<"vcale", "U..", "dQd">;
919def FCALT : IInst<"vcalt", "U..", "dQd">;
920def CMTST  : WInst<"vtst", "U..", "lUlPlQlQUlQPl">;
921def CFMEQ  : SOpInst<"vceq", "U..", "lUldQdQlQUlPlQPl", OP_EQ>;
922def CFMGE  : SOpInst<"vcge", "U..", "lUldQdQlQUl", OP_GE>;
923def CFMLE  : SOpInst<"vcle", "U..", "lUldQdQlQUl", OP_LE>;
924def CFMGT  : SOpInst<"vcgt", "U..", "lUldQdQlQUl", OP_GT>;
925def CFMLT  : SOpInst<"vclt", "U..", "lUldQdQlQUl", OP_LT>;
926
927def CMEQ  : SInst<"vceqz", "U.",
928                  "csilfUcUsUiUlPcPlQcQsQiQlQfQUcQUsQUiQUlQPcdQdQPl">;
929def CMGE  : SInst<"vcgez", "U.", "csilfdQcQsQiQlQfQd">;
930def CMLE  : SInst<"vclez", "U.", "csilfdQcQsQiQlQfQd">;
931def CMGT  : SInst<"vcgtz", "U.", "csilfdQcQsQiQlQfQd">;
932def CMLT  : SInst<"vcltz", "U.", "csilfdQcQsQiQlQfQd">;
933
934////////////////////////////////////////////////////////////////////////////////
935// Max/Min Integer
936def MAX : SInst<"vmax", "...", "dQd">;
937def MIN : SInst<"vmin", "...", "dQd">;
938
939////////////////////////////////////////////////////////////////////////////////
940// Pairwise Max/Min
941def MAXP : SInst<"vpmax", "...", "QcQsQiQUcQUsQUiQfQd">;
942def MINP : SInst<"vpmin", "...", "QcQsQiQUcQUsQUiQfQd">;
943
944////////////////////////////////////////////////////////////////////////////////
945// Pairwise MaxNum/MinNum Floating Point
946def FMAXNMP : SInst<"vpmaxnm", "...", "fQfQd">;
947def FMINNMP : SInst<"vpminnm", "...", "fQfQd">;
948
949////////////////////////////////////////////////////////////////////////////////
950// Pairwise Addition
951def ADDP  : IInst<"vpadd", "...", "QcQsQiQlQUcQUsQUiQUlQfQd">;
952
953////////////////////////////////////////////////////////////////////////////////
954// Shifts by constant
955let isShift = 1 in {
956// Left shift long high
957def SHLL_HIGH_N    : SOpInst<"vshll_high_n", ">.I", "HcHsHiHUcHUsHUi",
958                             OP_LONG_HI>;
959
960////////////////////////////////////////////////////////////////////////////////
961def SRI_N : WInst<"vsri_n", "...I", "PlQPl", [ImmCheck<2, ImmCheckShiftRight, 0>]>;
962def SLI_N : WInst<"vsli_n", "...I", "PlQPl", [ImmCheck<2, ImmCheckShiftLeft, 0>]>;
963
964// Right shift narrow high
965def SHRN_HIGH_N    : IOpInst<"vshrn_high_n", "<(<q).I",
966                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
967def QSHRUN_HIGH_N  : SOpInst<"vqshrun_high_n", "<(<q).I",
968                             "HsHiHl", OP_NARROW_HI>;
969def RSHRN_HIGH_N   : IOpInst<"vrshrn_high_n", "<(<q).I",
970                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
971def QRSHRUN_HIGH_N : SOpInst<"vqrshrun_high_n", "<(<q).I",
972                             "HsHiHl", OP_NARROW_HI>;
973def QSHRN_HIGH_N   : SOpInst<"vqshrn_high_n", "<(<q).I",
974                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
975def QRSHRN_HIGH_N  : SOpInst<"vqrshrn_high_n", "<(<q).I",
976                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
977}
978
979////////////////////////////////////////////////////////////////////////////////
980// Converting vectors
981def VMOVL_HIGH   : SOpInst<"vmovl_high", ">.", "HcHsHiHUcHUsHUi", OP_MOVL_HI>;
982
983def CVTF_N_F64   : SInst<"vcvt_n_f64", "F(.!)I", "lUlQlQUl",
984                        [ImmCheck<1, ImmCheck1_64>]>;
985def FCVTZS_N_S64 : SInst<"vcvt_n_s64", "S.I", "dQd",
986                        [ImmCheck<1, ImmCheck1_64>]>;
987def FCVTZS_N_U64 : SInst<"vcvt_n_u64", "U.I", "dQd",
988                        [ImmCheck<1, ImmCheck1_64>]>;
989
990////////////////////////////////////////////////////////////////////////////////
991// 3VDiff class using high 64-bit in operands
992def VADDL_HIGH   : SOpInst<"vaddl_high", "(>Q)QQ", "csiUcUsUi", OP_ADDLHi>;
993def VADDW_HIGH   : SOpInst<"vaddw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_ADDWHi>;
994def VSUBL_HIGH   : SOpInst<"vsubl_high", "(>Q)QQ", "csiUcUsUi", OP_SUBLHi>;
995def VSUBW_HIGH   : SOpInst<"vsubw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_SUBWHi>;
996
997def VABDL_HIGH   : SOpInst<"vabdl_high", "(>Q)QQ",  "csiUcUsUi", OP_ABDLHi>;
998def VABAL_HIGH   : SOpInst<"vabal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_ABALHi>;
999
1000def VMULL_HIGH   : SOpInst<"vmull_high", "(>Q)QQ", "csiUcUsUiPc", OP_MULLHi>;
1001def VMULL_HIGH_N : SOpInst<"vmull_high_n", "(>Q)Q1", "siUsUi", OP_MULLHi_N>;
1002def VMLAL_HIGH   : SOpInst<"vmlal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLALHi>;
1003def VMLAL_HIGH_N : SOpInst<"vmlal_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLALHi_N>;
1004def VMLSL_HIGH   : SOpInst<"vmlsl_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLSLHi>;
1005def VMLSL_HIGH_N : SOpInst<"vmlsl_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLSLHi_N>;
1006
1007def VADDHN_HIGH  : SOpInst<"vaddhn_high", "(<Q)<QQ", "silUsUiUl", OP_ADDHNHi>;
1008def VRADDHN_HIGH : SOpInst<"vraddhn_high", "(<Q)<QQ", "silUsUiUl", OP_RADDHNHi>;
1009def VSUBHN_HIGH  : SOpInst<"vsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_SUBHNHi>;
1010def VRSUBHN_HIGH : SOpInst<"vrsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_RSUBHNHi>;
1011
1012def VQDMULL_HIGH : SOpInst<"vqdmull_high", "(>Q)QQ", "si", OP_QDMULLHi>;
1013def VQDMULL_HIGH_N : SOpInst<"vqdmull_high_n", "(>Q)Q1", "si", OP_QDMULLHi_N>;
1014def VQDMLAL_HIGH : SOpInst<"vqdmlal_high", "(>Q)(>Q)QQ", "si", OP_QDMLALHi>;
1015def VQDMLAL_HIGH_N : SOpInst<"vqdmlal_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLALHi_N>;
1016def VQDMLSL_HIGH : SOpInst<"vqdmlsl_high", "(>Q)(>Q)QQ", "si", OP_QDMLSLHi>;
1017def VQDMLSL_HIGH_N : SOpInst<"vqdmlsl_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLSLHi_N>;
1018let TargetGuard = "aes,neon" in {
1019  def VMULL_P64    : SInst<"vmull", "(1>)11", "Pl">;
1020  def VMULL_HIGH_P64 : SOpInst<"vmull_high", "(1>)..", "HPl", OP_MULLHi_P64>;
1021}
1022
1023
1024////////////////////////////////////////////////////////////////////////////////
1025// Extract or insert element from vector
1026def GET_LANE : IInst<"vget_lane", "1.I", "dQdPlQPl",
1027                      [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
1028def SET_LANE : IInst<"vset_lane", ".1.I", "dQdPlQPl",
1029                      [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
1030def COPY_LANE : IOpInst<"vcopy_lane", "..I.I",
1031                        "csilUcUsUiUlPcPsPlfd", OP_COPY_LN>;
1032def COPYQ_LANE : IOpInst<"vcopy_lane", "..IqI",
1033                        "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
1034def COPY_LANEQ : IOpInst<"vcopy_laneq", "..IQI",
1035                     "csilPcPsPlUcUsUiUlfd", OP_COPY_LN>;
1036def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "..I.I",
1037                     "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
1038
1039////////////////////////////////////////////////////////////////////////////////
1040// Set all lanes to same value
1041def VDUP_LANE1: WOpInst<"vdup_lane", ".qI", "dQdPlQPl", OP_DUP_LN>;
1042def VDUP_LANE2: WOpInst<"vdup_laneq", ".QI",
1043                  "csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl",
1044                        OP_DUP_LN>;
1045def DUP_N   : WOpInst<"vdup_n", ".1", "dQdPlQPl", OP_DUP>;
1046def MOV_N   : WOpInst<"vmov_n", ".1", "dQdPlQPl", OP_DUP>;
1047
1048////////////////////////////////////////////////////////////////////////////////
1049def COMBINE : NoTestOpInst<"vcombine", "Q..", "dPl", OP_CONC>;
1050
1051////////////////////////////////////////////////////////////////////////////////
1052//Initialize a vector from bit pattern
1053def CREATE : NoTestOpInst<"vcreate", ".(IU>)", "dPl", OP_CAST> {
1054  let BigEndianSafe = 1;
1055}
1056
1057////////////////////////////////////////////////////////////////////////////////
1058
1059def VMLA_LANEQ   : IOpInst<"vmla_laneq", "...QI",
1060                           "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
1061def VMLS_LANEQ   : IOpInst<"vmls_laneq", "...QI",
1062                           "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
1063def VFMA_LANE    : IInst<"vfma_lane", "...qI", "fdQfQd",
1064                        [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
1065def VFMA_LANEQ   : IInst<"vfma_laneq", "...QI", "fdQfQd",
1066                        [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
1067def VFMS_LANE    : IOpInst<"vfms_lane", "...qI", "fdQfQd", OP_FMS_LN>;
1068def VFMS_LANEQ   : IOpInst<"vfms_laneq", "...QI", "fdQfQd", OP_FMS_LNQ>;
1069
1070def VMLAL_LANEQ  : SOpInst<"vmlal_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLAL_LN>;
1071def VMLAL_HIGH_LANE   : SOpInst<"vmlal_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
1072                                OP_MLALHi_LN>;
1073def VMLAL_HIGH_LANEQ  : SOpInst<"vmlal_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
1074                                OP_MLALHi_LN>;
1075def VMLSL_LANEQ  : SOpInst<"vmlsl_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLSL_LN>;
1076def VMLSL_HIGH_LANE   : SOpInst<"vmlsl_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
1077                                OP_MLSLHi_LN>;
1078def VMLSL_HIGH_LANEQ  : SOpInst<"vmlsl_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
1079                                OP_MLSLHi_LN>;
1080def VQDMLAL_LANEQ  : SOpInst<"vqdmlal_laneq", "(>Q)(>Q).QI", "si", OP_QDMLAL_LN>;
1081def VQDMLAL_HIGH_LANE   : SOpInst<"vqdmlal_high_lane", "(>Q)(>Q)Q.I", "si",
1082                                OP_QDMLALHi_LN>;
1083def VQDMLAL_HIGH_LANEQ  : SOpInst<"vqdmlal_high_laneq", "(>Q)(>Q)QQI", "si",
1084                                OP_QDMLALHi_LN>;
1085def VQDMLSL_LANEQ  : SOpInst<"vqdmlsl_laneq", "(>Q)(>Q).QI", "si", OP_QDMLSL_LN>;
1086def VQDMLSL_HIGH_LANE   : SOpInst<"vqdmlsl_high_lane", "(>Q)(>Q)Q.I", "si",
1087                                OP_QDMLSLHi_LN>;
1088def VQDMLSL_HIGH_LANEQ  : SOpInst<"vqdmlsl_high_laneq", "(>Q)(>Q)QQI", "si",
1089                                OP_QDMLSLHi_LN>;
1090
// Double-precision variants of vmul_lane, new in AArch64.
// Note: d type is handled by SCALAR_VMUL_LANE
def VMUL_LANE_A64 : IOpInst<"vmul_lane", "..qI", "Qd", OP_MUL_LN>;

// Note: d type is handled by SCALAR_VMUL_LANEQ
def VMUL_LANEQ   : IOpInst<"vmul_laneq", "..QI",
                           "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN>;
def VMULL_LANEQ  : SOpInst<"vmull_laneq", "(>Q).QI", "siUsUi", OP_MULL_LN>;
def VMULL_HIGH_LANE   : SOpInst<"vmull_high_lane", "(>Q)Q.I", "siUsUi",
                                OP_MULLHi_LN>;
def VMULL_HIGH_LANEQ  : SOpInst<"vmull_high_laneq", "(>Q)QQI", "siUsUi",
                                OP_MULLHi_LN>;
def VQDMULL_LANEQ  : SOpInst<"vqdmull_laneq", "(>Q).QI", "si", OP_QDMULL_LN>;
def VQDMULL_HIGH_LANE   : SOpInst<"vqdmull_high_lane", "(>Q)Q.I", "si",
                                  OP_QDMULLHi_LN>;
def VQDMULL_HIGH_LANEQ  : SOpInst<"vqdmull_high_laneq", "(>Q)QQI", "si",
                                  OP_QDMULLHi_LN>;
// Direct intrinsics with the lane immediate checked against operand 1.
def VQDMULH_LANEQ  : SInst<"vqdmulh_laneq", "..QI", "siQsQi",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
1112
// Rounding doubling multiply-accumulate laneq forms: AArch64 with v8.1a.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a,neon" in {
def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN>;
def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN>;
} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a"

// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
// Note: d type is implemented by SCALAR_VMULX_LANEQ
def VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "fQfQd", OP_MULX_LN>;
1122
1123////////////////////////////////////////////////////////////////////////////////
1124// Across vectors class
1125def VADDLV  : SInst<"vaddlv", "(1>).", "csiUcUsUiQcQsQiQUcQUsQUi">;
1126def VMAXV   : SInst<"vmaxv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
1127def VMINV   : SInst<"vminv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
1128def VADDV   : SInst<"vaddv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQdQlQUl">;
1129def FMAXNMV : SInst<"vmaxnmv", "1.", "fQfQd">;
1130def FMINNMV : SInst<"vminnmv", "1.", "fQfQd">;
1131
1132////////////////////////////////////////////////////////////////////////////////
1133// Newly added Vector Extract for f64
1134def VEXT_A64 : WInst<"vext", "...I", "dQdPlQPl",
1135                    [ImmCheck<2, ImmCheckLaneIndex, 0>]>;
1136
1137////////////////////////////////////////////////////////////////////////////////
1138// Crypto
1139let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "aes,neon" in {
1140def AESE : SInst<"vaese", "...", "QUc">;
1141def AESD : SInst<"vaesd", "...", "QUc">;
1142def AESMC : SInst<"vaesmc", "..", "QUc">;
1143def AESIMC : SInst<"vaesimc", "..", "QUc">;
1144}
1145
// SHA-1 and SHA-256 hash-acceleration operations; require Armv8 with the
// SHA2 target feature.
let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "sha2,neon" in {
def SHA1H : SInst<"vsha1h", "11", "Ui">;
def SHA1SU1 : SInst<"vsha1su1", "...", "QUi">;
def SHA256SU0 : SInst<"vsha256su0", "...", "QUi">;

def SHA1C : SInst<"vsha1c", "..1.", "QUi">;
def SHA1P : SInst<"vsha1p", "..1.", "QUi">;
def SHA1M : SInst<"vsha1m", "..1.", "QUi">;
def SHA1SU0 : SInst<"vsha1su0", "....", "QUi">;
def SHA256H : SInst<"vsha256h", "....", "QUi">;
def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
}
1159
// SHA-3 helper operations (three-way logical ops, rotate-and-xor);
// AArch64-only with the SHA3 target feature. vxar's rotation immediate is
// limited to 0..63.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3,neon" in {
def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
def RAX1 : SInst<"vrax1", "...", "QUl">;
def XAR :  SInst<"vxar", "...I", "QUl", [ImmCheck<2, ImmCheck0_63>]>;
}
1166
// SHA-512 hash-acceleration operations; AArch64-only, gated on the SHA3
// target feature (which in clang also implies SHA-512 support).
// Record renamed SHA512su1 -> SHA512SU1 for consistency with its siblings;
// the record name is TableGen-internal — the emitted intrinsic name comes
// from the "vsha512su1" string, so this is behavior-preserving.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3,neon" in {
def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
def SHA512SU1 : SInst<"vsha512su1", "....", "QUl">;
def SHA512H : SInst<"vsha512h", "....", "QUl">;
def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
}
1173
// SM3 hash operations; AArch64-only with the SM4 target feature. The vsm3tt*
// immediates select a 2-bit element index (0..3).
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4,neon" in {
def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi", [ImmCheck<3, ImmCheck0_3>]>;
def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi", [ImmCheck<3, ImmCheck0_3>]>;
def SM3TT2A : SInst<"vsm3tt2a", "....I", "QUi", [ImmCheck<3, ImmCheck0_3>]>;
def SM3TT2B : SInst<"vsm3tt2b", "....I", "QUi", [ImmCheck<3, ImmCheck0_3>]>;
def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
}

// SM4 block-cipher operations; AArch64-only with the SM4 target feature.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4,neon" in {
def SM4E : SInst<"vsm4e", "...", "QUi">;
def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
}
1188
////////////////////////////////////////////////////////////////////////////////
// poly128_t vadd, AArch64 only; see VADDP for the remaining types.
def VADDP_Q   : WInst<"vadd", "...", "QPk">;
1192
////////////////////////////////////////////////////////////////////////////////
// Float -> Int conversions with explicit rounding mode

// Single-precision variants; guarded only by Armv8, so available on A32 too.
let ArchGuard = "__ARM_ARCH >= 8" in {
def FCVTNS_S32 : SInst<"vcvtn_s32", "S.", "fQf">;
def FCVTNU_S32 : SInst<"vcvtn_u32", "U.", "fQf">;
def FCVTPS_S32 : SInst<"vcvtp_s32", "S.", "fQf">;
def FCVTPU_S32 : SInst<"vcvtp_u32", "U.", "fQf">;
def FCVTMS_S32 : SInst<"vcvtm_s32", "S.", "fQf">;
def FCVTMU_S32 : SInst<"vcvtm_u32", "U.", "fQf">;
def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">;
def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">;
}
1206
// Double-precision variants, AArch64 only.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">;
def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">;
def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">;
def FCVTPU_S64 : SInst<"vcvtp_u64", "U.", "dQd">;
def FCVTMS_S64 : SInst<"vcvtm_s64", "S.", "dQd">;
def FCVTMU_S64 : SInst<"vcvtm_u64", "U.", "dQd">;
def FCVTAS_S64 : SInst<"vcvta_s64", "S.", "dQd">;
def FCVTAU_S64 : SInst<"vcvta_u64", "U.", "dQd">;
}
1217
////////////////////////////////////////////////////////////////////////////////
// Round to Integral

// Single-precision vrnd* variants; requires the directed-rounding extension.
let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def FRINTN_S32 : SInst<"vrndn", "..", "fQf">;
def FRINTA_S32 : SInst<"vrnda", "..", "fQf">;
def FRINTP_S32 : SInst<"vrndp", "..", "fQf">;
def FRINTM_S32 : SInst<"vrndm", "..", "fQf">;
def FRINTX_S32 : SInst<"vrndx", "..", "fQf">;
def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">;
def FRINTI_S32 : SInst<"vrndi", "..", "fQf">;
}
1230
// Double-precision vrnd* variants, AArch64 only.
let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def FRINTN_S64 : SInst<"vrndn", "..", "dQd">;
def FRINTA_S64 : SInst<"vrnda", "..", "dQd">;
def FRINTP_S64 : SInst<"vrndp", "..", "dQd">;
def FRINTM_S64 : SInst<"vrndm", "..", "dQd">;
def FRINTX_S64 : SInst<"vrndx", "..", "dQd">;
def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
}
1240
// Armv8.5-A vrnd32x/vrnd32z/vrnd64x/vrnd64z, AArch64 only; gated on "v8.5a".
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.5a,neon" in {
def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
def FRINT64Z_S32 : SInst<"vrnd64z", "..", "fQf">;

def FRINT32X_S64 : SInst<"vrnd32x", "..", "dQd">;
def FRINT32Z_S64 : SInst<"vrnd32z", "..", "dQd">;
def FRINT64X_S64 : SInst<"vrnd64x", "..", "dQd">;
def FRINT64Z_S64 : SInst<"vrnd64z", "..", "dQd">;
}
1252
////////////////////////////////////////////////////////////////////////////////
// MaxNum/MinNum Floating Point

// Single-precision variants; requires the numeric max/min extension.
let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">;
def FMINNM_S32 : SInst<"vminnm", "...", "fQf">;
}
1260
// Double-precision variants, AArch64 only.
// Fixed a stray double space inside the guard string; ArchGuard is emitted
// verbatim into the generated header's #if, so this matches the analogous
// guards elsewhere in this file.
let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">;
def FMINNM_S64 : SInst<"vminnm", "...", "dQd">;
}
1265
////////////////////////////////////////////////////////////////////////////////
// Permutation
// Single-instruction transpose/zip/unzip (AArch64 forms; the OP_* patterns
// expand to shuffles in the generated header).
def VTRN1 : SOpInst<"vtrn1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN1>;
def VZIP1 : SOpInst<"vzip1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP1>;
def VUZP1 : SOpInst<"vuzp1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP1>;
def VTRN2 : SOpInst<"vtrn2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN2>;
def VZIP2 : SOpInst<"vzip2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP2>;
def VUZP2 : SOpInst<"vuzp2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP2>;
1280
////////////////////////////////////////////////////////////////////////////////
// Table lookup
// AArch64 full-width table lookups over 1-4 source registers; vqtbx* variants
// additionally take a fallback vector for out-of-range indices.
let InstName = "vtbl" in {
def VQTBL1_A64 : WInst<"vqtbl1", ".QU",  "UccPcQUcQcQPc">;
def VQTBL2_A64 : WInst<"vqtbl2", ".(2Q)U",  "UccPcQUcQcQPc">;
def VQTBL3_A64 : WInst<"vqtbl3", ".(3Q)U",  "UccPcQUcQcQPc">;
def VQTBL4_A64 : WInst<"vqtbl4", ".(4Q)U",  "UccPcQUcQcQPc">;
}
let InstName = "vtbx" in {
def VQTBX1_A64 : WInst<"vqtbx1", "..QU", "UccPcQUcQcQPc">;
def VQTBX2_A64 : WInst<"vqtbx2", "..(2Q)U", "UccPcQUcQcQPc">;
def VQTBX3_A64 : WInst<"vqtbx3", "..(3Q)U", "UccPcQUcQcQPc">;
def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
}
1295
////////////////////////////////////////////////////////////////////////////////
// Vector reinterpret cast operations

// NeonEmitter implicitly takes the cartesian product of the type string with
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlmhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQmQhQfQdQPcQPsQPlQPk"> {
  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)";
  // Reinterprets are pure bit-casts, so they are safe on big-endian too.
  let BigEndianSafe = 1;
}
1306
////////////////////////////////////////////////////////////////////////////////
// Scalar Intrinsics
// Scalar Arithmetic

// Scalar Addition
def SCALAR_ADD : SInst<"vadd", "111",  "SlSUl">;
// Scalar Saturating Add
def SCALAR_QADD   : SInst<"vqadd", "111", "ScSsSiSlSUcSUsSUiSUl">;

// Scalar Subtraction
def SCALAR_SUB : SInst<"vsub", "111",  "SlSUl">;
// Scalar Saturating Sub
def SCALAR_QSUB   : SInst<"vqsub", "111", "ScSsSiSlSUcSUsSUiSUl">;

// High/low halves of 128-bit f64/poly64 vectors.
let InstName = "vmov" in {
def VGET_HIGH_A64 : NoTestOpInst<"vget_high", ".Q", "dPl", OP_HI>;
def VGET_LOW_A64  : NoTestOpInst<"vget_low", ".Q", "dPl", OP_LO>;
}
1325
////////////////////////////////////////////////////////////////////////////////
// Scalar Shift
// The "(S1)" operand is the shift amount, always taken as a signed scalar.
// Scalar Shift Left
def SCALAR_SHL: SInst<"vshl", "11(S1)", "SlSUl">;
// Scalar Saturating Shift Left
def SCALAR_QSHL: SInst<"vqshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
// Scalar Saturating Rounding Shift Left
def SCALAR_QRSHL: SInst<"vqrshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
// Scalar Shift Rounding Left
def SCALAR_RSHL: SInst<"vrshl", "11(S1)", "SlSUl">;
1336
////////////////////////////////////////////////////////////////////////////////
// Scalar Shift (Immediate)
// Every immediate below is validated by ImmCheck<arg-index, check, type-index>.
let isScalarShift = 1 in {
// Signed/Unsigned Shift Right (Immediate)
def SCALAR_SSHR_N: SInst<"vshr_n", "11I", "SlSUl",
                        [ImmCheck<1, ImmCheckShiftRight, 0>]>;
// Signed/Unsigned Rounding Shift Right (Immediate)
def SCALAR_SRSHR_N: SInst<"vrshr_n", "11I", "SlSUl",
                          [ImmCheck<1, ImmCheckShiftRight, 0>]>;

// Signed/Unsigned Shift Right and Accumulate (Immediate)
def SCALAR_SSRA_N: SInst<"vsra_n", "111I", "SlSUl",
                        [ImmCheck<2, ImmCheckShiftRight, 0>]>;
// Signed/Unsigned Rounding Shift Right and Accumulate (Immediate)
def SCALAR_SRSRA_N: SInst<"vrsra_n", "111I", "SlSUl",
                        [ImmCheck<2, ImmCheckShiftRight, 0>]>;

// Shift Left (Immediate)
def SCALAR_SHL_N: SInst<"vshl_n", "11I", "SlSUl",
                      [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
// Signed/Unsigned Saturating Shift Left (Immediate)
def SCALAR_SQSHL_N: SInst<"vqshl_n", "11I", "ScSsSiSlSUcSUsSUiSUl",
                      [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
// Signed Saturating Shift Left Unsigned (Immediate)
def SCALAR_SQSHLU_N: SInst<"vqshlu_n", "11I", "ScSsSiSl",
                      [ImmCheck<1, ImmCheckShiftLeft, 0>]>;

// Shift Right And Insert (Immediate)
def SCALAR_SRI_N: SInst<"vsri_n", "111I", "SlSUl",
                        [ImmCheck<2, ImmCheckShiftRight, 0>]>;
// Shift Left And Insert (Immediate)
def SCALAR_SLI_N: SInst<"vsli_n", "111I", "SlSUl",
                        [ImmCheck<2, ImmCheckShiftLeft, 0>]>;

// Narrowing shifts: result type is one step narrower than the source ("1<").
let isScalarNarrowShift = 1 in {
  // Signed/Unsigned Saturating Shift Right Narrow (Immediate)
  def SCALAR_SQSHRN_N: SInst<"vqshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl",
                            [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
  // Signed/Unsigned Saturating Rounded Shift Right Narrow (Immediate)
  def SCALAR_SQRSHRN_N: SInst<"vqrshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl",
                            [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
  def SCALAR_SQSHRUN_N: SInst<"vqshrun_n", "(1<U)1I", "SsSiSl",
                            [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
  def SCALAR_SQRSHRUN_N: SInst<"vqrshrun_n", "(1<U)1I", "SsSiSl",
                            [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed/Unsigned Fixed-point Convert To Floating-Point (Immediate)
def SCALAR_SCVTF_N_F32: SInst<"vcvt_n_f32", "(1F)(1!)I", "SiSUi",
                              [ImmCheck<1, ImmCheck1_32>]>;
def SCALAR_SCVTF_N_F64: SInst<"vcvt_n_f64", "(1F)(1!)I", "SlSUl",
                              [ImmCheck<1, ImmCheck1_64>]>;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Convert To Signed/Unsigned Fixed-point (Immediate)
def SCALAR_FCVTZS_N_S32 : SInst<"vcvt_n_s32", "(1S)1I", "Sf",
                                [ImmCheck<1, ImmCheck1_32>]>;
def SCALAR_FCVTZU_N_U32 : SInst<"vcvt_n_u32", "(1U)1I", "Sf",
                                [ImmCheck<1, ImmCheck1_32>]>;
def SCALAR_FCVTZS_N_S64 : SInst<"vcvt_n_s64", "(1S)1I", "Sd",
                                [ImmCheck<1, ImmCheck1_64>]>;
def SCALAR_FCVTZU_N_U64 : SInst<"vcvt_n_u64", "(1U)1I", "Sd",
                                [ImmCheck<1, ImmCheck1_64>]>;
}
1404
////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Round to Integral
let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def SCALAR_FRINTN_S32 : SInst<"vrndn", "11", "Sf">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Pairwise Addition (Scalar and Floating Point)
def SCALAR_ADDP  : SInst<"vpadd", "1.", "SfSHlSHdSHUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Floating Point Pairwise Max/Min
def SCALAR_FMAXP : SInst<"vpmax", "1.", "SfSQd">;

def SCALAR_FMINP : SInst<"vpmin", "1.", "SfSQd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Floating Point Pairwise maxNum/minNum
def SCALAR_FMAXNMP : SInst<"vpmaxnm", "1.", "SfSQd">;
def SCALAR_FMINNMP : SInst<"vpminnm", "1.", "SfSQd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Saturating Doubling Multiply Half High
def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;
1433
// Armv8.1-A scalar SQRDMLAH/SQRDMLSH, AArch64 only; gated on "v8.1a".
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a,neon" in {
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a"
1443
////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Multiply Extended
def SCALAR_FMULX : IInst<"vmulx", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Step
def SCALAR_FRECPS : IInst<"vrecps", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Square Root Step
def SCALAR_FRSQRTS : IInst<"vrsqrts", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Integer Convert To Floating-point
def SCALAR_SCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "Si">;
def SCALAR_SCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Integer Convert To Floating-point
def SCALAR_UCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "SUi">;
def SCALAR_UCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "SUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Converts
// Suffix key (matches the vector forms above): n/m/a/p select the rounding
// mode; the unsuffixed vcvt_* forms truncate toward zero.
def SCALAR_FCVTXN  : IInst<"vcvtx_f32", "(1F<)(1!)", "Sd">;
def SCALAR_FCVTNSS : SInst<"vcvtn_s32", "(1S)1", "Sf">;
def SCALAR_FCVTNUS : SInst<"vcvtn_u32", "(1U)1", "Sf">;
def SCALAR_FCVTNSD : SInst<"vcvtn_s64", "(1S)1", "Sd">;
def SCALAR_FCVTNUD : SInst<"vcvtn_u64", "(1U)1", "Sd">;
def SCALAR_FCVTMSS : SInst<"vcvtm_s32", "(1S)1", "Sf">;
def SCALAR_FCVTMUS : SInst<"vcvtm_u32", "(1U)1", "Sf">;
def SCALAR_FCVTMSD : SInst<"vcvtm_s64", "(1S)1", "Sd">;
def SCALAR_FCVTMUD : SInst<"vcvtm_u64", "(1U)1", "Sd">;
def SCALAR_FCVTASS : SInst<"vcvta_s32", "(1S)1", "Sf">;
def SCALAR_FCVTAUS : SInst<"vcvta_u32", "(1U)1", "Sf">;
def SCALAR_FCVTASD : SInst<"vcvta_s64", "(1S)1", "Sd">;
def SCALAR_FCVTAUD : SInst<"vcvta_u64", "(1U)1", "Sd">;
def SCALAR_FCVTPSS : SInst<"vcvtp_s32", "(1S)1", "Sf">;
def SCALAR_FCVTPUS : SInst<"vcvtp_u32", "(1U)1", "Sf">;
def SCALAR_FCVTPSD : SInst<"vcvtp_s64", "(1S)1", "Sd">;
def SCALAR_FCVTPUD : SInst<"vcvtp_u64", "(1U)1", "Sd">;
def SCALAR_FCVTZSS : SInst<"vcvt_s32", "(1S)1", "Sf">;
def SCALAR_FCVTZUS : SInst<"vcvt_u32", "(1U)1", "Sf">;
def SCALAR_FCVTZSD : SInst<"vcvt_s64", "(1S)1", "Sd">;
def SCALAR_FCVTZUD : SInst<"vcvt_u64", "(1U)1", "Sd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Estimate
def SCALAR_FRECPE : IInst<"vrecpe", "11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Exponent
def SCALAR_FRECPX : IInst<"vrecpx", "11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Reciprocal Square Root Estimate
def SCALAR_FRSQRTE : IInst<"vrsqrte", "11", "SfSd">;
1501
////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Comparison
// All comparisons return an all-ones/all-zeros unsigned mask ("(U1)").
// Note: the SUl forms of vcge/vcgt are the unsigned CMHS/CMHI encodings.
def SCALAR_CMEQ : SInst<"vceq", "(U1)11", "SlSUl">;
def SCALAR_CMEQZ : SInst<"vceqz", "(U1)1", "SlSUl">;
def SCALAR_CMGE : SInst<"vcge", "(U1)11", "Sl">;
def SCALAR_CMGEZ : SInst<"vcgez", "(U1)1", "Sl">;
def SCALAR_CMHS : SInst<"vcge", "(U1)11", "SUl">;
def SCALAR_CMLE : SInst<"vcle", "(U1)11", "SlSUl">;
def SCALAR_CMLEZ : SInst<"vclez", "(U1)1", "Sl">;
def SCALAR_CMLT : SInst<"vclt", "(U1)11", "SlSUl">;
def SCALAR_CMLTZ : SInst<"vcltz", "(U1)1", "Sl">;
def SCALAR_CMGT : SInst<"vcgt", "(U1)11", "Sl">;
def SCALAR_CMGTZ : SInst<"vcgtz", "(U1)1", "Sl">;
def SCALAR_CMHI : SInst<"vcgt", "(U1)11", "SUl">;
def SCALAR_CMTST : SInst<"vtst", "(U1)11", "SlSUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Comparison
def SCALAR_FCMEQ : IInst<"vceq", "(1U)11", "SfSd">;
def SCALAR_FCMEQZ : IInst<"vceqz", "(1U)1", "SfSd">;
def SCALAR_FCMGE : IInst<"vcge", "(1U)11", "SfSd">;
def SCALAR_FCMGEZ : IInst<"vcgez", "(1U)1", "SfSd">;
def SCALAR_FCMGT : IInst<"vcgt", "(1U)11", "SfSd">;
def SCALAR_FCMGTZ : IInst<"vcgtz", "(1U)1", "SfSd">;
def SCALAR_FCMLE : IInst<"vcle", "(1U)11", "SfSd">;
def SCALAR_FCMLEZ : IInst<"vclez", "(1U)1", "SfSd">;
def SCALAR_FCMLT : IInst<"vclt", "(1U)11", "SfSd">;
def SCALAR_FCMLTZ : IInst<"vcltz", "(1U)1", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
def SCALAR_FACGE : IInst<"vcage", "(1U)11", "SfSd">;
def SCALAR_FACLE : IInst<"vcale", "(1U)11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Absolute Compare Mask Greater Than
def SCALAR_FACGT : IInst<"vcagt", "(1U)11", "SfSd">;
def SCALAR_FACLT : IInst<"vcalt", "(1U)11", "SfSd">;
1540
////////////////////////////////////////////////////////////////////////////////
// Scalar Absolute Value
def SCALAR_ABS : SInst<"vabs", "11", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Absolute Difference
def SCALAR_ABD : IInst<"vabd", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Absolute Value
def SCALAR_SQABS : SInst<"vqabs", "11", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Negate
def SCALAR_NEG : SInst<"vneg", "11", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Negate
def SCALAR_SQNEG : SInst<"vqneg", "11", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Accumulated of Unsigned Value
def SCALAR_SUQADD : SInst<"vuqadd", "11(1U)", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Saturating Accumulated of Signed Value
def SCALAR_USQADD : SInst<"vsqadd", "11(1S)", "SUcSUsSUiSUl">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply-Add Long
// "(1>)" widens the result/accumulator one step relative to the operands.
def SCALAR_SQDMLAL : SInst<"vqdmlal", "(1>)(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply-Subtract Long
def SCALAR_SQDMLSL : SInst<"vqdmlsl", "(1>)(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply Long
def SCALAR_SQDMULL : SInst<"vqdmull", "(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Extract Unsigned Narrow
def SCALAR_SQXTUN : SInst<"vqmovun", "(U1<)1", "SsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Extract Narrow
def SCALAR_SQXTN : SInst<"vqmovn", "(1<)1", "SsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Saturating Extract Narrow
def SCALAR_UQXTN : SInst<"vqmovn", "(1<)1", "SUsSUiSUl">;
1592
// Scalar Floating Point multiply (scalar, by element)
def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "11.I", "SfSd", OP_SCALAR_MUL_LN>;
def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "11QI", "SfSd", OP_SCALAR_MUL_LN>;

// Scalar Floating Point multiply extended (scalar, by element)
def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "11.I", "SfSd", OP_SCALAR_MULX_LN>;
def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "11QI", "SfSd", OP_SCALAR_MULX_LN>;

def SCALAR_VMUL_N : IInst<"vmul_n", "..1", "d">;

// VMUL_LANE_A64 d type implemented using scalar mul lane
def SCALAR_VMUL_LANE : IInst<"vmul_lane", "..qI", "d",
                            [ImmCheck<2, ImmCheckLaneIndex, 1>]>;

// VMUL_LANEQ d type implemented using scalar mul lane
def SCALAR_VMUL_LANEQ : IInst<"vmul_laneq", "..QI", "d",
                              [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
// VMULX_LANE d type implemented using scalar vmulx_lane
def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "d", OP_SCALAR_VMULX_LN>;

// VMULX_LANEQ d type implemented using scalar vmulx_laneq
def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "d", OP_SCALAR_VMULX_LNQ>;
// Scalar Floating Point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANE : IInst<"vfma_lane", "111.I", "SfSd",
                            [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "111QI", "SfSd",
                            [ImmCheck<3, ImmCheckLaneIndex, 2>]>;

// Scalar Floating Point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "111.I", "SfSd", OP_FMS_LN>;
def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "111QI", "SfSd", OP_FMS_LNQ>;

// Signed Saturating Doubling Multiply Long (scalar by element)
def SCALAR_SQDMULL_LANE : SOpInst<"vqdmull_lane", "(1>)1.I", "SsSi", OP_SCALAR_QDMULL_LN>;
def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(1>)1QI", "SsSi", OP_SCALAR_QDMULL_LN>;

// Signed Saturating Doubling Multiply-Add Long (scalar by element)
def SCALAR_SQDMLAL_LANE : SInst<"vqdmlal_lane", "(1>)(1>)1.I", "SsSi",
                                [ImmCheck<3, ImmCheckLaneIndex, 1>]>;
def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "(1>)(1>)1QI", "SsSi",
                                [ImmCheck<3, ImmCheckLaneIndex, 2>]>;

// Signed Saturating Doubling Multiply-Subtract Long (scalar by element)
def SCALAR_SQDMLS_LANE : SInst<"vqdmlsl_lane", "(1>)(1>)1.I", "SsSi",
                              [ImmCheck<3, ImmCheckLaneIndex, 1>]>;
def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "(1>)(1>)1QI", "SsSi",
                              [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
// Scalar Integer Saturating Doubling Multiply Half High (scalar by element)
def SCALAR_SQDMULH_LANE : SOpInst<"vqdmulh_lane", "11.I", "SsSi", OP_SCALAR_QDMULH_LN>;
def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QDMULH_LN>;

// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "11.I", "SsSi", OP_SCALAR_QRDMULH_LN>;
def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QRDMULH_LN>;

// Armv8.1-A by-element forms; gated on "v8.1a".
let TargetGuard = "v8.1a,neon" in {
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "111.I", "SsSi", OP_SCALAR_QRDMLAH_LN>;
def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN>;
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "111.I", "SsSi", OP_SCALAR_QRDMLSH_LN>;
def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN>;
} // TargetGuard = "v8.1a"

// Scalar duplicate of a single lane from a 64- or 128-bit vector.
def SCALAR_VDUP_LANE : IInst<"vdup_lane", "1.I", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs",
                            [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs",
                            [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
1661
1662} // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)"
1663
// ARMv8.2-A FP16 vector intrinsics for A32/A64.
// Everything in this block is gated on the "fullfp16" target feature.
let TargetGuard = "fullfp16,neon" in {

  // ARMv8.2-A FP16 one-operand vector intrinsics.

  // Comparison
  def CMEQH    : SInst<"vceqz", "U.", "hQh">;
  def CMGEH    : SInst<"vcgez", "U.", "hQh">;
  def CMGTH    : SInst<"vcgtz", "U.", "hQh">;
  def CMLEH    : SInst<"vclez", "U.", "hQh">;
  def CMLTH    : SInst<"vcltz", "U.", "hQh">;

  // Vector conversion
  def VCVT_F16     : SInst<"vcvt_f16", "F(.!)",  "sUsQsQUs">;
  def VCVT_S16     : SInst<"vcvt_s16", "S.",  "hQh">;
  def VCVT_U16     : SInst<"vcvt_u16", "U.",  "hQh">;
  def VCVTA_S16    : SInst<"vcvta_s16", "S.", "hQh">;
  def VCVTA_U16    : SInst<"vcvta_u16", "U.", "hQh">;
  def VCVTM_S16    : SInst<"vcvtm_s16", "S.", "hQh">;
  def VCVTM_U16    : SInst<"vcvtm_u16", "U.", "hQh">;
  def VCVTN_S16    : SInst<"vcvtn_s16", "S.", "hQh">;
  def VCVTN_U16    : SInst<"vcvtn_u16", "U.", "hQh">;
  def VCVTP_S16    : SInst<"vcvtp_s16", "S.", "hQh">;
  def VCVTP_U16    : SInst<"vcvtp_u16", "U.", "hQh">;

  // Vector rounding; additionally requires the directed-rounding extension.
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)", TargetGuard = "fullfp16,neon" in {
    def FRINTZH      : SInst<"vrnd",  "..", "hQh">;
    def FRINTNH      : SInst<"vrndn", "..", "hQh">;
    def FRINTAH      : SInst<"vrnda", "..", "hQh">;
    def FRINTPH      : SInst<"vrndp", "..", "hQh">;
    def FRINTMH      : SInst<"vrndm", "..", "hQh">;
    def FRINTXH      : SInst<"vrndx", "..", "hQh">;
  }

  // Misc.
  def VABSH        : SInst<"vabs", "..", "hQh">;
  def VNEGH        : SOpInst<"vneg", "..", "hQh", OP_NEG>;
  def VRECPEH      : SInst<"vrecpe", "..", "hQh">;
  def FRSQRTEH     : SInst<"vrsqrte", "..", "hQh">;

  // ARMv8.2-A FP16 two-operands vector intrinsics.

  // Misc.
  def VADDH        : SOpInst<"vadd", "...", "hQh", OP_ADD>;
  def VABDH        : SInst<"vabd", "...",  "hQh">;
  def VSUBH         : SOpInst<"vsub", "...", "hQh", OP_SUB>;

  // Comparison
  let InstName = "vacge" in {
    def VCAGEH     : SInst<"vcage", "U..", "hQh">;
    def VCALEH     : SInst<"vcale", "U..", "hQh">;
  }
  let InstName = "vacgt" in {
    def VCAGTH     : SInst<"vcagt", "U..", "hQh">;
    def VCALTH     : SInst<"vcalt", "U..", "hQh">;
  }
  def VCEQH        : SOpInst<"vceq", "U..", "hQh", OP_EQ>;
  def VCGEH        : SOpInst<"vcge", "U..", "hQh", OP_GE>;
  def VCGTH        : SOpInst<"vcgt", "U..", "hQh", OP_GT>;
  let InstName = "vcge" in
    def VCLEH      : SOpInst<"vcle", "U..", "hQh", OP_LE>;
  let InstName = "vcgt" in
    def VCLTH      : SOpInst<"vclt", "U..", "hQh", OP_LT>;

  // Vector conversion (fixed-point; immediate must lie in [1, 16])
    def VCVT_N_F16 : SInst<"vcvt_n_f16", "F(.!)I", "sUsQsQUs",
                          [ImmCheck<1, ImmCheck1_16>]>;
    def VCVT_N_S16 : SInst<"vcvt_n_s16", "S.I", "hQh",
                          [ImmCheck<1, ImmCheck1_16>]>;
    def VCVT_N_U16 : SInst<"vcvt_n_u16", "U.I", "hQh",
                          [ImmCheck<1, ImmCheck1_16>]>;

  // Max/Min
  def VMAXH         : SInst<"vmax", "...", "hQh">;
  def VMINH         : SInst<"vmin", "...", "hQh">;
  // maxnm/minnm additionally require the numeric max/min extension.
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)", TargetGuard = "fullfp16,neon" in {
    def FMAXNMH       : SInst<"vmaxnm", "...", "hQh">;
    def FMINNMH       : SInst<"vminnm", "...", "hQh">;
  }

  // Multiplication/Division
  def VMULH         : SOpInst<"vmul", "...", "hQh", OP_MUL>;

  // Pairwise addition
  def VPADDH        : SInst<"vpadd", "...", "h">;

  // Pairwise Max/Min
  def VPMAXH        : SInst<"vpmax", "...", "h">;
  def VPMINH        : SInst<"vpmin", "...", "h">;

  // Reciprocal/Sqrt
  def VRECPSH       : SInst<"vrecps", "...", "hQh">;
  def VRSQRTSH      : SInst<"vrsqrts", "...", "hQh">;

  // ARMv8.2-A FP16 three-operands vector intrinsics.

  // Vector fused multiply-add operations
  def VFMAH        : SInst<"vfma", "....", "hQh">;
  def VFMSH        : SOpInst<"vfms", "....", "hQh", OP_FMLS>;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // Mul lane
  def VMUL_LANEH    : IOpInst<"vmul_lane", "..qI", "hQh", OP_MUL_LN>;
  def VMUL_NH       : IOpInst<"vmul_n", "..1", "hQh", OP_MUL_N>;
}
1771
// Data processing intrinsics - section 5. Do not require fullfp16.
// (These operate on f16 vectors purely as bit patterns.)

// Logical operations
let isHiddenLInst = 1 in
def VBSLH    : SInst<"vbsl", ".U..", "hQh">;
// Transposition operations
def VZIPH    : WInst<"vzip", "2..", "hQh">;
def VUZPH    : WInst<"vuzp", "2..", "hQh">;
def VTRNH    : WInst<"vtrn", "2..", "hQh">;
// Vector Extract
def VEXTH      : WInst<"vext", "...I", "hQh", [ImmCheck<2, ImmCheckLaneIndex, 0>]>;
// Reverse vector elements
def VREV64H    : WOpInst<"vrev64", "..", "hQh", OP_REV64>;
1785
// ARMv8.2-A FP16 vector intrinsics for A64 only.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fullfp16,neon" in {

  // Vector rounding
  def FRINTIH      : SInst<"vrndi", "..", "hQh">;

  // Misc.
  def FSQRTH       : SInst<"vsqrt", "..", "hQh">;

  // Multiplication/Division
  def MULXH         : SInst<"vmulx", "...", "hQh">;
  def FDIVH         : IOpInst<"vdiv", "...",  "hQh", OP_DIV>;

  // Pairwise addition
  def VPADDH1       : SInst<"vpadd", "...", "Qh">;

  // Pairwise Max/Min
  def VPMAXH1       : SInst<"vpmax", "...", "Qh">;
  def VPMINH1       : SInst<"vpmin", "...", "Qh">;

  // Pairwise MaxNum/MinNum
  def FMAXNMPH      : SInst<"vpmaxnm", "...", "hQh">;
  def FMINNMPH      : SInst<"vpminnm", "...", "hQh">;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // FMA lane
  def VFMA_LANEH   : IInst<"vfma_lane", "...qI", "hQh",
                          [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
  def VFMA_LANEQH  : IInst<"vfma_laneq", "...QI", "hQh",
                          [ImmCheck<3, ImmCheckLaneIndex, 2>]>;

  // FMA lane with scalar argument
  def FMLA_NH      : SOpInst<"vfma_n", "...1", "hQh", OP_FMLA_N>;
  // Scalar floating point fused multiply-add (scalar, by element)
  def SCALAR_FMLA_LANEH  : IInst<"vfma_lane", "111.I", "Sh",
                                [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
  def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "111QI", "Sh",
                                [ImmCheck<3, ImmCheckLaneIndex, 2>]>;

  // FMS lane
  def VFMS_LANEH   : IOpInst<"vfms_lane", "...qI", "hQh", OP_FMS_LN>;
  def VFMS_LANEQH  : IOpInst<"vfms_laneq", "...QI", "hQh", OP_FMS_LNQ>;
  // FMS lane with scalar argument
  def FMLS_NH      : SOpInst<"vfms_n", "...1", "hQh", OP_FMLS_N>;
  // Scalar floating point fused multiply-subtract (scalar, by element)
  def SCALAR_FMLS_LANEH  : IOpInst<"vfms_lane", "111.I", "Sh", OP_FMS_LN>;
  def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "111QI", "Sh", OP_FMS_LNQ>;
  // Mul lane
  def VMUL_LANEQH   : IOpInst<"vmul_laneq", "..QI", "hQh", OP_MUL_LN>;
  // Scalar floating point multiply (scalar, by element)
  def SCALAR_FMUL_LANEH  : IOpInst<"vmul_lane", "11.I", "Sh", OP_SCALAR_MUL_LN>;
  def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "11QI", "Sh", OP_SCALAR_MUL_LN>;

  // Mulx lane
  def VMULX_LANEH   : IOpInst<"vmulx_lane", "..qI", "hQh", OP_MULX_LN>;
  def VMULX_LANEQH  : IOpInst<"vmulx_laneq", "..QI", "hQh", OP_MULX_LN>;
  def VMULX_NH      : IOpInst<"vmulx_n", "..1", "hQh", OP_MULX_N>;
  // Scalar floating point mulx (scalar, by element)
  def SCALAR_FMULX_LANEH : IInst<"vmulx_lane", "11.I", "Sh",
                                [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
  def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "11QI", "Sh",
                                [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
  // ARMv8.2-A FP16 reduction vector intrinsics.
  def VMAXVH   : SInst<"vmaxv", "1.", "hQh">;
  def VMINVH   : SInst<"vminv", "1.", "hQh">;
  def FMAXNMVH : SInst<"vmaxnmv", "1.", "hQh">;
  def FMINNMVH : SInst<"vminnmv", "1.", "hQh">;
}
1855
// A64-only FP16 permutation and scalar-dup-from-lane intrinsics.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
  // Permutation
  def VTRN1H     : SOpInst<"vtrn1", "...", "hQh", OP_TRN1>;
  def VZIP1H     : SOpInst<"vzip1", "...", "hQh", OP_ZIP1>;
  def VUZP1H     : SOpInst<"vuzp1", "...", "hQh", OP_UZP1>;
  def VTRN2H     : SOpInst<"vtrn2", "...", "hQh", OP_TRN2>;
  def VZIP2H     : SOpInst<"vzip2", "...", "hQh", OP_ZIP2>;
  def VUZP2H     : SOpInst<"vuzp2", "...", "hQh", OP_UZP2>;

  // Extract one FP16 element as a scalar; the immediate (operand 1) is
  // range-checked as a lane index of the source vector (operand 0).
  def SCALAR_VDUP_LANEH  : IInst<"vdup_lane", "1.I", "Sh",
                                [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
  def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "1QI", "Sh",
                                [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
}
1870
// v8.2-A dot product instructions.
// Type code "iQiUiQUi" covers both signed (vdot -> SDOT) and unsigned
// (UDOT) element types, in 64-bit and 128-bit vector widths.
let TargetGuard = "dotprod,neon" in {
  def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">;
  def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<<q)I", "iUiQiQUi", OP_DOT_LN>;
}
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "dotprod,neon" in {
  // Variants indexing into a 128-bit vector are A64 only.
  def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ>;
}
1880
// v8.2-A FP16 fused multiply-add long instructions.
// Widening FMA: fp16 operands accumulate into an f32 result (">>" in the
// prototype widens the return type twice relative to the fp16 inputs).
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fp16fml,neon" in {
  def VFMLAL_LOW  : SInst<"vfmlal_low",  ">>..", "hQh">;
  def VFMLSL_LOW  : SInst<"vfmlsl_low",  ">>..", "hQh">;
  def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
  def VFMLSL_HIGH : SInst<"vfmlsl_high", ">>..", "hQh">;

  // _lane variants index into a 64-bit vector ("q"), _laneq into a
  // 128-bit vector ("Q").
  def VFMLAL_LANE_LOW  : SOpInst<"vfmlal_lane_low",  "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN>;
  def VFMLSL_LANE_LOW  : SOpInst<"vfmlsl_lane_low",  "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN>;
  def VFMLAL_LANE_HIGH : SOpInst<"vfmlal_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN_Hi>;
  def VFMLSL_LANE_HIGH : SOpInst<"vfmlsl_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN_Hi>;

  def VFMLAL_LANEQ_LOW  : SOpInst<"vfmlal_laneq_low",  "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN>;
  def VFMLSL_LANEQ_LOW  : SOpInst<"vfmlsl_laneq_low",  "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN>;
  def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN_Hi>;
  def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN_Hi>;
}
1898
// Int8 matrix multiply-accumulate and mixed-sign dot product intrinsics
// (FEAT_I8MM). "U" in a parenthesized operand marks that operand unsigned,
// giving the unsigned-by-signed (vusdot) and signed-by-unsigned (vsudot)
// mixed forms.
let TargetGuard = "i8mm,neon" in {
  def VMMLA   : SInst<"vmmla", "..(<<)(<<)", "QUiQi">;
  def VUSMMLA : SInst<"vusmmla", "..(<<U)(<<)", "Qi">;

  def VUSDOT  : SInst<"vusdot", "..(<<U)(<<)", "iQi">;

  def VUSDOT_LANE  : SOpInst<"vusdot_lane", "..(<<U)(<<q)I", "iQi", OP_USDOT_LN>;
  def VSUDOT_LANE  : SOpInst<"vsudot_lane", "..(<<)(<<qU)I", "iQi", OP_SUDOT_LN>;

  // Variants indexing into a 128-bit vector are A64 only.
  let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
    def VUSDOT_LANEQ  : SOpInst<"vusdot_laneq", "..(<<U)(<<Q)I", "iQi", OP_USDOT_LNQ>;
    def VSUDOT_LANEQ  : SOpInst<"vsudot_laneq", "..(<<)(<<QU)I", "iQi", OP_SUDOT_LNQ>;
  }
}
1913
// BFloat16 dot product, matrix multiply-accumulate, and widening
// multiply-add intrinsics; bf16 inputs ("B") accumulate into f32 vectors.
let TargetGuard = "bf16,neon" in {
  def VDOT_BF : SInst<"vbfdot", "..BB", "fQf">;
  def VDOT_LANE_BF : SOpInst<"vbfdot_lane", "..B(Bq)I", "fQf", OP_BFDOT_LN>;
  def VDOT_LANEQ_BF : SOpInst<"vbfdot_laneq", "..B(BQ)I", "fQf", OP_BFDOT_LNQ>;

  def VFMMLA_BF : SInst<"vbfmmla", "..BB", "Qf">;

  // "b"/"t" select the bottom (even) or top (odd) bf16 elements.
  def VFMLALB_BF : SInst<"vbfmlalb", "..BB", "Qf">;
  def VFMLALT_BF : SInst<"vbfmlalt", "..BB", "Qf">;

  def VFMLALB_LANE_BF : SOpInst<"vbfmlalb_lane", "..B(Bq)I", "Qf", OP_BFMLALB_LN>;
  def VFMLALB_LANEQ_BF : SOpInst<"vbfmlalb_laneq", "..B(BQ)I", "Qf", OP_BFMLALB_LN>;

  def VFMLALT_LANE_BF : SOpInst<"vbfmlalt_lane", "..B(Bq)I", "Qf", OP_BFMLALT_LN>;
  def VFMLALT_LANEQ_BF : SOpInst<"vbfmlalt_laneq", "..B(BQ)I", "Qf", OP_BFMLALT_LN>;
}
1930
// Emits vcmla{ROT} plus its _lane/_laneq variants, for both the base type
// and its 128-bit ("Q"-prefixed) form, across all four complex rotations.
// lanety/laneqty are integer vector types sized so one integer element
// holds a whole complex (real, imag) pair (e.g. uint32x2_t for fp16 at the
// instantiation sites): the lane operand is bitcast to that type so
// vget_lane/dup_typed move the pair as a unit, then bitcast back to the
// accumulator's type.
multiclass VCMLA_ROTS<string type, string lanety, string laneqty> {
  foreach ROT = ["", "_rot90", "_rot180", "_rot270" ] in {
    def   : SInst<"vcmla" # ROT, "....", type # "Q" # type>;

    // vcmla{ROT}_lane
    def : SOpInst<"vcmla" # ROT # "_lane", "...qI", type, Op<(call "vcmla" # ROT, $p0, $p1,
           (bitcast $p0, (dup_typed lanety , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;
    // vcmlaq{ROT}_lane
    def : SOpInst<"vcmla" # ROT # "_lane", "...qI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
           (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;

    // vcmla{ROT}_laneq
    def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", type,  Op<(call "vcmla" # ROT, $p0, $p1,
            (bitcast $p0, (dup_typed lanety, (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;
    // vcmlaq{ROT}_laneq
    def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
            (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;
  }
}
1950
// v8.3-A vector complex addition (vcadd) and complex multiply-accumulate
// (vcmla) intrinsics.
let TargetGuard = "v8.3a,fullfp16,neon" in {
  def VCADD_ROT90_FP16   : SInst<"vcadd_rot90", "...", "h">;
  def VCADD_ROT270_FP16  : SInst<"vcadd_rot270", "...", "h">;
  def VCADDQ_ROT90_FP16  : SInst<"vcaddq_rot90", "QQQ", "h">;
  def VCADDQ_ROT270_FP16 : SInst<"vcaddq_rot270", "QQQ", "h">;

  // One fp16 complex pair packs into one uint32 lane element.
  defm VCMLA_FP16  : VCMLA_ROTS<"h", "uint32x2_t", "uint32x4_t">;
}
let TargetGuard = "v8.3a,neon" in {
  def VCADD_ROT90   : SInst<"vcadd_rot90", "...", "f">;
  def VCADD_ROT270  : SInst<"vcadd_rot270", "...", "f">;
  def VCADDQ_ROT90  : SInst<"vcaddq_rot90", "QQQ", "f">;
  def VCADDQ_ROT270 : SInst<"vcaddq_rot270", "QQQ", "f">;

  // One f32 complex pair packs into one uint64 lane element.
  defm VCMLA_F32    : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">;
}
// Double-precision variants exist only as 128-bit (Q) forms and only on A64.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.3a,neon" in {
  def VCADDQ_ROT90_FP64  : SInst<"vcaddq_rot90", "QQQ", "d">;
  def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;

  def VCMLAQ_FP64        : SInst<"vcmlaq", "QQQQ", "d">;
  def VCMLAQ_ROT90_FP64  : SInst<"vcmlaq_rot90", "QQQQ", "d">;
  def VCMLAQ_ROT180_FP64 : SInst<"vcmlaq_rot180", "QQQQ", "d">;
  def VCMLAQ_ROT270_FP64 : SInst<"vcmlaq_rot270", "QQQQ", "d">;
}
1977
// V8.2-A BFloat intrinsics
// Core bfloat16 support: construction, element access, loads/stores, and
// bf16 <-> f32 conversion helpers ("b" = bfloat16x4_t, "Qb" = bfloat16x8_t).
let TargetGuard = "bf16,neon" in {
  // Vector construction and duplication.
  def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
    let BigEndianSafe = 1;
  }

  def VDUP_N_BF    : WOpInst<"vdup_n", ".1", "bQb", OP_DUP>;

  def VDUP_LANE_BF : WOpInst<"vdup_lane", ".qI", "bQb", OP_DUP_LN>;
  def VDUP_LANEQ_BF: WOpInst<"vdup_laneq", ".QI", "bQb", OP_DUP_LN>;

  def VCOMBINE_BF  : NoTestOpInst<"vcombine", "Q..", "b", OP_CONC>;

  def VGET_HIGH_BF : NoTestOpInst<"vget_high", ".Q", "b", OP_HI>;
  def VGET_LOW_BF  : NoTestOpInst<"vget_low", ".Q", "b", OP_LO>;

  // Lane read/write; each immediate is range-checked as a lane index of
  // the referenced vector operand.
  def VGET_LANE_BF : IInst<"vget_lane", "1.I", "bQb",
                          [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
  def VSET_LANE_BF : IInst<"vset_lane", ".1.I", "bQb",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
  def SCALAR_VDUP_LANE_BF : IInst<"vdup_lane", "1.I", "Sb",
                          [ImmCheck<1, ImmCheckLaneIndex, 0>]>;
  def SCALAR_VDUP_LANEQ_BF : IInst<"vdup_laneq", "1QI", "Sb",
                          [ImmCheck<1, ImmCheckLaneIndex, 0>]>;

  // Structure loads/stores (1-4 registers).
  def VLD1_BF : WInst<"vld1", ".(c*!)", "bQb">;
  def VLD2_BF : WInst<"vld2", "2(c*!)", "bQb">;
  def VLD3_BF : WInst<"vld3", "3(c*!)", "bQb">;
  def VLD4_BF : WInst<"vld4", "4(c*!)", "bQb">;

  def VST1_BF : WInst<"vst1", "v*(.!)", "bQb">;
  def VST2_BF : WInst<"vst2", "v*(2!)", "bQb">;
  def VST3_BF : WInst<"vst3", "v*(3!)", "bQb">;
  def VST4_BF : WInst<"vst4", "v*(4!)", "bQb">;

  def VLD1_X2_BF : WInst<"vld1_x2", "2(c*!)", "bQb">;
  def VLD1_X3_BF : WInst<"vld1_x3", "3(c*!)", "bQb">;
  def VLD1_X4_BF : WInst<"vld1_x4", "4(c*!)", "bQb">;

  def VST1_X2_BF : WInst<"vst1_x2", "v*(2!)", "bQb">;
  def VST1_X3_BF : WInst<"vst1_x3", "v*(3!)", "bQb">;
  def VST1_X4_BF : WInst<"vst1_x4", "v*(4!)", "bQb">;

  // Single-lane loads/stores.
  def VLD1_LANE_BF : WInst<"vld1_lane", ".(c*!).I", "bQb",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
  def VLD2_LANE_BF : WInst<"vld2_lane", "2(c*!)2I", "bQb",
                          [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
  def VLD3_LANE_BF : WInst<"vld3_lane", "3(c*!)3I", "bQb",
                          [ImmCheck<5, ImmCheckLaneIndex, 1>]>;
  def VLD4_LANE_BF : WInst<"vld4_lane", "4(c*!)4I", "bQb",
                          [ImmCheck<6, ImmCheckLaneIndex, 1>]>;
  def VST1_LANE_BF : WInst<"vst1_lane", "v*(.!)I", "bQb",
                          [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
  def VST2_LANE_BF : WInst<"vst2_lane", "v*(2!)I", "bQb",
                          [ImmCheck<3, ImmCheckLaneIndex, 1>]>;
  def VST3_LANE_BF : WInst<"vst3_lane", "v*(3!)I", "bQb",
                          [ImmCheck<4, ImmCheckLaneIndex, 1>]>;
  def VST4_LANE_BF : WInst<"vst4_lane", "v*(4!)I", "bQb",
                          [ImmCheck<5, ImmCheckLaneIndex, 1>]>;

  // Load-and-duplicate.
  def VLD1_DUP_BF : WInst<"vld1_dup", ".(c*!)", "bQb">;
  def VLD2_DUP_BF : WInst<"vld2_dup", "2(c*!)", "bQb">;
  def VLD3_DUP_BF : WInst<"vld3_dup", "3(c*!)", "bQb">;
  def VLD4_DUP_BF : WInst<"vld4_dup", "4(c*!)", "bQb">;

  // bf16 -> f32 widening conversions.
  def VCVT_F32_BF16 : SOpInst<"vcvt_f32_bf16", "(F>)(Bq!)",  "Qb", OP_VCVT_F32_BF16>;
  def VCVT_LOW_F32_BF16 : SOpInst<"vcvt_low_f32", "(F>)(BQ!)",  "Qb", OP_VCVT_F32_BF16_LO>;
  def VCVT_HIGH_F32_BF16 : SOpInst<"vcvt_high_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_HI>;

  // Scalar bf16 <-> f32 conversions.
  def SCALAR_CVT_BF16_F32 : SInst<"vcvth_bf16", "(1B)1", "f">;
  def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
}
2050
// f32 -> bf16 narrowing conversions, AArch32 flavor. These are expressed
// via an internal __a32_vcvt_bf16 builtin that the OP_VCVT_BF16_F32_*_A32
// ops presumably wrap (defined in arm_neon_incl.td -- confirm there).
let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
  def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
  def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
  def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16",  "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
  def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
}

// f32 -> bf16 narrowing conversions, AArch64 flavor (direct builtins; same
// intrinsic names as the A32 set above, selected by the ArchGuard).
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
  def VCVT_LOW_BF16_F32_A64 : SInst<"vcvt_low_bf16", "BQ", "Qf">;
  def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
  def VCVT_BF16_F32 : SInst<"vcvt_bf16", "BQ", "f">;

  // bf16 lane copies between vectors.
  def COPY_LANE_BF16 : IOpInst<"vcopy_lane", "..I.I", "b", OP_COPY_LN>;
  def COPYQ_LANE_BF16 : IOpInst<"vcopy_lane", "..IqI", "Qb", OP_COPY_LN>;
  def COPY_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..IQI", "b", OP_COPY_LN>;
  def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
}
2068
// vreinterpret between bfloat16 vectors and the other NEON vector types.
// The AArch32 set omits the types only available on A64 (compare the type
// list in the A64 block below).
let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
  let BigEndianSafe = 1 in {
    defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
        "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
  }
}

// A64 variant: same cross-type reinterprets over a wider type list
// (adds "d"/"Qd" and the trailing "QPk" entry).
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16,neon" in {
  let BigEndianSafe = 1 in {
    defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
        "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
  }
}
2082
// v8.9a/v9.4a LRCPC3 intrinsics
// Single-lane load-acquire (vldap1_lane) and store-release (vstl1_lane)
// for 64-bit element types.
let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "rcpc3,neon" in {
  def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl",
                        [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
  def VSTL1_LANE  : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl",
                        [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
}
2090
// Lookup table read with 2-bit/4-bit indices
// FEAT_LUTv2: vluti2 uses 2-bit indices, vluti4 4-bit indices; the explicit
// ImmCheck0_N bounds give each variant's valid segment-select range.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "lut" in {
  def VLUTI2_B    : SInst<"vluti2_lane", "Q.(qU)I", "cUcPcQcQUcQPc",
                         [ImmCheck<2, ImmCheck0_1>]>;
  def VLUTI2_B_Q  : SInst<"vluti2_laneq", "Q.(QU)I", "cUcPcQcQUcQPc",
                         [ImmCheck<2, ImmCheck0_3>]>;
  def VLUTI2_H    : SInst<"vluti2_lane", "Q.(<qU)I", "sUsPshQsQUsQPsQh",
                         [ImmCheck<2, ImmCheck0_3>]>;
  def VLUTI2_H_Q  : SInst<"vluti2_laneq", "Q.(<QU)I", "sUsPshQsQUsQPsQh",
                         [ImmCheck<2, ImmCheck0_7>]>;
  def VLUTI4_B    : SInst<"vluti4_lane", "..(qU)I", "QcQUcQPc",
                         [ImmCheck<2, ImmCheck0_0>]>;
  def VLUTI4_B_Q  : SInst<"vluti4_laneq", "..UI", "QcQUcQPc",
                         [ImmCheck<2, ImmCheck0_1>]>;
  def VLUTI4_H_X2 : SInst<"vluti4_lane_x2", ".2(<qU)I", "QsQUsQPsQh",
                          [ImmCheck<3, ImmCheck0_1>]>;
  def VLUTI4_H_X2_Q : SInst<"vluti4_laneq_x2", ".2(<U)I", "QsQUsQPsQh",
                          [ImmCheck<3, ImmCheck0_3>]>;

  // bf16 table entries additionally require the bf16 feature.
  let TargetGuard = "lut,bf16" in {
    def VLUTI2_BF      : SInst<"vluti2_lane", "Q.(<qU)I", "bQb",
                              [ImmCheck<2, ImmCheck0_3>]>;
    def VLUTI2_BF_Q    : SInst<"vluti2_laneq", "Q.(<QU)I", "bQb",
                              [ImmCheck<2, ImmCheck0_7>]>;
    def VLUTI4_BF_X2   : SInst<"vluti4_lane_x2", ".2(<qU)I", "Qb",
                              [ImmCheck<3, ImmCheck0_1>]>;
    def VLUTI4_BF_X2_Q   : SInst<"vluti4_laneq_x2", ".2(<U)I", "Qb",
                              [ImmCheck<3, ImmCheck0_3>]>;
  }
}
2121
// FP8 (mf8) -> bf16 conversions (FEAT_FP8). The trailing "V" in each
// prototype presumably threads the fpm_t floating-point-mode operand
// implied by the _fpm suffix -- confirm the modifier in arm_neon_incl.td.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8,neon" in {
  def VBF1CVT_BF16_MF8        : VInst<"vcvt1_bf16_mf8_fpm",      "(QB).V", "m">;
  def VBF1CVT_LOW_BF16_MF8    : VInst<"vcvt1_low_bf16_mf8_fpm",  "B.V",    "Hm">;
  def VBF2CVTL_BF16_MF8       : VInst<"vcvt2_bf16_mf8_fpm",      "(QB).V", "m">;
  def VBF2CVTL_LOW_BF16_MF8   : VInst<"vcvt2_low_bf16_mf8_fpm",  "B.V",    "Hm">;
  def VBF1CVTL2_HIGH_BF16_MF8 : VInst<"vcvt1_high_bf16_mf8_fpm", "B.V",    "Hm">;
  def VBF2CVTL2_HIGH_BF16_MF8 : VInst<"vcvt2_high_bf16_mf8_fpm", "B.V",    "Hm">;
}

// FP8 (mf8) <-> f16/f32 conversions (FEAT_FP8).
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8,neon" in {
  def VF1CVT_F16_MF8        : VInst<"vcvt1_f16_mf8_fpm",      "(>QF).V", "m">;
  def VF1CVT_LOW_F16_MF8    : VInst<"vcvt1_low_f16_mf8_fpm",  "(>F).V",  "Hm">;
  def VF2CVTL_F16_MF8       : VInst<"vcvt2_f16_mf8_fpm",      "(>QF).V", "m">;
  def VF2CVTL_LOW_F16_MF8   : VInst<"vcvt2_low_f16_mf8_fpm",  "(>F).V",  "Hm">;
  def VF1CVTL2_HIGH_F16_MF8 : VInst<"vcvt1_high_f16_mf8_fpm", "(>F).V",  "Hm">;
  def VF2CVTL2_HIGH_F16_MF8 : VInst<"vcvt2_high_f16_mf8_fpm", "(>F).V",  "Hm">;

  // Narrowing conversions into mf8.
  def VCVTN_LOW_F8_F32  : VInst<"vcvt_mf8_f32_fpm",      ".(>>QF)(>>QF)V",  "m">;
  def VCVTN_HIGH_F8_F32 : VInst<"vcvt_high_mf8_f32_fpm", ".(q)(>>F)(>>F)V", "Hm">;
  def VCVTN_F8_F16      : VInst<"vcvt_mf8_f16_fpm",      ".(>F)(>F)V",      "mQm">;
}
2143
// FP8 dot products accumulating into f16 (FEAT_FP8DOT2); the trailing "V"
// carries the fpm operand implied by the _fpm suffix.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8dot2,neon" in {
  def VDOT_F16_MF8 : VInst<"vdot_f16_mf8_fpm", "(>F)(>F)..V", "mQm">;

  def VDOT_LANE_F16_MF8  : VInst<"vdot_lane_f16_mf8_fpm",  "(>F)(>F)..IV", "m", [ImmCheck<3, ImmCheck0_3, 0>]>;
  def VDOT_LANEQ_F16_MF8 : VInst<"vdot_laneq_f16_mf8_fpm", "(>F)(>F).QIV", "m", [ImmCheck<3, ImmCheck0_7, 0>]>;

  def VDOTQ_LANE_F16_MF8  : VInst<"vdot_lane_f16_mf8_fpm",  "(>F)(>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_3, 0>]>;
  def VDOTQ_LANEQ_F16_MF8 : VInst<"vdot_laneq_f16_mf8_fpm", "(>F)(>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_7, 0>]>;
}

// FP8 dot products accumulating into f32 (FEAT_FP8DOT4); lane immediates
// have half the range of the f16 variants since each f32 lane covers four
// mf8 elements.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8dot4,neon" in {
  def VDOT_F32_MF8 : VInst<"vdot_f32_mf8_fpm", "(>>F)(>>F)..V", "mQm">;

  def VDOT_LANE_F32_MF8  : VInst<"vdot_lane_f32_mf8_fpm",  "(>>F)(>>F)..IV", "m", [ImmCheck<3, ImmCheck0_1, 0>]>;
  def VDOT_LANEQ_F32_MF8 : VInst<"vdot_laneq_f32_mf8_fpm", "(>>F)(>>F).QIV", "m", [ImmCheck<3, ImmCheck0_3, 0>]>;

  def VDOTQ_LANE_F32_MF8  : VInst<"vdot_lane_f32_mf8_fpm",  "(>>F)(>>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_1, 0>]>;
  def VDOTQ_LANEQ_F32_MF8 : VInst<"vdot_laneq_f32_mf8_fpm", "(>>F)(>>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_3, 0>]>;
}
2163
// FP8 widening multiply-add long (FEAT_FP8FMA). b/t select bottom/top mf8
// halves for the f16-accumulating forms; bb/bt/tb/tt select the source
// quarters for the f32-accumulating "long-long" forms.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8fma,neon" in {
  def VMLALB_F16_F8 : VInst<"vmlalb_f16_mf8_fpm", "(>F)(>F)..V", "Qm">;
  def VMLALT_F16_F8 : VInst<"vmlalt_f16_mf8_fpm", "(>F)(>F)..V", "Qm">;

  def VMLALLBB_F32_F8 : VInst<"vmlallbb_f32_mf8_fpm", "(>>F)(>>F)..V", "Qm">;
  def VMLALLBT_F32_F8 : VInst<"vmlallbt_f32_mf8_fpm", "(>>F)(>>F)..V", "Qm">;
  def VMLALLTB_F32_F8 : VInst<"vmlalltb_f32_mf8_fpm", "(>>F)(>>F)..V", "Qm">;
  def VMLALLTT_F32_F8 : VInst<"vmlalltt_f32_mf8_fpm", "(>>F)(>>F)..V", "Qm">;

  // Indexed forms: _lane indexes a 64-bit mf8 vector (indices 0-7),
  // _laneq a 128-bit vector (indices 0-15).
  def VMLALB_F16_F8_LANE  : VInst<"vmlalb_lane_f16_mf8_fpm",  "(>F)(>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_7,  0>]>;
  def VMLALB_F16_F8_LANEQ : VInst<"vmlalb_laneq_f16_mf8_fpm", "(>F)(>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_15, 0>]>;
  def VMLALT_F16_F8_LANE  : VInst<"vmlalt_lane_f16_mf8_fpm",  "(>F)(>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_7,  0>]>;
  def VMLALT_F16_F8_LANEQ : VInst<"vmlalt_laneq_f16_mf8_fpm", "(>F)(>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_15, 0>]>;

  def VMLALLBB_F32_F8_LANE  : VInst<"vmlallbb_lane_f32_mf8_fpm",  "(>>F)(>>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_7,  0>]>;
  def VMLALLBB_F32_F8_LANEQ : VInst<"vmlallbb_laneq_f32_mf8_fpm", "(>>F)(>>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_15, 0>]>;
  def VMLALLBT_F32_F8_LANE  : VInst<"vmlallbt_lane_f32_mf8_fpm",  "(>>F)(>>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_7,  0>]>;
  def VMLALLBT_F32_F8_LANEQ : VInst<"vmlallbt_laneq_f32_mf8_fpm", "(>>F)(>>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_15, 0>]>;
  def VMLALLTB_F32_F8_LANE  : VInst<"vmlalltb_lane_f32_mf8_fpm",  "(>>F)(>>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_7,  0>]>;
  def VMLALLTB_F32_F8_LANEQ : VInst<"vmlalltb_laneq_f32_mf8_fpm", "(>>F)(>>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_15, 0>]>;
  def VMLALLTT_F32_F8_LANE  : VInst<"vmlalltt_lane_f32_mf8_fpm",  "(>>F)(>>F).qIV", "Qm", [ImmCheck<3, ImmCheck0_7,  0>]>;
  def VMLALLTT_F32_F8_LANEQ : VInst<"vmlalltt_laneq_f32_mf8_fpm", "(>>F)(>>F)..IV", "Qm", [ImmCheck<3, ImmCheck0_15, 0>]>;
}
2187
// Floating-point absolute minimum/maximum (FEAT_FAMINMAX).
let ArchGuard = "defined(__aarch64__)", TargetGuard = "neon,faminmax" in {
  def FAMIN : WInst<"vamin", "...", "fhQdQfQh">;
  def FAMAX : WInst<"vamax", "...", "fhQdQfQh">;
}

// Floating-point adjust exponent (FSCALE); gated here behind the fp8
// target feature. The third operand ("S") is the signed-integer scale
// vector.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp8,neon" in {
  // fscale
  def FSCALE_V128 : WInst<"vscale", "..(.S)", "QdQfQh">;
  def FSCALE_V64 : WInst<"vscale", "(.q)(.q)(.qS)", "fh">;
}