/llvm-project/llvm/test/MC/AArch64/

  rprfm.s
    273: prfm #24, [x0, w0, uxtw]
    277: prfm #25, [x0, w0, uxtw]
    281: prfm #26, [x0, w0, uxtw]
    285: prfm #27, [x0, w0, uxtw]
    289: prfm #28, [x0, w0, uxtw]
    293: prfm #29, [x0, w0, uxtw]
    297: prfm #30, [x0, w0, uxtw]
    301: prfm #31, [x0, w0, uxtw]
    305: prfm #24, [x0, w0, uxtw #3]
    309: prfm #25, [x0, w0, uxtw #3]
    [all …]
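These hits are PRFM (register offset) forms where a 32-bit index register is zero-extended before being added to the base. A minimal sketch of the same addressing mode with a named prefetch operation instead of a raw immediate (operands chosen for illustration):

    // prefetch for load into L1, keep: address = x0 + (zext(w1) << 3)
    prfm pldl1keep, [x0, w1, uxtw #3]
    // unshifted form: address = x0 + zext(w1)
    prfm pldl1keep, [x0, w1, uxtw]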
  arm64-arithmetic-encoding.s
    172: add w1, w2, w3, uxtw
    181: ; CHECK: add w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x0b]
    190: add x1, x2, w3, uxtw
    197: ; CHECK: add x1, x2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x8b]
    203: add w1, wsp, w3, uxtw #0
    216: sub w1, w2, w3, uxtw
    225: ; CHECK: sub w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x4b]
    234: sub x1, x2, w3, uxtw
    241: ; CHECK: sub x1, x2, w3, uxtw ; encoding: [0x41,0x40,0x23,0xcb]
    247: sub w1, wsp, w3, uxtw #0
    [all …]
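These lines exercise ADD/SUB with an extended register operand. A minimal sketch of what the uxtw extension means in that form (register choices illustrative):

    // the 32-bit w3 is zero-extended to 64 bits, optionally shifted, then added
    add x1, x2, w3, uxtw        // x1 = x2 + zext(w3)
    add x1, x2, w3, uxtw #2     // x1 = x2 + (zext(w3) << 2)
    // with wsp as an operand the extended-register form is used, written uxtw #0
    add w1, wsp, w3, uxtw #0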
/llvm-project/llvm/test/CodeGen/Thumb2/

  mve-scatter-ind32-scaled.ll
    4: ; VLDRH.u32 Qd, [base, offs, #uxtw #1]
    9: ; CHECK-NEXT: vstrh.32 q0, [r0, q1, uxtw #1]
    19: ; VSTRW.32 Qd, [base, offs, uxtw #2]
    24: ; CHECK-NEXT: vstrw.32 q0, [r0, q1, uxtw #2]
    33: ; VSTRW.32 Qd, [base, offs, uxtw #2]
    38: ; CHECK-NEXT: vstrw.32 q0, [r0, q1, uxtw #2]
    48: ; VSTRW.32 Qd, [base, offs.zext, uxtw #2]
    53: ; CHECK-NEXT: vstrw.32 q0, [r0, q1, uxtw #2]
    63: ; VSTRW.32 Qd, [base, offs.sext, uxtw #2]
    68: ; CHECK-NEXT: vstrw.32 q0, [r0, q1, uxtw #2]
    [all …]

/llvm-project/llvm/test/CodeGen/AArch64/

  sve-intrinsics-gather-loads-32bit-scaled-offsets.ll
    5: ; LD1H, LD1W, LD1D: base + 32-bit scaled offset, sign (sxtw) or zero (uxtw)
    7: ; e.g. ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
    14: ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
    16: …%load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> …
    38: ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
    40: …%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1> …
    63: ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
    65: …%load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1> …
    85: ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
    87: …%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1> …
    [all …]
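The header comment in this test describes the addressing form being exercised: a scalar base plus a vector of 32-bit offsets that are zero-extended (uxtw) or sign-extended (sxtw) and scaled by the element size. A minimal assembly sketch of the uxtw variant (register choices illustrative):

    // each active lane loads a halfword from x0 + (zext(offset) << 1)
    ld1h { z0.s }, p0/z, [x0, z1.s, uxtw #1]
    // word elements: offsets are zero-extended and scaled by 4
    ld1w { z0.s }, p0/z, [x0, z1.s, uxtw #2]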
  sve-intrinsics-ff-gather-loads-32bit-scaled-offsets.ll
    5: ; LDFF1H, LDFF1W, LDFF1D: base + 32-bit scaled offset, sign (sxtw) or zero (uxtw)
    7: ; e.g. ldff1h z0.d, p0/z, [x0, z0.d, uxtw #1]
    14: ; CHECK-NEXT: ldff1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
    16: …%load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1…
    38: ; CHECK-NEXT: ldff1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
    40: …%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1…
    63: ; CHECK-NEXT: ldff1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
    65: …%load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1…
    85: ; CHECK-NEXT: ldff1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
    87: …%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1…
    [all …]

  sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
    6: ; (uxtw) extended to 64 bits.
    7: ; e.g. st1h { z0.d }, p0, [x0, z1.d, uxtw]
    14: ; CHECK-NEXT: st1b { z0.s }, p0, [x0, z1.s, uxtw]
    17: call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
    40: ; CHECK-NEXT: st1b { z0.d }, p0, [x0, z1.d, uxtw]
    43: call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
    67: ; CHECK-NEXT: st1h { z0.s }, p0, [x0, z1.s, uxtw]
    70: call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
    93: ; CHECK-NEXT: st1h { z0.d }, p0, [x0, z1.d, uxtw]
    96: call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i16(<vscale x 2 x i16> %data_trunc,
    [all …]

  sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll
    6: ; (uxtw) extended to 64 bits.
    7: ; e.g. st1h { z0.d }, p0, [x0, z1.d, uxtw #1]
    14: ; CHECK-NEXT: st1h { z0.s }, p0, [x0, z1.s, uxtw #1]
    17: call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16(<vscale x 4 x i16> %data_trunc,
    40: ; CHECK-NEXT: st1h { z0.d }, p0, [x0, z1.d, uxtw #1]
    43: call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
    67: ; CHECK-NEXT: st1w { z0.s }, p0, [x0, z1.s, uxtw #2]
    69: call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32(<vscale x 4 x i32> %data,
    91: ; CHECK-NEXT: st1w { z0.d }, p0, [x0, z1.d, uxtw #2]
    94: call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
    [all …]
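These are the store-side counterparts: each active lane is written to a scalar base plus a 32-bit vector offset that is zero-extended and, in the scaled variants, multiplied by the element size. A minimal assembly sketch (register choices illustrative):

    // each active lane stores a halfword to x0 + (zext(offset) << 1)
    st1h { z0.s }, p0, [x0, z1.s, uxtw #1]
    // unscaled byte scatter: x0 + zext(offset)
    st1b { z0.s }, p0, [x0, z1.s, uxtw]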
  sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll
    6: ; (uxtw) extended to 64 bits.
    7: ; e.g. ldff1h { z0.d }, p0/z, [x0, z0.d, uxtw]
    14: ; CHECK-NEXT: ldff1b { z0.s }, p0/z, [x0, z0.s, uxtw]
    16: %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
    38: ; CHECK-NEXT: ldff1b { z0.d }, p0/z, [x0, z0.d, uxtw]
    40: %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
    63: ; CHECK-NEXT: ldff1h { z0.s }, p0/z, [x0, z0.s, uxtw]
    65: %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
    87: ; CHECK-NEXT: ldff1h { z0.d }, p0/z, [x0, z0.d, uxtw]
    89: %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg,
    [all …]

  sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll
    6: ; (uxtw) extended to 64 bits.
    7: ; e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
    14: ; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
    16: %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
    38: ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
    40: %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
    63: ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
    65: %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
    87: ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
    89: %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg,
    [all …]
  sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
    237: ; LD1B, LD1W, LD1H, LD1D: base + 64-bit uxtw'd unscaled offset
    238: ; e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
    244: ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
    246: %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
    251: <vscale x 2 x i64> %uxtw)
    259: ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
    261: %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
    266: <vscale x 2 x i64> %uxtw)
    274: ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
    276: %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
    [all …]
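Here the IR zero-extends the 64-bit offsets explicitly (the llvm.aarch64.sve.uxtw intrinsic) and the CHECK lines expect that extend to be absorbed into the gather's uxtw addressing form rather than emitted as a separate instruction. A sketch of the expected output (registers illustrative):

    // the explicit zero-extend of the offset vector is folded into the
    // addressing mode, so only the gather itself is emitted
    ld1b { z0.d }, p0/z, [x0, z1.d, uxtw]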
  sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll
    5: ; STNT1B, STNT1W, STNT1H, STNT1D: base + 32-bit unscaled offset, zero (uxtw)
    17: call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
    31: call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
    44: call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32> %data,
    56: call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32(<vscale x 4 x float> %data,
    64: declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr,…
    65: declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr,…
    72: declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, pt…
    73: declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, pt…
    78: declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, pt…
    [all …]
  sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
    8: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
    18: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.s, uxtw]
    28: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.s, uxtw]
    38: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
    48: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.d, uxtw]
    58: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.d, uxtw]
    70: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
    80: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.s, uxtw]
    90: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.s, uxtw]
    100: ; CHECK-NEXT: prfb pldl1strm, p0, [x8, z0.s, uxtw]
    [all …]

  sve-intrinsics-gather-loads-64bit-scaled-offset.ll
    184: ; e.g. ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
    190: ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
    192: %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
    197: … <vscale x 2 x i64> %uxtw)
    205: ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
    207: %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
    212: … <vscale x 2 x i64> %uxtw)
    220: ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
    222: %uxtw = call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> undef,
    227: … <vscale x 2 x i64> %uxtw)
    [all …]
  sve2-intrinsics-nt-gather-loads-32bit-unscaled-offset.ll
    5: ; LDNT1B, LDNT1W, LDNT1H, LDNT1D: base + 32-bit unscaled offsets, zero (uxtw)
    16: %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
    29: %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
    42: %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg,
    53: …%load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %p…
    59: ; LDNT1SB, LDNT1SW, LDNT1SH: base + 32-bit unscaled offsets, zero (uxtw)
    70: %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
    83: %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
    91: declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscal…
    96: declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vsc…
    [all …]

  sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
    8: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
    10: …call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vsca…
    28: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
    30: …call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vsca…
    58: ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
    60: …call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vsca…
    77: ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
    79: …call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vsca…
    108: ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
    110: …call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vsca…
    [all …]
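These tests drive the gather-prefetch intrinsics; the uxtw forms prefetch from a scalar base plus zero-extended 32-bit vector indexes scaled by the access size. A minimal assembly sketch (operands illustrative):

    // per-lane byte prefetch from x0 + zext(index)
    prfb pldl1strm, p0, [x0, z0.s, uxtw]
    // per-lane halfword prefetch from x0 + (zext(index) << 1)
    prfh pldl1strm, p0, [x0, z0.s, uxtw #1]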
/llvm-project/llvm/test/MC/AArch64/SVE/

  uxtw.s
    12: uxtw z0.d, p0/m, z0.d
    18: uxtw z31.d, p7/m, z31.d
    34: uxtw z4.d, p7/m, z31.d
    46: uxtw z4.d, p7/m, z31.d
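uxtw is also an SVE instruction in its own right: a predicated zero-extend of the low 32 bits of each 64-bit element. A minimal sketch (registers illustrative):

    // where p0 is true: z0.d = zext(z1.d[31:0]); inactive lanes keep z0
    uxtw z0.d, p0/m, z1.d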
  adr.s
    70: adr z0.d, [z0.d, z0.d, uxtw]
    76: adr z0.d, [z0.d, z0.d, uxtw #0]
    82: adr z0.d, [z0.d, z0.d, uxtw #1]
    88: adr z0.d, [z0.d, z0.d, uxtw #2]
    94: adr z0.d, [z0.d, z0.d, uxtw #3]
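SVE ADR computes per-lane addresses without loading; with uxtw the low 32 bits of the index vector are zero-extended and optionally shifted before being added to the base vector. A minimal sketch (registers illustrative):

    // per lane: z0.d = z1.d + (zext(z2.d[31:0]) << 2)
    adr z0.d, [z1.d, z2.d, uxtw #2]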
  ld1h-sve-only.s
    14: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
    26: ld1h { z31.s }, p7/z, [sp, z31.s, uxtw #1]
    50: ld1h { z21.d }, p5/z, [x10, z21.d, uxtw]
    62: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]

  st1h-sve-only.s
    14: st1h { z0.s }, p0, [x0, z0.s, uxtw]
    26: st1h { z0.d }, p0, [x0, z0.d, uxtw]
    38: st1h { z0.s }, p0, [x0, z0.s, uxtw #1]
    50: st1h { z0.d }, p0, [x0, z0.d, uxtw #1]

  ld1sh-sve-only.s
    14: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
    26: ld1sh { z31.s }, p7/z, [sp, z31.s, uxtw #1]
    50: ld1sh { z21.d }, p5/z, [x10, z21.d, uxtw]
    62: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]

  st1w-sve-only.s
    14: st1w { z0.s }, p0, [x0, z0.s, uxtw]
    26: st1w { z0.d }, p0, [x0, z0.d, uxtw]
    38: st1w { z0.s }, p0, [x0, z0.s, uxtw #2]
    50: st1w { z0.d }, p0, [x0, z0.d, uxtw #2]
  ld1w-sve-only.s
    14: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
    26: ld1w { z31.s }, p7/z, [sp, z31.s, uxtw #2]
    50: ld1w { z21.d }, p5/z, [x10, z21.d, uxtw]
    62: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]

  ldff1sh.s
    48: ldff1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
    60: ldff1sh { z31.s }, p7/z, [sp, z31.s, uxtw #1]
    84: ldff1sh { z21.d }, p5/z, [x10, z21.d, uxtw]
    96: ldff1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]

  ldff1w.s
    48: ldff1w { z0.s }, p0/z, [x0, z0.s, uxtw]
    60: ldff1w { z31.s }, p7/z, [sp, z31.s, uxtw #2]
    84: ldff1w { z21.d }, p5/z, [x10, z21.d, uxtw]
    96: ldff1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
/llvm-project/llvm/test/tools/llvm-mca/ARM/

  m55-mve-ldst.s
    43: vldrw.32 q1, [r0, q0, uxtw #2]
    44: vldrh.16 q1, [r0, q0, uxtw #1]
    45: vldrh.u32 q1, [r0, q0, uxtw #1]
    46: vldrh.s32 q1, [r0, q0, uxtw #1]
    91: vstrw.32 q1, [r0, q0, uxtw #2]
    92: vstrh.16 q1, [r0, q0, uxtw #1]
    93: vstrh.32 q1, [r0, q0, uxtw #1]
    156: # CHECK-NEXT: 1 6 2.00 * vldrw.u32 q1, [r0, q0, uxtw #2]
    157: # CHECK-NEXT: 1 6 2.00 * vldrh.u16 q1, [r0, q0, uxtw #1]
    158: # CHECK-NEXT: 1 6 2.00 * vldrh.u32 q1, [r0, q0, uxtw #1]
    [all …]