/llvm-project/llvm/test/CodeGen/RISCV/rvv/
fixed-vectors-segN-load.ll:
   4: define <8 x i8> @load_factor2(ptr %ptr) {
   7: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
  10: %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
  11: %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
  12: %3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
  13: ret <8 x i8> %3
  16: define <8 x i8> @load_factor3(ptr %ptr) {
  19: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
  22: %1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
  23: %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
  [all …]
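For context, the shape these segmented-load tests exercise is one intrinsic call that returns every de-interleaved field as a struct, followed by extractvalue to pick a field. A minimal standalone sketch of that shape follows; the function name and the declare line are illustrative additions, but the intrinsic call itself matches the excerpt above.

  ; Hypothetical standalone example of a factor-2 segmented load: one call
  ; de-interleaves memory into two vectors, extractvalue selects the second.
  declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr, i64)

  define <8 x i8> @example_load_factor2_field1(ptr %ptr) {
    %segs = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
    %field1 = extractvalue { <8 x i8>, <8 x i8> } %segs, 1
    ret <8 x i8> %field1
  }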
fixed-vectors-peephole-vmerge-vops.ll:
   4: declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
   5: declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
   6: declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32)
   7: declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32)
  10: declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
  11: define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 z…
  17: …%a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %…
  18: %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
  19: ret <8 x i32> %b
  23: declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32)
  [all …]
fixed-vectors-vaaddu.ll:
   5: define <8 x i8> @vaaddu_vv_v8i8_floor(<8 x i8> %x, <8 x i8> %y) {
   9: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
  12: %xzv = zext <8 x i8> %x to <8 x i16>
  13: %yzv = zext <8 x i8> %y to <8 x i16>
  14: %add = add nuw nsw <8 x i16> %xzv, %yzv
  15: %div = lshr <8 [all...]
vaaddu-sdnode.ll:
   5: define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
  12: %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
  13: %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
  14: %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
  15: %div = lshr <vscale x 8 x i16> %add, splat (i16 1)
  16: %ret = trunc <vscale x 8 [all...]
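Both vaaddu excerpts above are cut off before the end of the pattern. What they exercise is the usual widen / add / halve / narrow sequence for an unsigned floor-averaging add. A minimal fixed-width sketch of that sequence follows; the function name is made up and the last two lines are completed by assumption rather than copied from either file.

  ; Hypothetical example of the floor-averaging shape behind vaaddu:
  ; widen both inputs, add without wrapping, shift right by one, narrow back.
  define <8 x i8> @example_vaaddu_floor(<8 x i8> %x, <8 x i8> %y) {
    %xz = zext <8 x i8> %x to <8 x i16>
    %yz = zext <8 x i8> %y to <8 x i16>
    %sum = add nuw nsw <8 x i16> %xz, %yz
    %half = lshr <8 x i16> %sum, splat (i16 1)
    %avg = trunc <8 x i16> %half to <8 x i8>
    ret <8 x i8> %avg
  }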
fixed-vectors-setcc-int-vp.ll:
  10: declare <8 x i1> @llvm.vp.icmp.v8i7(<8 x i7>, <8 x i7>, metadata, <8 x i1>, i32)
  12: define <8 x i1> @icmp_eq_vv_v8i7(<8 x i7> %va, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) {
  16: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
  22: %v = call <8 [all...]
fixed-vectors-segN-store.ll:
   5: declare void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, ptr, iXLen)
   6: define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr) {
   9: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
  12: call void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, iXLen 8)
  16: declare void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLen)
  17: define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr) {
  20: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
  23: …all void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, …
  27: declare void @llvm.riscv.seg4.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr, iXLe…
  28: define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr) {
  [all …]
setcc-integer.ll:
  41: define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
  47: %vc = icmp eq <vscale x 8 x i8> %va, %vb
  48: ret <vscale x 8 x i1> %vc
  51: define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
  57: %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  58: %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 [all...]
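The second function in this excerpt stops in the middle of the splat idiom. The usual vector-versus-scalar compare broadcasts the scalar with an insertelement plus a zero-mask shufflevector, then compares against the splat. A fixed-width sketch of that idiom, with a made-up name and the final two lines filled in as an assumption, looks like this:

  ; Hypothetical example of comparing a vector against a splatted scalar:
  ; build the splat from the scalar, then do an element-wise icmp.
  define <8 x i1> @example_icmp_eq_vx(<8 x i8> %va, i8 %b) {
    %head = insertelement <8 x i8> poison, i8 %b, i32 0
    %splat = shufflevector <8 x i8> %head, <8 x i8> poison, <8 x i32> zeroinitializer
    %cmp = icmp eq <8 x i8> %va, %splat
    ret <8 x i1> %cmp
  }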
/llvm-project/llvm/test/Verifier/ |
vp-intrinsics.ll:
   3: define void @test_vp_int(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n) {
   4: %r0 = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
   5: %r1 = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
   6: %r2 = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
   7: %r3 = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
   8: %r4 = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
   9: %r5 = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
  10: %r6 = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
  11: %r7 = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
  12: %r8 = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %i0, <8 x i32> %i1, <8 x i1> %m, i32 %n)
  [all …]
/llvm-project/llvm/test/CodeGen/AArch64/ |
abd-combine.ll:
   4: define <8 x i16> @abdu_base(<8 x i16> %src1, <8 x i16> %src2) {
   7: ; CHECK-NEXT: uabd v0.8h, v0.8h, v1.8h
   9: %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
  10: %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
  11: %sub = sub <8 x i32> %zextsrc1, %zextsrc2
  12: %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 0)
  13: %result = trunc <8 x i32> %abs to <8 x i16>
  14: ret <8 x i16> %result
  17: define <8 x i16> @abdu_const(<8 x i16> %src1) {
  21: ; CHECK-NEXT: ushll2 v2.4s, v0.8h, #0
  [all …]
sve-fixed-length-int-mulh.ll:
  16: define <8 x i8> @smulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 {
  25: %insert = insertelement <8 x i16> undef, i16 8, i64 0
  26: %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
  27: %1 = sext <8 x i8> %op1 to <8 x i16>
  28: %2 = sext <8 x i8> %op2 to <8 x i16>
  29: %mul = mul <8 x i16> %1, %2
  30: %shr = lshr <8 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  31: %res = trunc <8 x i16> %shr to <8 x i8>
  32: ret <8 x i8> %res
  48: … <16 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i1…
  [all …]
aarch64-known-bits-hadd.ll:
   4: declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>)
   5: declare <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16>, <8 x i16>)
   6: declare <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16>, <8 x i16>)
   7: declare <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16>, <8 x i16>)
   9: define <8 x i16> @haddu_zext(<8 x i8> %a0, <8 x i8> %a1) {
  12: ; CHECK-NEXT: uhadd v0.8b, v0.8b, v1.8b
  13: ; CHECK-NEXT: ushll v0.8h, v0.8b, #0
  15: %x0 = zext <8 x i8> %a0 to <8 x i16>
  16: %x1 = zext <8 x i8> %a1 to <8 x i16>
  17: %hadd = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %x0, <8 x i16> %x1)
  [all …]
hadd-combine.ll:
   4: define <8 x i16> @haddu_base(<8 x i16> %src1, <8 x i16> %src2) {
   7: ; CHECK-NEXT: uhadd v0.8h, v0.8h, v1.8h
   9: %zextsrc1 = zext <8 x i16> %src1 to <8 x i32>
  10: %zextsrc2 = zext <8 x i16> %src2 to <8 x i32>
  11: %add = add <8 x i32> %zextsrc1, %zextsrc2
  12: %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  13: %result = trunc <8 x i32> %resulti16 to <8 x i16>
  14: ret <8 x i16> %result
  17: define <8 x i16> @haddu_const(<8 x i16> %src1) {
  20: ; CHECK-NEXT: movi v1.8h, #1
  [all …]
sme2-intrinsics-fmlas16.ll:
   6: define void @test_fmla_f16_vg2_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 {
  13: call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
  15: call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
  19: define void @test_fmla_f16_vg4_single(i32 %slice, <vscale x 8 [all...]
sme2-intrinsics-mlals.ll:
   8: define void @multi_vector_add_single_vg2x1_bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 …
  15: …aarch64.sme.fmlal.single.vg2x1.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloa…
  17: …ch64.sme.fmlal.single.vg2x1.nxv8bf16(i32 %slice.14, <vscale x 8 x bfloat> %zn, <vscale x 8 x bfloa…
  21: define void @multi_vector_add_single_vg2x1_f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x h…
  28: …m.aarch64.sme.fmlal.single.vg2x1.nxv8f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x half> …
  30: …arch64.sme.fmlal.single.vg2x1.nxv8f16(i32 %slice.14, <vscale x 8 x half> %zn, <vscale x 8 x half> …
  34: define void @multi_vector_add_single_vg2x1_s16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i1…
  41: …vm.aarch64.sme.smlal.single.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %…
  43: …aarch64.sme.smlal.single.vg2x1.nxv8i16(i32 %slice.14, <vscale x 8 x i16> %zn, <vscale x 8 x i16> %…
  47: define void @multi_vector_add_single_vg2x1_u16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i1…
  [all …]
/llvm-project/llvm/test/Transforms/InstCombine/AArch64/ |
sve-intrinsic-abs-srshl.ll:
   6: define <vscale x 8 x i16> @srshl_abs_undef_merge(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i1> %pg2) #0 {
   8: ; CHECK-NEXT: [[ABS:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[PG:%.*]], <vscale x 8 x i16> [[A:%.*]])
   9: ; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 [all...]
/llvm-project/llvm/test/CodeGen/SystemZ/ |
vec-cmp-02.ll:
   6: define <8 x i16> @f1(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
  10: %cmp = icmp eq <8 x i16> %val1, %val2
  11: %ret = sext <8 x i1> %cmp to <8 x i16>
  12: ret <8 x i16> %ret
  16: define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
  21: %cmp = icmp ne <8 x i16> %val1, %val2
  22: %ret = sext <8 x i1> %cmp to <8 x i16>
  23: ret <8 x i16> %ret
  27: define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
  31: %cmp = icmp sgt <8 x i16> %val1, %val2
  [all …]
/llvm-project/llvm/test/Transforms/Reassociate/ |
reassoc_bool_vec.ll:
   4: define <8 x i1> @vector0(<8 x i1> %b0, <8 x i1> %b1, <8 x i1> %b2, <8 x i1> %b3, <8 x i1> %b4, <8 x i1> %b5, <8 x i1> %b6, <8 x i1> %b7) {
   5: ; CHECK-LABEL: define <8 [all...]
/llvm-project/mlir/test/Dialect/Arith/ |
ops.mlir:
  11: func.func @test_addi_tensor(%arg0 : tensor<8x8xi64>, %arg1 : tensor<8x8xi64>) -> tensor<8x8xi64> {
  12: %0 = arith.addi %arg0, %arg1 : tensor<8x8xi64>
  13: return %0 : tensor<8x8xi64>
  17: func.func @test_addi_vector(%arg0 : vector<8xi64>, %arg1 : vector<8xi64>) -> vector<8xi64> {
  18: %0 = arith.addi %arg0, %arg1 : vector<8xi64>
  19: return %0 : vector<8xi64>
  23: func.func @test_addi_scalable_vector(%arg0 : vector<[8]xi64>, %arg1 : vector<[8]xi64>) -> vector<[8…
  24: %0 = arith.addi %arg0, %arg1 : vector<[8]xi64>
  25: return %0 : vector<[8]xi64>
  35: func.func @test_addui_extended_tensor(%arg0 : tensor<8x8xi64>, %arg1 : tensor<8x8xi64>) -> tensor<8…
  [all …]
/llvm-project/llvm/test/CodeGen/ARM/ |
vtbl.ll:
   3: %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
   4: %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
   5: %struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
   7: define <8 x i8> @vtbl1(ptr %A, ptr %B) nounwind {
   9: ;CHECK: vtbl.8
  10: %tmp1 = load <8 x i8>, ptr %A
  11: %tmp2 = load <8 x i8>, ptr %B
  12: %tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
  13: ret <8 x i8> %tmp3
  16: define <8 x i8> @vtbl2(ptr %A, ptr %B) nounwind {
  [all …]
cmse.ll:
   3: ; RUN: FileCheck %s --check-prefix=CHECK-8B
   5: ; RUN: FileCheck %s --check-prefix=CHECK-8B
   7: ; RUN: FileCheck %s --check-prefix=CHECK-8M
   9: ; RUN: FileCheck %s --check-prefix=CHECK-8M
  17: ; CHECK-8B-LABEL: func1:
  18: ; CHECK-8B: @ %bb.0: @ %entry
  19: ; CHECK-8B-NEXT: push {r7, lr}
  20: ; CHECK-8B-NEXT: push {r4, r5, r6, r7}
  21: ; CHECK-8B-NEXT: mov r7, r11
  22: ; CHECK-8B-NEXT: mov r6, r10
  [all …]
cmse-clear.ll:
   3: ; RUN: FileCheck %s --check-prefix=CHECK-8B
   5: ; RUN: FileCheck %s --check-prefix=CHECK-8B
   7: ; RUN: FileCheck %s --check-prefix=CHECK-8M-SOFT
   9: ; RUN: FileCheck %s --check-prefix=CHECK-8M-SOFT
  11: ; RUN: FileCheck %s --check-prefix=CHECK-8M-SOFTFP
  13: ; RUN: FileCheck %s --check-prefix=CHECK-8M-SOFTFP
  28: ; CHECK-8B-LABEL: ns_entry:
  29: ; CHECK-8B: @ %bb.0: @ %entry
  30: ; CHECK-8B-NEXT: push {r7, lr}
  31: ; CHECK-8B-NEXT: mov r1, r0
  [all …]
/llvm-project/llvm/test/CodeGen/Hexagon/ |
signext-inreg.ll:
  10: ; CHECK-NEXT: r1 = extract(r1,#8,#0)
  19: ; CHECK-64B-NEXT: r1 = extract(r1,#8,#0)
  28: ; CHECK-128B-NEXT: r1 = extract(r1,#8,#0)
  43: ; CHECK-NEXT: r3 = extract(r3,#8,#0)
  44: ; CHECK-NEXT: r29 = add(r29,#-8)
  49: ; CHECK-NEXT: r5 = extract(r5,#8,#0)
  54: ; CHECK-NEXT: r13 = extract(r13,#8,#0)
  57: ; CHECK-NEXT: r9:8 = memd(r29+#32)
  60: ; CHECK-NEXT: r9 = extract(r9,#8,#0)
  66: ; CHECK-NEXT: r11 = extract(r11,#8,# [all...]
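Only the Hexagon CHECK lines are visible in this excerpt; the IR being compiled is not shown. Purely as an illustrative assumption (not text from signext-inreg.ll), a sign-extend-in-register of the low 8 bits of an i32 lane is commonly written as a trunc followed by a sext, which is the kind of input that lowers to the extract(rN,#8,#0) instructions checked above:

  ; Hypothetical illustration: sign-extend the low 8 bits of each i32 lane
  ; in place by truncating to i8 and sign-extending back to i32.
  define <2 x i32> @example_sext_inreg_i8(<2 x i32> %a) {
    %lo = trunc <2 x i32> %a to <2 x i8>
    %res = sext <2 x i8> %lo to <2 x i32>
    ret <2 x i32> %res
  }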
/llvm-project/llvm/test/CodeGen/X86/ |
vector-shuffle-512-v8.ll:
   5: define <8 x double> @shuffle_v8f64_00000000(<8 x double> %a, <8 x double> %b) {
  10: %shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
  11: ret <8 x double> %shuffle
  14: define <8 x double> @shuffle_v8f64_22222222(<8 x double> %a, <8 [all...]
/llvm-project/mlir/test/Dialect/Vector/CPU/X86/ |
vector-transpose-lowering.mlir:
   8: func.func @transpose4x8xf32(%arg0: vector<4x8xf32>) -> vector<8x4xf32> {
  13: // CHECK-NEXT: vector.shuffle {{.*}} [0, 8, 1, 9, 4, 12, 5, 13] : vector<8xf32>, vector<8xf32>
  14: // CHECK-NEXT: vector.shuffle {{.*}} [2, 10, 3, 11, 6, 14, 7, 15] : vector<8xf32>, vector<8xf32>
  15: // CHECK-NEXT: vector.shuffle {{.*}} [0, 8, 1, 9, 4, 12, 5, 13] : vector<8xf32>, vector<8xf32>
  16: // CHECK-NEXT: vector.shuffle {{.*}} [2, 10, 3, 11, 6, 14, 7, 15] : vector<8xf32>, vector<8xf32>
  17: // CHECK-NEXT: vector.shuffle {{.*}} [0, 1, 8, 9, 4, 5, 12, 13] : vector<8xf32>, vector<8xf32>
  18: // CHECK-NEXT: vector.shuffle {{.*}} [2, 3, 10, 11, 6, 7, 14, 15] : vector<8xf32>, vector<8xf32>
  19: // CHECK-NEXT: vector.shuffle {{.*}} [0, 1, 8, 9, 4, 5, 12, 13] : vector<8xf32>, vector<8xf32>
  20: // CHECK-NEXT: vector.shuffle {{.*}} [2, 3, 10, 11, 6, 7, 14, 15] : vector<8xf32>, vector<8xf32>
  21: // CHECK-NEXT: vector.shuffle {{.*}} [0, 1, 2, 3, 8, 9, 10, 11] : vector<8xf32>, vector<8xf32>
  [all …]
/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/ |
combine-udiv.ll:
   6: define <8 x i16> @combine_vec_udiv_uniform(<8 x i16> %x) {
  10: ; SDAG-NEXT: dup v1.8h, w8
  11: ; SDAG-NEXT: umull2 v2.4s, v0.8h, v1.8h
  13: ; SDAG-NEXT: uzp2 v1.8h, v1.8h, v2.8h
  14: ; SDAG-NEXT: sub v0.8h, v0.8 [all...]