/llvm-project/clang/test/CodeGen/aarch64-sme2p1-intrinsics/
H A D | acle_sme2p1_movaz.c |
|
H A D | acle_sme2p1_zero.c |
|
/llvm-project/clang/test/Sema/aarch64-sme2-intrinsics/ |
H A D | acle_sme2_fmlas16.c |
     8  void test_features_f16f16(uint32_t slice, …
    18    svmla_single_za16_f16_vg1x2(slice, zn2, zm);
    20    svmla_single_za16_f16_vg1x4(slice, zn4, zm);
    22    svmls_single_za16_f16_vg1x2(slice, zn2, zm);
    24    svmls_single_za16_f16_vg1x4(slice, zn4, zm);
    26    svmla_za16_f16_vg1x2(slice, zn2, zm2);
    28    svmla_za16_f16_vg1x4(slice, zn4, zm4);
    30    svmls_za16_f16_vg1x2(slice, zn2, zm2);
    32    svmls_za16_f16_vg1x4(slice, zn4, zm4);
    34    svmla_lane_za16_f16_vg1x2(slice, zn…
    69  test_imm(uint32_t slice, svfloat16_t zm, svfloat16x2_t zn2, svfloat16x4_t zn4, svbfloat16_t bzm, svbfloat16x2_t bzn2, svbfloat16x4_t bzn4)
        [all …]
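The checks above gate the ZA16 half-precision MLA/MLS intrinsics on the required target features. For orientation, a minimal well-formed call site is sketched below; the intrinsic name and the streaming/ZA attributes come from the excerpt, while the wrapper function name and the feature assumption (SME2 plus sme-f16f16) are mine, not taken from the test.

    #include <arm_sme.h>

    // Sketch only: multiply each vector of zn2 by zm and accumulate into the ZA.H
    // vector groups selected by 'slice'. Assumes SME2 + FEAT_SME_F16F16 are enabled.
    void mla_za16_example(uint32_t slice, svfloat16x2_t zn2, svfloat16_t zm)
        __arm_streaming __arm_inout("za") {
      svmla_single_za16_f16_vg1x2(slice, zn2, zm);
    }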
H A D | acle_sme2_add_sub_za16.c |
     7  void test_features(uint32_t slice, svfloat16x2_t zn2, svfloat16x4_t zn4, …
    10    svadd_za16_f16_vg1x2(slice, zn2);
    12    svadd_za16_f16_vg1x4(slice, zn4);
    14    svsub_za16_f16_vg1x2(slice, zn2);
    16    svsub_za16_f16_vg1x4(slice, zn4);
    19    svadd_za16_bf16_vg1x2(slice, bzn2);
    21    svadd_za16_bf16_vg1x4(slice, bzn4);
    23    svsub_za16_bf16_vg1x2(slice, bzn2);
    25    svsub_za16_bf16_vg1x4(slice, bzn4);
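Likewise for the ZA16 add/sub intrinsics: a minimal valid use, under the same feature assumption, would look roughly like the sketch below. The wrapper name is invented; the calls mirror the excerpt above.

    #include <arm_sme.h>

    // Sketch only: accumulate/subtract multi-vector operands into the ZA.H vector
    // groups selected by 'slice'. Assumes SME2 half-precision features are enabled.
    void add_sub_za16_example(uint32_t slice, svfloat16x2_t zn2, svfloat16x4_t zn4)
        __arm_streaming __arm_inout("za") {
      svadd_za16_f16_vg1x2(slice, zn2);  // add the two vectors of zn2
      svsub_za16_f16_vg1x4(slice, zn4);  // subtract the four vectors of zn4
    }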
|
/llvm-project/clang/test/CodeGen/aarch64-sme2-intrinsics/ |
H A D | acle_sme2_fmlas16.c |
|
H A D | acle_sme2_add_sub_za16.c |
|
/llvm-project/clang/test/Sema/aarch64-sme-intrinsics/ |
H A D | acle_sme_imm.cpp |
    15  void test_range_0_0(uint32_t slice, svbool_t pg, void *ptr) __arm_streaming __arm_inout("za") {
    17    SVE_ACLE_FUNC(svld1_hor_za8,,,)(-1, slice, pg, ptr);
    19    SVE_ACLE_FUNC(svst1_ver_za8,,,)(1, slice, pg, ptr);
    21    SVE_ACLE_FUNC(svld1_hor_vnum_za8,,,)(-1, slice, pg, ptr, 1);
    23    SVE_ACLE_FUNC(svst1_ver_vnum_za8,,,)(1, slice, pg, ptr, 1);
    26    SVE_ACLE_FUNC(svread_hor_za8, _s8, _m,)(svundef_s8(), pg, -1, slice);
    28    SVE_ACLE_FUNC(svread_ver_za8, _s8, _m,)(svundef_s8(), pg, 1, slice);
    30    SVE_ACLE_FUNC(svwrite_hor_za8, _s8, _m,)(-1, slice, pg, svundef_s8());
    32    SVE_ACLE_FUNC(svwrite_ver_za8, _s8, _m,)(1, slice, pg, svundef_s8());
    35  void test_range_0_1(uint32_t slice, svbool_t pg, void *ptr) __arm_streaming __arm_inout("za") {
        [all …]
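The _0_0 in the test name is the valid tile range: ZA8 exposes a single tile, so any tile immediate other than 0 is rejected. A well-formed counterpart to the rejected calls is sketched below; the intrinsic and attributes are taken from the excerpt, the wrapper name is invented.

    #include <arm_sme.h>

    // Sketch only: load one horizontal ZA8 tile slice. The tile immediate must be 0
    // because ZA8 has exactly one tile.
    void load_za8_row(uint32_t slice, svbool_t pg, const void *ptr)
        __arm_streaming __arm_inout("za") {
      svld1_hor_za8(0, slice, pg, ptr);
    }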
|
/llvm-project/llvm/test/CodeGen/AArch64/ |
H A D | sme2p1-intrinsics-zero.ll |
     6  define void @test_svzero_za64_vg1x2(i32 %slice) #0 {
    13    tail call void @llvm.aarch64.sme.zero.za64.vg1x2(i32 %slice)
    17  define void @test_svzero_za64_vg1x2_offset(i32 %slice) #0 {
    24    %slice.max = add i32 %slice, 7
    25    tail call void @llvm.aarch64.sme.zero.za64.vg1x2(i32 %slice.max)
    29  define void @test_svzero_za64_vg1x4(i32 %slice) #0 {
    36    tail call void @llvm.aarch64.sme.zero.za64.vg1x4(i32 %slice)
    40  define void @test_svzero_za64_vg1x4_offset(i32 %slice) #0 {
    47    %slice.min = add i32 %slice, 1
    48    tail call void @llvm.aarch64.sme.zero.za64.vg1x4(i32 %slice.min)
        [all …]
|
H A D | sme2p1-intrinsics-movaz.ll |
    13  define {<vscale x 16 x i8>, <vscale x 16 x i8>} @test_readz_hor_z8_i8_x2(i32 %tile, i32 %slice) #0 {
    20    …<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.readz.horiz.x2.nxv16i8(i32 0, i32 %slice)
    21    %slice.max = add i32 %slice, 14
    22    …ale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.readz.horiz.x2.nxv16i8(i32 0, i32 %slice.max)
    25  define {<vscale x 8 x i16>, <vscale x 8 x i16>} @test_readz_hor_z16_i16_x2(i32 %slice) #0 {
    32    …<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.readz.horiz.x2.nxv8i16(i32 0, i32 %slice)
    33    %slice.max = add i32 %slice, 6
    34    …ale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.readz.horiz.x2.nxv8i16(i32 1, i32 %slice.max)
    38  define {<vscale x 4 x i32>, <vscale x 4 x i32>} @test_readz_hor_z32_i32_x2(i32 %slice) #0 {
    45    …<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.readz.horiz.x2.nxv4i32(i32 0, i32 %slice)
        [all …]
|
H A D | sme2-intrinsics-extract-mova.ll |
    10  define { <vscale x 16 x i8>, <vscale x 16 x i8> } @za_read_horiz_vg2_b(i32 %slice) {
    17    … <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.read.hor.vg2.nxv16i8(i32 0, i32 %slice)
    18    %slice.14 = add i32 %slice, 14
    19    …scale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.read.hor.vg2.nxv16i8(i32 0, i32 %slice.14)
    23  define { <vscale x 8 x i16>, <vscale x 8 x i16> } @za_read_horiz_vg2_h(i32 %slice) {
    30    … <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.read.hor.vg2.nxv8i16(i32 0, i32 %slice)
    31    %slice.6 = add i32 %slice, 6
    32    …vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sme.read.hor.vg2.nxv8i16(i32 1, i32 %slice.6)
    36  define { <vscale x 8 x half>, <vscale x 8 x half> } @za_read_horiz_vg2_f16(i32 %slice) {
    43    …vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.read.hor.vg2.nxv8f16(i32 0, i32 %slice)
        [all …]
|
H A D | sme2-intrinsics-fmlas16.ll |
     6  define void @test_fmla_f16_vg2_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 {
    13    call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
    14    %slice.7 = add i32 %slice, 7
    15    call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b)
    19  define void @test_fmla_f16_vg4_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
    27    call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
    29    %slice.7 = add i32 %slice, 7
    30    call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32 %slice…
        [all …]
H A D | sme2-intrinsics-add-sub-za16.ll |
     6  define void @add_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #0 {
    15    call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
    16    %slice.7 = add i32 %slice, 7
    17    call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
    21  define void @add_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
    33    call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
    35    %slice.7 = add i32 %slice, 7
    36    call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice…
        [all …]
H A D | sme2-intrinsics-insert-mova.ll |
    10  define void @za_write_vg2_horiz_b(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) {
    19    call void @llvm.aarch64.sme.write.hor.vg2.nxv16i8(i32 0, i32 %slice, <vscale x 16 x i8> %zn1, <vsc…
    20    %slice.14 = add i32 %slice, 14
    21    call void @llvm.aarch64.sme.write.hor.vg2.nxv16i8(i32 0, i32 %slice.14, <vscale x 16 x i8> %zn1, <…
    25  define void @za_write_vg2_horiz_h(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) {
    34    call void @llvm.aarch64.sme.write.hor.vg2.nxv8i16(i32 0, i32 %slice, <vscale x 8 x i16> %zn1, <vsc…
    35    %slice.6 = add i32 %slice, 6
    36    call void @llvm.aarch64.sme.write.hor.vg2.nxv8i16(i32 1, i32 %slice.6, <vscale x 8 x i16> %zn1, <v…
    40  define void @za_write_vg2_horiz_f16(i32 %slice, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)…
    49    call void @llvm.aarch64.sme.write.hor.vg2.nxv8f16(i32 0, i32 %slice, <vscale x 8 x half> %zn1, <vs…
        [all …]
|
H A D | sme2-intrinsics-mlall.ll |
    10  define void @multi_vector_mul_add_single_long_vg4x1_s8(i32 %slice, <vscale x 16 x i8> %dummy, <vsca…
    17    call void @llvm.aarch64.sme.smla.za32.single.vg4x1.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn, <vs…
    18    %slice.12 = add i32 %slice, 12
    19    call void @llvm.aarch64.sme.smla.za32.single.vg4x1.nxv16i8(i32 %slice.12, <vscale x 16 x i8> %zn, …
    23  define void @multi_vector_mul_add_single_long_vg4x1_s16(i32 %slice, <vscale x 8 x i16> %dummy, <vsc…
    30    call void @llvm.aarch64.sme.smla.za64.single.vg4x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vs…
    31    %slice.12 = add i32 %slice, 12
    32    call void @llvm.aarch64.sme.smla.za64.single.vg4x1.nxv8i16(i32 %slice.12, <vscale x 8 x i16> %zn, …
    38  define void @multi_vector_mul_add_single_long_vg4x2_s8(i32 %slice, <vscale x 16 x i8> %dummy, <vsca…
    47    call void @llvm.aarch64.sme.smla.za32.single.vg4x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <v…
        [all …]
|
H A D | sme2-intrinsics-mlals.ll |
     8  define void @multi_vector_add_single_vg2x1_bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vscale x 8 …
    15    call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn, <vs…
    16    %slice.14 = add i32 %slice, 14
    17    call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8bf16(i32 %slice.14, <vscale x 8 x bfloat> %zn, …
    21  define void @multi_vector_add_single_vg2x1_f16(i32 %slice, <vscale x 8 x half> %zn, <vscale x 8 x h…
    28    call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8f16(i32 %slice, <vscale x 8 x half> %zn, <vscal…
    29    %slice.14 = add i32 %slice, 14
    30    call void @llvm.aarch64.sme.fmlal.single.vg2x1.nxv8f16(i32 %slice.14, <vscale x 8 x half> %zn, <vs…
    34  define void @multi_vector_add_single_vg2x1_s16(i32 %slice, <vscale x 8 x i16> %zn, <vscale x 8 x i1…
    41    call void @llvm.aarch64.sme.smlal.single.vg2x1.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn, <vscale…
        [all …]
|
H A D | sme2-intrinsics-sub.ll |
     8  define void @multi_vector_sub_write_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscal…
    17    call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32 %slice,
    20    %slice.7 = add i32 %slice, 7
    21    call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32 %slice.7,
    27  define void @multi_vector_sub_write_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscal…
    36    call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32 %slice,
    39    %slice.7 = add i32 %slice, 7
    40    call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32 %slice.7,
    50  define void @multi_vector_sub_write_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscal…
    63    call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32 %slice,
        [all …]
|
H A D | sme2-intrinsics-vdot.ll |
     6  define void @test_fvdot_lane_za32_vg1x2_nxv8f16(i32 %slice, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm) {
    13    call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm, i32 3)
    14    %slice.7 = add i32 %slice, 7
    15    call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm, i32 3)
    22  define void @test_fvdot_lane_za32_vg1x2_nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm) {
    29    call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm, i32 3)
    30    %slice.7 = add i32 %slice, 7
    31    call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8bf16(i32 %slice…
        [all …]
H A D | sme2-intrinsics-fmlas.ll |
     6  define void @multi_vector_add_single_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x f…
    15    call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv4f32(i32 %slice,
    18    %slice.7 = add i32 %slice, 7
    19    call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv4f32(i32 %slice.7,
    25  define void @multi_vector_add_single_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x …
    34    call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv2f64(i32 %slice,
    37    %slice.7 = add i32 %slice, 7
    38    call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv2f64(i32 %slice.7,
    44  define void @multi_vector_add_single_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x f…
    56    call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv4f32(i32 %slice,
        [all …]
|
/llvm-project/libcxx/test/std/numerics/numarray/class.slice/slice.ops/ |
H A D | slice.ops.pass.cpp |
    24    std::slice s1;
    25    std::slice s2;
    30    std::slice s1{1, 2, 3};
    31    std::slice s2{1, 2, 3};
    36    std::slice s1;
    37    std::slice s2{1, 2, 3};
    42    std::slice s1{0, 2, 3};
    43    std::slice s2{1, 2, 3};
    48    std::slice s1{1, 0, 3};
    49    std::slice s2{1, 2, 3};
        [all …]
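The pairs above exercise std::slice's equality operator (a C++20 addition): two slices compare equal exactly when their start, size, and stride all match. A minimal standalone illustration, not taken from the test itself:

    #include <cassert>
    #include <valarray>  // std::slice

    int main() {
      std::slice a{1, 2, 3};  // start 1, size 2, stride 3
      std::slice b{1, 2, 3};
      std::slice c{0, 2, 3};  // same size/stride, different start
      assert(a == b);         // all three members match
      assert(!(a == c));
      return 0;
    }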
|
/llvm-project/clang/test/Sema/aarch64-sme2p1-intrinsics/ |
H A D | acle_sme2p1_imm.cpp |
     8  void tests_readz_tile_to_vector_single(uint32_t slice) __arm_streaming __arm_inout("za") {
     9    …svreadz_hor_za8_s8(-1, slice);    // expected-error {{argument value 18446744073709551615 is outside …
    10    …svreadz_hor_za16_s16(-1, slice);  // expected-error {{argument value 18446744073709551615 is outsid…
    11    …svreadz_hor_za32_s32(-1, slice);  // expected-error {{argument value 18446744073709551615 is outsid…
    12    …svreadz_hor_za64_s64(-1, slice);  // expected-error {{argument value 18446744073709551615 is outsid…
    13    …svreadz_hor_za128_s8(-1, slice);  // expected-error {{argument value 18446744073709551615 is outsid…
    14    …svreadz_hor_za128_s16(-1, slice); // expected-error {{argument value 18446744073709551615 is outsi…
    15    …svreadz_hor_za128_s32(-1, slice); // expected-error {{argument value 18446744073709551615 is outsi…
    16    …svreadz_hor_za128_s64(-1, slice); // expected-error {{argument value 18446744073709551615 is outsi…
    17    …svreadz_hor_za128_bf16(-1, slice); // expected-error {{argument value 18446744073709551615 is outs…
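These diagnostics all come from passing -1 (which wraps to 18446744073709551615) where a tile immediate is required; for ZA8 the only valid tile is 0. A well-formed call is sketched below. The intrinsic and attributes come from the excerpt; the wrapper name, the svint8_t return type, and the read-and-zero description (SME2.1 MOVAZ semantics) are my assumptions.

    #include <arm_sme.h>

    // Sketch only: read one horizontal ZA8 tile slice into a vector and zero the
    // source slice (SME2.1 MOVAZ). The tile immediate must be 0 for ZA8.
    svint8_t readz_za8_row(uint32_t slice) __arm_streaming __arm_inout("za") {
      return svreadz_hor_za8_s8(0, slice);
    }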
|
/llvm-project/flang/test/Fir/ |
H A D | array-coor-canonicalization.fir |
     7  // CHECK: %[[VAL_8:.*]] = fir.slice
    19  …%3 = fir.slice %c1, %c120, %c1, %c1, %c2, %c1 : (index, index, index, index, index, index) -> !fir…
    20  …%4 = fir.embox %2(%1) [%3] : (!fir.ref<!fir.array<120x2xi32>>, !fir.shape<2>, !fir.slice<2>) -> !f…
    65  // CHECK: %[[VAL_12:.*]] = fir.slice
    82  …%4 = fir.slice %c10, %c120, %c1, %c11, %c12, %c1 : (index, index, index, index, index, index) -> !…
    83  …%5 = fir.rebox %3(%1) [%4] : (!fir.box<!fir.array<?x?xi32>>, !fir.shift<2>, !fir.slice<2>) -> !fir…
    99  // CHECK: %[[VAL_7:.*]] = fir.slice
   100  …%[[VAL_7]]] : (!fir.ref<!fir.array<100x100x100xi32>>, !fir.shape<3>, !fir.slice<3>) -> !fir.box<!f…
   112  …%4 = fir.slice %c1, %c100, %c1, %c1, %3, %3, %c1, %c100, %c1 : (index, index, index, index, index,…
   113  …%5 = fir.embox %2(%1) [%4] : (!fir.ref<!fir.array<100x100x100xi32>>, !fir.shape<3>, !fir.slice<3>)…
        [all …]
|
H A D | cg-ops.fir |
     9  %2 = fir.slice %0, %0, %0 : (index, index, index) -> !fir.slice<1>
    11  …%3 = fir.embox %addr (%1) [%2] : (!fir.ref<!fir.array<?xi32>>, !fir.shapeshift<1>, !fir.slice<1>) …
    13  …ddr (%1) [%2] %0 : (!fir.ref<!fir.array<?xi32>>, !fir.shapeshift<1>, !fir.slice<1>, index) -> !fir…
    28  %2 = fir.slice %0, %0, %0 : (index, index, index) -> !fir.slice<1>
    30  …%3 = fir.embox %arr (%1) [%2] : (!fir.ref<!fir.array<?xi32>>, !fir.shapeshift<1>, !fir.slice<1>) -…
    36  // fir.embox with slice with substr
    44  %2 = fir.slice %0, %0, %0 substr %0, %0 : (index, index, index, index, index) -> !fir.slice<1>
    46  …%3 = fir.embox %addr (%1) [%2] : (!fir.ref<!fir.array<?xi32>>, !fir.shapeshift<1>, !fir.slice<1>) …
    52  // fir.rebox with slice with substr
    60  …%0 = fir.slice %c10, %c1, %c1 substr %c1, %c1 : (index, index, index, index, index) -> !fir.slice<1>
        [all …]
|
/llvm-project/llvm/test/Transforms/SROA/ |
H A D | pr37267.ll |
    18  ; The reported error happened when rewriteIntegerStore try to widen a split tail of slice 1 for [4,…
    20  ; slice 1: WWWW
    21  ; slice 2: WWWW
    22  ; slice 3: RR
    23  ; slice 4: RR
    26  ; slice 1: [2,6)
    29  ; slice 2: [8,12)
    32  ; slice 3: [8,10)
    35  ; slice 4: [2,4)
    58  ; The reported error happened when visitLoadInst rewrites a split tail of slice 1 for [4, 8) partit…
        [all …]
|
/llvm-project/clang/test/CodeGen/aarch64-sme-intrinsics/ |
H A D | acle_sme_write.c |
|
H A D | acle_sme_read.c |
|