; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s

; PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [z0.s, #7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 7, i32 1)
  ret void
}

; PRFB <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [z0.d, #7]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 7, i32 1)
  ret void
}

; PRFH <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [z0.s, #6]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 6, i32 1)
  ret void
}

; PRFH <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [z0.d, #6]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 6, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [z0.s, #12]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 12, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [z0.d, #12]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 12, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [z0.s, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 16, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [z0.d, #16]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 16, i32 1)
  ret void
}

declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
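
; Note on the intrinsic signature, as exercised by the tests above: the
; arguments are (governing predicate, vector of base addresses, scalar
; immediate offset, prefetch operation). The i32 1 prfop operand selects
; pldl1strm, as the CHECK lines confirm. The i64 offset is emitted directly as
; the instruction's immediate (#7, #6, #12, #16); judging from the offsets
; chosen per opcode, it is presumably constrained to be a multiple of the
; prefetch element size (2 bytes for prfh, 4 for prfw, 8 for prfd).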