; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

define <vscale x 4 x i32> @sdiv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sdiv_i32:
; CHECK: sdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdiv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sdiv_i64:
; CHECK: sdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @udiv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: udiv_i32:
; CHECK: udiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udiv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: udiv_i64:
; CHECK: udiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @sdivr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sdivr_i32:
; CHECK: sdivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdivr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sdivr_i64:
; CHECK: sdivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @udivr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: udivr_i32:
; CHECK: udivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udivr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: udivr_i64:
; CHECK: udivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.udiv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.udiv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sdivr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sdivr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.udivr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.udivr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)