; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Folded: ptrue with pattern VL1 activates only the first lane, so the
; predicated dup is equivalent to an insertelement at index 0.
define <vscale x 16 x i8> @dup_insertelement_0(<vscale x 16 x i8> %v, i8 %s) #0 {
; CHECK-LABEL: @dup_insertelement_0(
; CHECK-NEXT:    [[INSERT:%.*]] = insertelement <vscale x 16 x i8> [[V:%.*]], i8 [[S:%.*]], i64 0
; CHECK-NEXT:    ret <vscale x 16 x i8> [[INSERT]]
;
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 1)
  %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
  ret <vscale x 16 x i8> %insert
}

; Not folded: ptrue with pattern VL2 activates the first two lanes, so the
; dup cannot be reduced to a single-lane insert and the intrinsic is kept.
define <vscale x 16 x i8> @dup_insertelement_1(<vscale x 16 x i8> %v, i8 %s) #0 {
; CHECK-LABEL: @dup_insertelement_1(
; CHECK-NEXT:    [[PG:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 2)
; CHECK-NEXT:    [[INSERT:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[V:%.*]], <vscale x 16 x i1> [[PG]], i8 [[S:%.*]])
; CHECK-NEXT:    ret <vscale x 16 x i8> [[INSERT]]
;
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 2)
  %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
  ret <vscale x 16 x i8> %insert
}

; Not folded: the predicate is an arbitrary argument, not a known VL1 ptrue.
define <vscale x 16 x i8> @dup_insertelement_x(<vscale x 16 x i8> %v, i8 %s, <vscale x 16 x i1> %pg) #0 {
; CHECK-LABEL: @dup_insertelement_x(
; CHECK-NEXT:    [[INSERT:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[V:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[S:%.*]])
; CHECK-NEXT:    ret <vscale x 16 x i8> [[INSERT]]
;
  %insert = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %v, <vscale x 16 x i1> %pg, i8 %s)
  ret <vscale x 16 x i8> %insert
}

; Folded: the to/from svbool round trip of the predicate is a no-op, so the
; VL1 ptrue is still visible and the dup folds to an insertelement at index 0.
define <vscale x 8 x i16> @dup_insertelement_0_convert(<vscale x 8 x i16> %v, i16 %s) #0 {
; CHECK-LABEL: @dup_insertelement_0_convert(
; CHECK-NEXT:    [[INSERT:%.*]] = insertelement <vscale x 8 x i16> [[V:%.*]], i16 [[S:%.*]], i64 0
; CHECK-NEXT:    ret <vscale x 8 x i16> [[INSERT]]
;
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 1)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %pg)
  %2 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %1)
  %insert = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %v, <vscale x 8 x i1> %2, i16 %s)
  ret <vscale x 8 x i16> %insert
}

; An unpredicated dup.x splat is canonicalised to the generic
; insertelement + shufflevector splat idiom.
define <vscale x 8 x i16> @dupx_splat_convert(i16 %s) #0 {
; CHECK-LABEL: @dupx_splat_convert(
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[S:%.*]], i64 0
; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT:    ret <vscale x 8 x i16> [[SPLAT]]
;
  %splat = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %s)
  ret <vscale x 8 x i16> %splat
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)

declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)

declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1>)

attributes #0 = { "target-features"="+sve" }