; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s

define void @test_sf_vc_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen)

define <1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl)
  ret <1 x i16> %0
}

declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen)

define <2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl)
  ret <2 x i16> %0
}

declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen)

define <4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl)
  ret <4 x i16> %0
}

declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen)

define <8 x i16> @test_sf_vc_v_vvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl)
  ret <8 x i16> %0
}

declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen)

define <16 x i16> @test_sf_vc_v_vvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl)
  ret <16 x i16> %0
}

declare <16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen)

define <32 x i16> @test_sf_vc_v_vvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl)
  ret <32 x i16> %0
}

declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen)

define <1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
  ret <1 x i32> %0
}

declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen)

define <2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
  ret <2 x i32> %0
}

declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x
i16>, iXLen) 305 306define <4 x i32> @test_sf_vc_v_vvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { 307; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1: 308; CHECK: # %bb.0: # %entry 309; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma 310; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 311; CHECK-NEXT: ret 312entry: 313 %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) 314 ret <4 x i32> %0 315} 316 317declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) 318 319define <8 x i32> @test_sf_vc_v_vvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { 320; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2: 321; CHECK: # %bb.0: # %entry 322; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma 323; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 324; CHECK-NEXT: ret 325entry: 326 %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) 327 ret <8 x i32> %0 328} 329 330declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, iXLen) 331 332define <16 x i32> @test_sf_vc_v_vvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { 333; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4: 334; CHECK: # %bb.0: # %entry 335; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma 336; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 337; CHECK-NEXT: ret 338entry: 339 %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) 340 ret <16 x i32> %0 341} 342 343declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) 344 345define <1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { 346; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2: 347; CHECK: # %bb.0: # %entry 348; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 349; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 350; CHECK-NEXT: ret 351entry: 352 %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) 353 ret <1 x i64> %0 354} 355 356declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) 357 358define <2 x i64> @test_sf_vc_v_vvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { 359; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1: 360; CHECK: # %bb.0: # %entry 361; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 362; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 363; CHECK-NEXT: ret 364entry: 365 %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) 366 ret <2 x i64> %0 367} 368 369declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) 370 371define <4 x i64> @test_sf_vc_v_vvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { 372; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2: 373; CHECK: # %bb.0: # %entry 374; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma 375; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 376; CHECK-NEXT: ret 377entry: 378 %0 = tail call <4 x i64> 
@llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) 379 ret <4 x i64> %0 380} 381 382declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen) 383 384define <8 x i64> @test_sf_vc_v_vvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { 385; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4: 386; CHECK: # %bb.0: # %entry 387; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma 388; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 389; CHECK-NEXT: ret 390entry: 391 %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) 392 ret <8 x i64> %0 393} 394 395declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) 396 397define <1 x i16> @test_sf_vc_v_vvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { 398; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8: 399; CHECK: # %bb.0: # %entry 400; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma 401; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 402; CHECK-NEXT: ret 403entry: 404 %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) 405 ret <1 x i16> %0 406} 407 408declare <1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <1 x i16>, <1 x i8>, <1 x i8>, iXLen) 409 410define <2 x i16> @test_sf_vc_v_vvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) { 411; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4: 412; CHECK: # %bb.0: # %entry 413; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma 414; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 415; CHECK-NEXT: ret 416entry: 417 %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, <2 x i8> %vs1, iXLen %vl) 418 ret <2 x i16> %0 419} 420 421declare <2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <2 x i16>, <2 x i8>, <2 x i8>, iXLen) 422 423define <4 x i16> @test_sf_vc_v_vvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) { 424; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2: 425; CHECK: # %bb.0: # %entry 426; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma 427; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 428; CHECK-NEXT: ret 429entry: 430 %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, <4 x i8> %vs1, iXLen %vl) 431 ret <4 x i16> %0 432} 433 434declare <4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <4 x i16>, <4 x i8>, <4 x i8>, iXLen) 435 436define <8 x i16> @test_sf_vc_v_vvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) { 437; CHECK-LABEL: test_sf_vc_v_vvw_e8m1: 438; CHECK: # %bb.0: # %entry 439; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma 440; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 441; CHECK-NEXT: ret 442entry: 443 %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, <8 x i8> %vs1, iXLen %vl) 444 ret <8 x i16> %0 445} 446 447declare <8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <8 x i16>, <8 x i8>, <8 x i8>, iXLen) 448 449define <16 x i16> @test_sf_vc_v_vvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) { 450; CHECK-LABEL: test_sf_vc_v_vvw_e8m2: 451; CHECK: # %bb.0: # %entry 452; 
CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma 453; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 454; CHECK-NEXT: ret 455entry: 456 %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, <16 x i8> %vs1, iXLen %vl) 457 ret <16 x i16> %0 458} 459 460declare <16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <16 x i16>, <16 x i8>, <16 x i8>, iXLen) 461 462define <32 x i16> @test_sf_vc_v_vvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) { 463; CHECK-LABEL: test_sf_vc_v_vvw_e8m4: 464; CHECK: # %bb.0: # %entry 465; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma 466; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 467; CHECK-NEXT: ret 468entry: 469 %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, <32 x i8> %vs1, iXLen %vl) 470 ret <32 x i16> %0 471} 472 473declare <32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <32 x i16>, <32 x i8>, <32 x i8>, iXLen) 474 475define <1 x i32> @test_sf_vc_v_vvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { 476; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4: 477; CHECK: # %bb.0: # %entry 478; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 479; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 480; CHECK-NEXT: ret 481entry: 482 %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) 483 ret <1 x i32> %0 484} 485 486declare <1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <1 x i32>, <1 x i16>, <1 x i16>, iXLen) 487 488define <2 x i32> @test_sf_vc_v_vvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { 489; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2: 490; CHECK: # %bb.0: # %entry 491; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 492; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 493; CHECK-NEXT: ret 494entry: 495 %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) 496 ret <2 x i32> %0 497} 498 499declare <2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <2 x i32>, <2 x i16>, <2 x i16>, iXLen) 500 501define <4 x i32> @test_sf_vc_v_vvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { 502; CHECK-LABEL: test_sf_vc_v_vvw_e16m1: 503; CHECK: # %bb.0: # %entry 504; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma 505; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 506; CHECK-NEXT: ret 507entry: 508 %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) 509 ret <4 x i32> %0 510} 511 512declare <4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <4 x i32>, <4 x i16>, <4 x i16>, iXLen) 513 514define <8 x i32> @test_sf_vc_v_vvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { 515; CHECK-LABEL: test_sf_vc_v_vvw_e16m2: 516; CHECK: # %bb.0: # %entry 517; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma 518; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 519; CHECK-NEXT: ret 520entry: 521 %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) 522 ret <8 x i32> %0 523} 524 525declare <8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <8 x i32>, <8 x i16>, <8 x i16>, 
iXLen) 526 527define <16 x i32> @test_sf_vc_v_vvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { 528; CHECK-LABEL: test_sf_vc_v_vvw_e16m4: 529; CHECK: # %bb.0: # %entry 530; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma 531; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 532; CHECK-NEXT: ret 533entry: 534 %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) 535 ret <16 x i32> %0 536} 537 538declare <16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <16 x i32>, <16 x i16>, <16 x i16>, iXLen) 539 540define <1 x i64> @test_sf_vc_v_vvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { 541; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2: 542; CHECK: # %bb.0: # %entry 543; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 544; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 545; CHECK-NEXT: ret 546entry: 547 %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) 548 ret <1 x i64> %0 549} 550 551declare <1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <1 x i64>, <1 x i32>, <1 x i32>, iXLen) 552 553define <2 x i64> @test_sf_vc_v_vvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { 554; CHECK-LABEL: test_sf_vc_v_vvw_e32m1: 555; CHECK: # %bb.0: # %entry 556; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 557; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 558; CHECK-NEXT: ret 559entry: 560 %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) 561 ret <2 x i64> %0 562} 563 564declare <2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <2 x i64>, <2 x i32>, <2 x i32>, iXLen) 565 566define <4 x i64> @test_sf_vc_v_vvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { 567; CHECK-LABEL: test_sf_vc_v_vvw_e32m2: 568; CHECK: # %bb.0: # %entry 569; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma 570; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 571; CHECK-NEXT: ret 572entry: 573 %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) 574 ret <4 x i64> %0 575} 576 577declare <4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <4 x i64>, <4 x i32>, <4 x i32>, iXLen) 578 579define <8 x i64> @test_sf_vc_v_vvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { 580; CHECK-LABEL: test_sf_vc_v_vvw_e32m4: 581; CHECK: # %bb.0: # %entry 582; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma 583; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 584; CHECK-NEXT: ret 585entry: 586 %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) 587 ret <8 x i64> %0 588} 589 590declare <8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <8 x i64>, <8 x i32>, <8 x i32>, iXLen) 591 592define void @test_sf_vc_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 593; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8: 594; CHECK: # %bb.0: # %entry 595; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 596; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 597; CHECK-NEXT: ret 598entry: 599 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, i8 %rs1, 
iXLen %vl) 600 ret void 601} 602 603declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) 604 605define void @test_sf_vc_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 606; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4: 607; CHECK: # %bb.0: # %entry 608; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 609; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 610; CHECK-NEXT: ret 611entry: 612 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl) 613 ret void 614} 615 616declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) 617 618define void @test_sf_vc_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 619; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2: 620; CHECK: # %bb.0: # %entry 621; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 622; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 623; CHECK-NEXT: ret 624entry: 625 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl) 626 ret void 627} 628 629declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) 630 631define void @test_sf_vc_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 632; CHECK-LABEL: test_sf_vc_xvw_se_e8m1: 633; CHECK: # %bb.0: # %entry 634; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 635; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 636; CHECK-NEXT: ret 637entry: 638 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl) 639 ret void 640} 641 642declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) 643 644define void @test_sf_vc_xvw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 645; CHECK-LABEL: test_sf_vc_xvw_se_e8m2: 646; CHECK: # %bb.0: # %entry 647; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 648; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 649; CHECK-NEXT: ret 650entry: 651 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl) 652 ret void 653} 654 655declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) 656 657define void @test_sf_vc_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 658; CHECK-LABEL: test_sf_vc_xvw_se_e8m4: 659; CHECK: # %bb.0: # %entry 660; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 661; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 662; CHECK-NEXT: ret 663entry: 664 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl) 665 ret void 666} 667 668declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) 669 670define void @test_sf_vc_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 671; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4: 672; CHECK: # %bb.0: # %entry 673; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 674; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 675; CHECK-NEXT: ret 676entry: 677 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) 678 ret void 679} 680 681declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, 
iXLen) 682 683define void @test_sf_vc_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 684; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2: 685; CHECK: # %bb.0: # %entry 686; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 687; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 688; CHECK-NEXT: ret 689entry: 690 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) 691 ret void 692} 693 694declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) 695 696define void @test_sf_vc_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 697; CHECK-LABEL: test_sf_vc_xvw_se_e16m1: 698; CHECK: # %bb.0: # %entry 699; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 700; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 701; CHECK-NEXT: ret 702entry: 703 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) 704 ret void 705} 706 707declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) 708 709define void @test_sf_vc_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 710; CHECK-LABEL: test_sf_vc_xvw_se_e16m2: 711; CHECK: # %bb.0: # %entry 712; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 713; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 714; CHECK-NEXT: ret 715entry: 716 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) 717 ret void 718} 719 720declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) 721 722define void @test_sf_vc_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 723; CHECK-LABEL: test_sf_vc_xvw_se_e16m4: 724; CHECK: # %bb.0: # %entry 725; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 726; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 727; CHECK-NEXT: ret 728entry: 729 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) 730 ret void 731} 732 733declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) 734 735define void @test_sf_vc_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 736; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2: 737; CHECK: # %bb.0: # %entry 738; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 739; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 740; CHECK-NEXT: ret 741entry: 742 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 743 ret void 744} 745 746declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) 747 748define void @test_sf_vc_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 749; CHECK-LABEL: test_sf_vc_xvw_se_e32m1: 750; CHECK: # %bb.0: # %entry 751; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 752; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 753; CHECK-NEXT: ret 754entry: 755 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) 756 ret void 757} 758 759declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) 760 761define void @test_sf_vc_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, 
i32 signext %rs1, iXLen %vl) { 762; CHECK-LABEL: test_sf_vc_xvw_se_e32m2: 763; CHECK: # %bb.0: # %entry 764; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 765; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 766; CHECK-NEXT: ret 767entry: 768 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) 769 ret void 770} 771 772declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) 773 774define void @test_sf_vc_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 775; CHECK-LABEL: test_sf_vc_xvw_se_e32m4: 776; CHECK: # %bb.0: # %entry 777; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 778; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 779; CHECK-NEXT: ret 780entry: 781 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) 782 ret void 783} 784 785declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) 786 787define <1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 788; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8: 789; CHECK: # %bb.0: # %entry 790; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma 791; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 792; CHECK-NEXT: ret 793entry: 794 %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl) 795 ret <1 x i16> %0 796} 797 798declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) 799 800define <2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 801; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4: 802; CHECK: # %bb.0: # %entry 803; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma 804; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 805; CHECK-NEXT: ret 806entry: 807 %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl) 808 ret <2 x i16> %0 809} 810 811declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) 812 813define <4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 814; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2: 815; CHECK: # %bb.0: # %entry 816; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma 817; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 818; CHECK-NEXT: ret 819entry: 820 %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl) 821 ret <4 x i16> %0 822} 823 824declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) 825 826define <8 x i16> @test_sf_vc_v_xvw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 827; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1: 828; CHECK: # %bb.0: # %entry 829; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma 830; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 831; CHECK-NEXT: ret 832entry: 833 %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl) 834 ret <8 x i16> %0 835} 836 837declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) 838 839define <16 x i16> @test_sf_vc_v_xvw_se_e8m2(<16 x i16> %vd, 
<16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 840; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2: 841; CHECK: # %bb.0: # %entry 842; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma 843; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 844; CHECK-NEXT: ret 845entry: 846 %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl) 847 ret <16 x i16> %0 848} 849 850declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) 851 852define <32 x i16> @test_sf_vc_v_xvw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 853; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4: 854; CHECK: # %bb.0: # %entry 855; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma 856; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 857; CHECK-NEXT: ret 858entry: 859 %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl) 860 ret <32 x i16> %0 861} 862 863declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) 864 865define <1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 866; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4: 867; CHECK: # %bb.0: # %entry 868; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma 869; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 870; CHECK-NEXT: ret 871entry: 872 %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) 873 ret <1 x i32> %0 874} 875 876declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) 877 878define <2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 879; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2: 880; CHECK: # %bb.0: # %entry 881; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma 882; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 883; CHECK-NEXT: ret 884entry: 885 %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) 886 ret <2 x i32> %0 887} 888 889declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) 890 891define <4 x i32> @test_sf_vc_v_xvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 892; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1: 893; CHECK: # %bb.0: # %entry 894; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma 895; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 896; CHECK-NEXT: ret 897entry: 898 %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) 899 ret <4 x i32> %0 900} 901 902declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) 903 904define <8 x i32> @test_sf_vc_v_xvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 905; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2: 906; CHECK: # %bb.0: # %entry 907; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma 908; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 909; CHECK-NEXT: ret 910entry: 911 %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) 912 ret <8 x i32> %0 913} 914 915declare <8 x i32> 
@llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) 916 917define <16 x i32> @test_sf_vc_v_xvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 918; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4: 919; CHECK: # %bb.0: # %entry 920; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma 921; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 922; CHECK-NEXT: ret 923entry: 924 %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) 925 ret <16 x i32> %0 926} 927 928declare <16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) 929 930define <1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 931; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2: 932; CHECK: # %bb.0: # %entry 933; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma 934; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 935; CHECK-NEXT: ret 936entry: 937 %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 938 ret <1 x i64> %0 939} 940 941declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) 942 943define <2 x i64> @test_sf_vc_v_xvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 944; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1: 945; CHECK: # %bb.0: # %entry 946; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma 947; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 948; CHECK-NEXT: ret 949entry: 950 %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) 951 ret <2 x i64> %0 952} 953 954declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) 955 956define <4 x i64> @test_sf_vc_v_xvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 957; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2: 958; CHECK: # %bb.0: # %entry 959; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma 960; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 961; CHECK-NEXT: ret 962entry: 963 %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) 964 ret <4 x i64> %0 965} 966 967declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) 968 969define <8 x i64> @test_sf_vc_v_xvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 970; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4: 971; CHECK: # %bb.0: # %entry 972; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma 973; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 974; CHECK-NEXT: ret 975entry: 976 %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) 977 ret <8 x i64> %0 978} 979 980declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) 981 982define <1 x i16> @test_sf_vc_v_xvw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 983; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8: 984; CHECK: # %bb.0: # %entry 985; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma 986; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 987; CHECK-NEXT: ret 988entry: 989 %0 = tail call <1 x i16> 
@llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, i8 %rs1, iXLen %vl) 990 ret <1 x i16> %0 991} 992 993declare <1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <1 x i16>, <1 x i8>, i8, iXLen) 994 995define <2 x i16> @test_sf_vc_v_xvw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 996; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4: 997; CHECK: # %bb.0: # %entry 998; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma 999; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1000; CHECK-NEXT: ret 1001entry: 1002 %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, i8 %rs1, iXLen %vl) 1003 ret <2 x i16> %0 1004} 1005 1006declare <2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <2 x i16>, <2 x i8>, i8, iXLen) 1007 1008define <4 x i16> @test_sf_vc_v_xvw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 1009; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2: 1010; CHECK: # %bb.0: # %entry 1011; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma 1012; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1013; CHECK-NEXT: ret 1014entry: 1015 %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, i8 %rs1, iXLen %vl) 1016 ret <4 x i16> %0 1017} 1018 1019declare <4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <4 x i16>, <4 x i8>, i8, iXLen) 1020 1021define <8 x i16> @test_sf_vc_v_xvw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 1022; CHECK-LABEL: test_sf_vc_v_xvw_e8m1: 1023; CHECK: # %bb.0: # %entry 1024; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma 1025; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1026; CHECK-NEXT: ret 1027entry: 1028 %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, i8 %rs1, iXLen %vl) 1029 ret <8 x i16> %0 1030} 1031 1032declare <8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <8 x i16>, <8 x i8>, i8, iXLen) 1033 1034define <16 x i16> @test_sf_vc_v_xvw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 1035; CHECK-LABEL: test_sf_vc_v_xvw_e8m2: 1036; CHECK: # %bb.0: # %entry 1037; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma 1038; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 1039; CHECK-NEXT: ret 1040entry: 1041 %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, i8 %rs1, iXLen %vl) 1042 ret <16 x i16> %0 1043} 1044 1045declare <16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <16 x i16>, <16 x i8>, i8, iXLen) 1046 1047define <32 x i16> @test_sf_vc_v_xvw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) { 1048; CHECK-LABEL: test_sf_vc_v_xvw_e8m4: 1049; CHECK: # %bb.0: # %entry 1050; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma 1051; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 1052; CHECK-NEXT: ret 1053entry: 1054 %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, i8 %rs1, iXLen %vl) 1055 ret <32 x i16> %0 1056} 1057 1058declare <32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <32 x i16>, <32 x i8>, i8, iXLen) 1059 1060define <1 x i32> @test_sf_vc_v_xvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 1061; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4: 1062; CHECK: # %bb.0: # %entry 1063; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma 1064; CHECK-NEXT: 
sf.vc.v.xvw 3, v8, v9, a0 1065; CHECK-NEXT: ret 1066entry: 1067 %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) 1068 ret <1 x i32> %0 1069} 1070 1071declare <1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <1 x i32>, <1 x i16>, i16, iXLen) 1072 1073define <2 x i32> @test_sf_vc_v_xvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 1074; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2: 1075; CHECK: # %bb.0: # %entry 1076; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma 1077; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1078; CHECK-NEXT: ret 1079entry: 1080 %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) 1081 ret <2 x i32> %0 1082} 1083 1084declare <2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <2 x i32>, <2 x i16>, i16, iXLen) 1085 1086define <4 x i32> @test_sf_vc_v_xvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 1087; CHECK-LABEL: test_sf_vc_v_xvw_e16m1: 1088; CHECK: # %bb.0: # %entry 1089; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma 1090; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1091; CHECK-NEXT: ret 1092entry: 1093 %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) 1094 ret <4 x i32> %0 1095} 1096 1097declare <4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <4 x i32>, <4 x i16>, i16, iXLen) 1098 1099define <8 x i32> @test_sf_vc_v_xvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 1100; CHECK-LABEL: test_sf_vc_v_xvw_e16m2: 1101; CHECK: # %bb.0: # %entry 1102; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma 1103; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 1104; CHECK-NEXT: ret 1105entry: 1106 %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) 1107 ret <8 x i32> %0 1108} 1109 1110declare <8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <8 x i32>, <8 x i16>, i16, iXLen) 1111 1112define <16 x i32> @test_sf_vc_v_xvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) { 1113; CHECK-LABEL: test_sf_vc_v_xvw_e16m4: 1114; CHECK: # %bb.0: # %entry 1115; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma 1116; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 1117; CHECK-NEXT: ret 1118entry: 1119 %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) 1120 ret <16 x i32> %0 1121} 1122 1123declare <16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <16 x i32>, <16 x i16>, i16, iXLen) 1124 1125define <1 x i64> @test_sf_vc_v_xvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 1126; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2: 1127; CHECK: # %bb.0: # %entry 1128; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma 1129; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1130; CHECK-NEXT: ret 1131entry: 1132 %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 1133 ret <1 x i64> %0 1134} 1135 1136declare <1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <1 x i64>, <1 x i32>, i32, iXLen) 1137 1138define <2 x i64> @test_sf_vc_v_xvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, i32 signext %rs1, 
iXLen %vl) { 1139; CHECK-LABEL: test_sf_vc_v_xvw_e32m1: 1140; CHECK: # %bb.0: # %entry 1141; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma 1142; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 1143; CHECK-NEXT: ret 1144entry: 1145 %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) 1146 ret <2 x i64> %0 1147} 1148 1149declare <2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <2 x i64>, <2 x i32>, i32, iXLen) 1150 1151define <4 x i64> @test_sf_vc_v_xvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 1152; CHECK-LABEL: test_sf_vc_v_xvw_e32m2: 1153; CHECK: # %bb.0: # %entry 1154; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma 1155; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 1156; CHECK-NEXT: ret 1157entry: 1158 %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) 1159 ret <4 x i64> %0 1160} 1161 1162declare <4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <4 x i64>, <4 x i32>, i32, iXLen) 1163 1164define <8 x i64> @test_sf_vc_v_xvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, i32 signext %rs1, iXLen %vl) { 1165; CHECK-LABEL: test_sf_vc_v_xvw_e32m4: 1166; CHECK: # %bb.0: # %entry 1167; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma 1168; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 1169; CHECK-NEXT: ret 1170entry: 1171 %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) 1172 ret <8 x i64> %0 1173} 1174 1175declare <8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <8 x i64>, <8 x i32>, i32, iXLen) 1176 1177define void @test_sf_vc_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { 1178; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8: 1179; CHECK: # %bb.0: # %entry 1180; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 1181; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1182; CHECK-NEXT: ret 1183entry: 1184 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl) 1185 ret void 1186} 1187 1188declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) 1189 1190define void @test_sf_vc_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { 1191; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4: 1192; CHECK: # %bb.0: # %entry 1193; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 1194; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1195; CHECK-NEXT: ret 1196entry: 1197 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl) 1198 ret void 1199} 1200 1201declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) 1202 1203define void @test_sf_vc_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { 1204; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2: 1205; CHECK: # %bb.0: # %entry 1206; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 1207; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1208; CHECK-NEXT: ret 1209entry: 1210 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl) 1211 ret void 1212} 1213 1214declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) 1215 1216define void @test_sf_vc_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { 1217; 
CHECK-LABEL: test_sf_vc_ivw_se_e8m1: 1218; CHECK: # %bb.0: # %entry 1219; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 1220; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1221; CHECK-NEXT: ret 1222entry: 1223 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl) 1224 ret void 1225} 1226 1227declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) 1228 1229define void @test_sf_vc_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { 1230; CHECK-LABEL: test_sf_vc_ivw_se_e8m2: 1231; CHECK: # %bb.0: # %entry 1232; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 1233; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10 1234; CHECK-NEXT: ret 1235entry: 1236 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl) 1237 ret void 1238} 1239 1240declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) 1241 1242define void @test_sf_vc_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { 1243; CHECK-LABEL: test_sf_vc_ivw_se_e8m4: 1244; CHECK: # %bb.0: # %entry 1245; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma 1246; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10 1247; CHECK-NEXT: ret 1248entry: 1249 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl) 1250 ret void 1251} 1252 1253declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) 1254 1255define void @test_sf_vc_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { 1256; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4: 1257; CHECK: # %bb.0: # %entry 1258; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 1259; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1260; CHECK-NEXT: ret 1261entry: 1262 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl) 1263 ret void 1264} 1265 1266declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) 1267 1268define void @test_sf_vc_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { 1269; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2: 1270; CHECK: # %bb.0: # %entry 1271; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 1272; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1273; CHECK-NEXT: ret 1274entry: 1275 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl) 1276 ret void 1277} 1278 1279declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) 1280 1281define void @test_sf_vc_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { 1282; CHECK-LABEL: test_sf_vc_ivw_se_e16m1: 1283; CHECK: # %bb.0: # %entry 1284; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 1285; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1286; CHECK-NEXT: ret 1287entry: 1288 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl) 1289 ret void 1290} 1291 1292declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) 1293 1294define void @test_sf_vc_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { 1295; CHECK-LABEL: test_sf_vc_ivw_se_e16m2: 1296; CHECK: # %bb.0: # %entry 1297; CHECK-NEXT: 
vsetvli zero, a0, e16, m1, ta, ma 1298; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10 1299; CHECK-NEXT: ret 1300entry: 1301 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl) 1302 ret void 1303} 1304 1305declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) 1306 1307define void @test_sf_vc_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { 1308; CHECK-LABEL: test_sf_vc_ivw_se_e16m4: 1309; CHECK: # %bb.0: # %entry 1310; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 1311; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10 1312; CHECK-NEXT: ret 1313entry: 1314 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl) 1315 ret void 1316} 1317 1318declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) 1319 1320define void @test_sf_vc_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { 1321; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2: 1322; CHECK: # %bb.0: # %entry 1323; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 1324; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1325; CHECK-NEXT: ret 1326entry: 1327 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl) 1328 ret void 1329} 1330 1331declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) 1332 1333define void @test_sf_vc_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { 1334; CHECK-LABEL: test_sf_vc_ivw_se_e32m1: 1335; CHECK: # %bb.0: # %entry 1336; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 1337; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10 1338; CHECK-NEXT: ret 1339entry: 1340 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl) 1341 ret void 1342} 1343 1344declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) 1345 1346define void @test_sf_vc_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { 1347; CHECK-LABEL: test_sf_vc_ivw_se_e32m2: 1348; CHECK: # %bb.0: # %entry 1349; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 1350; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10 1351; CHECK-NEXT: ret 1352entry: 1353 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl) 1354 ret void 1355} 1356 1357declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) 1358 1359define void @test_sf_vc_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { 1360; CHECK-LABEL: test_sf_vc_ivw_se_e32m4: 1361; CHECK: # %bb.0: # %entry 1362; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 1363; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10 1364; CHECK-NEXT: ret 1365entry: 1366 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl) 1367 ret void 1368} 1369 1370declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) 1371 1372define <1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { 1373; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8: 1374; CHECK: # %bb.0: # %entry 1375; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma 1376; CHECK-NEXT: sf.vc.v.ivw 3, v8, 
v9, 10 1377; CHECK-NEXT: ret 1378entry: 1379 %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl) 1380 ret <1 x i16> %0 1381} 1382 1383declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) 1384 1385define <2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { 1386; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4: 1387; CHECK: # %bb.0: # %entry 1388; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma 1389; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1390; CHECK-NEXT: ret 1391entry: 1392 %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl) 1393 ret <2 x i16> %0 1394} 1395 1396declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) 1397 1398define <4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { 1399; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2: 1400; CHECK: # %bb.0: # %entry 1401; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma 1402; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1403; CHECK-NEXT: ret 1404entry: 1405 %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x i8> %vs2, iXLen 10, iXLen %vl) 1406 ret <4 x i16> %0 1407} 1408 1409declare <4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) 1410 1411define <8 x i16> @test_sf_vc_v_ivw_se_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { 1412; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1: 1413; CHECK: # %bb.0: # %entry 1414; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma 1415; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1416; CHECK-NEXT: ret 1417entry: 1418 %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl) 1419 ret <8 x i16> %0 1420} 1421 1422declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) 1423 1424define <16 x i16> @test_sf_vc_v_ivw_se_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { 1425; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2: 1426; CHECK: # %bb.0: # %entry 1427; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma 1428; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 1429; CHECK-NEXT: ret 1430entry: 1431 %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl) 1432 ret <16 x i16> %0 1433} 1434 1435declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) 1436 1437define <32 x i16> @test_sf_vc_v_ivw_se_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { 1438; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4: 1439; CHECK: # %bb.0: # %entry 1440; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma 1441; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 1442; CHECK-NEXT: ret 1443entry: 1444 %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl) 1445 ret <32 x i16> %0 1446} 1447 1448declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) 1449 1450define <1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { 1451; CHECK-LABEL: 
test_sf_vc_v_ivw_se_e16mf4: 1452; CHECK: # %bb.0: # %entry 1453; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 1454; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1455; CHECK-NEXT: ret 1456entry: 1457 %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl) 1458 ret <1 x i32> %0 1459} 1460 1461declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) 1462 1463define <2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { 1464; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2: 1465; CHECK: # %bb.0: # %entry 1466; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 1467; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1468; CHECK-NEXT: ret 1469entry: 1470 %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl) 1471 ret <2 x i32> %0 1472} 1473 1474declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) 1475 1476define <4 x i32> @test_sf_vc_v_ivw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { 1477; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1: 1478; CHECK: # %bb.0: # %entry 1479; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma 1480; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1481; CHECK-NEXT: ret 1482entry: 1483 %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl) 1484 ret <4 x i32> %0 1485} 1486 1487declare <4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) 1488 1489define <8 x i32> @test_sf_vc_v_ivw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { 1490; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2: 1491; CHECK: # %bb.0: # %entry 1492; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma 1493; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 1494; CHECK-NEXT: ret 1495entry: 1496 %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl) 1497 ret <8 x i32> %0 1498} 1499 1500declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) 1501 1502define <16 x i32> @test_sf_vc_v_ivw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { 1503; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4: 1504; CHECK: # %bb.0: # %entry 1505; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma 1506; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 1507; CHECK-NEXT: ret 1508entry: 1509 %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl) 1510 ret <16 x i32> %0 1511} 1512 1513declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) 1514 1515define <1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { 1516; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2: 1517; CHECK: # %bb.0: # %entry 1518; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 1519; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1520; CHECK-NEXT: ret 1521entry: 1522 %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl) 1523 ret <1 x i64> %0 1524} 1525 1526declare <1 x i64> 
@llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) 1527 1528define <2 x i64> @test_sf_vc_v_ivw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { 1529; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1: 1530; CHECK: # %bb.0: # %entry 1531; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 1532; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1533; CHECK-NEXT: ret 1534entry: 1535 %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl) 1536 ret <2 x i64> %0 1537} 1538 1539declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) 1540 1541define <4 x i64> @test_sf_vc_v_ivw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { 1542; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2: 1543; CHECK: # %bb.0: # %entry 1544; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma 1545; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 1546; CHECK-NEXT: ret 1547entry: 1548 %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl) 1549 ret <4 x i64> %0 1550} 1551 1552declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) 1553 1554define <8 x i64> @test_sf_vc_v_ivw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { 1555; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4: 1556; CHECK: # %bb.0: # %entry 1557; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma 1558; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 1559; CHECK-NEXT: ret 1560entry: 1561 %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl) 1562 ret <8 x i64> %0 1563} 1564 1565declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) 1566 1567define <1 x i16> @test_sf_vc_v_ivw_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, iXLen %vl) { 1568; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8: 1569; CHECK: # %bb.0: # %entry 1570; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma 1571; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1572; CHECK-NEXT: ret 1573entry: 1574 %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <1 x i16> %vd, <1 x i8> %vs2, iXLen 10, iXLen %vl) 1575 ret <1 x i16> %0 1576} 1577 1578declare <1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <1 x i16>, <1 x i8>, iXLen, iXLen) 1579 1580define <2 x i16> @test_sf_vc_v_ivw_e8mf4(<2 x i16> %vd, <2 x i8> %vs2, iXLen %vl) { 1581; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4: 1582; CHECK: # %bb.0: # %entry 1583; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma 1584; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1585; CHECK-NEXT: ret 1586entry: 1587 %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <2 x i16> %vd, <2 x i8> %vs2, iXLen 10, iXLen %vl) 1588 ret <2 x i16> %0 1589} 1590 1591declare <2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <2 x i16>, <2 x i8>, iXLen, iXLen) 1592 1593define <4 x i16> @test_sf_vc_v_ivw_e8mf2(<4 x i16> %vd, <4 x i8> %vs2, iXLen %vl) { 1594; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2: 1595; CHECK: # %bb.0: # %entry 1596; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma 1597; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1598; CHECK-NEXT: ret 1599entry: 1600 %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <4 x i16> %vd, <4 x 
i8> %vs2, iXLen 10, iXLen %vl) 1601 ret <4 x i16> %0 1602} 1603 1604declare <4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <4 x i16>, <4 x i8>, iXLen, iXLen) 1605 1606define <8 x i16> @test_sf_vc_v_ivw_e8m1(<8 x i16> %vd, <8 x i8> %vs2, iXLen %vl) { 1607; CHECK-LABEL: test_sf_vc_v_ivw_e8m1: 1608; CHECK: # %bb.0: # %entry 1609; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma 1610; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1611; CHECK-NEXT: ret 1612entry: 1613 %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <8 x i16> %vd, <8 x i8> %vs2, iXLen 10, iXLen %vl) 1614 ret <8 x i16> %0 1615} 1616 1617declare <8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <8 x i16>, <8 x i8>, iXLen, iXLen) 1618 1619define <16 x i16> @test_sf_vc_v_ivw_e8m2(<16 x i16> %vd, <16 x i8> %vs2, iXLen %vl) { 1620; CHECK-LABEL: test_sf_vc_v_ivw_e8m2: 1621; CHECK: # %bb.0: # %entry 1622; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma 1623; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 1624; CHECK-NEXT: ret 1625entry: 1626 %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <16 x i16> %vd, <16 x i8> %vs2, iXLen 10, iXLen %vl) 1627 ret <16 x i16> %0 1628} 1629 1630declare <16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <16 x i16>, <16 x i8>, iXLen, iXLen) 1631 1632define <32 x i16> @test_sf_vc_v_ivw_e8m4(<32 x i16> %vd, <32 x i8> %vs2, iXLen %vl) { 1633; CHECK-LABEL: test_sf_vc_v_ivw_e8m4: 1634; CHECK: # %bb.0: # %entry 1635; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma 1636; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 1637; CHECK-NEXT: ret 1638entry: 1639 %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <32 x i16> %vd, <32 x i8> %vs2, iXLen 10, iXLen %vl) 1640 ret <32 x i16> %0 1641} 1642 1643declare <32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <32 x i16>, <32 x i8>, iXLen, iXLen) 1644 1645define <1 x i32> @test_sf_vc_v_ivw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, iXLen %vl) { 1646; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4: 1647; CHECK: # %bb.0: # %entry 1648; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 1649; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1650; CHECK-NEXT: ret 1651entry: 1652 %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <1 x i32> %vd, <1 x i16> %vs2, iXLen 10, iXLen %vl) 1653 ret <1 x i32> %0 1654} 1655 1656declare <1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <1 x i32>, <1 x i16>, iXLen, iXLen) 1657 1658define <2 x i32> @test_sf_vc_v_ivw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, iXLen %vl) { 1659; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2: 1660; CHECK: # %bb.0: # %entry 1661; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 1662; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1663; CHECK-NEXT: ret 1664entry: 1665 %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <2 x i32> %vd, <2 x i16> %vs2, iXLen 10, iXLen %vl) 1666 ret <2 x i32> %0 1667} 1668 1669declare <2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <2 x i32>, <2 x i16>, iXLen, iXLen) 1670 1671define <4 x i32> @test_sf_vc_v_ivw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, iXLen %vl) { 1672; CHECK-LABEL: test_sf_vc_v_ivw_e16m1: 1673; CHECK: # %bb.0: # %entry 1674; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma 1675; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1676; CHECK-NEXT: ret 1677entry: 1678 %0 = tail call <4 x i32> 
@llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <4 x i32> %vd, <4 x i16> %vs2, iXLen 10, iXLen %vl) 1679 ret <4 x i32> %0 1680} 1681 1682declare <4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <4 x i32>, <4 x i16>, iXLen, iXLen) 1683 1684define <8 x i32> @test_sf_vc_v_ivw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, iXLen %vl) { 1685; CHECK-LABEL: test_sf_vc_v_ivw_e16m2: 1686; CHECK: # %bb.0: # %entry 1687; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma 1688; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 1689; CHECK-NEXT: ret 1690entry: 1691 %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <8 x i32> %vd, <8 x i16> %vs2, iXLen 10, iXLen %vl) 1692 ret <8 x i32> %0 1693} 1694 1695declare <8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <8 x i32>, <8 x i16>, iXLen, iXLen) 1696 1697define <16 x i32> @test_sf_vc_v_ivw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, iXLen %vl) { 1698; CHECK-LABEL: test_sf_vc_v_ivw_e16m4: 1699; CHECK: # %bb.0: # %entry 1700; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma 1701; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 1702; CHECK-NEXT: ret 1703entry: 1704 %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <16 x i32> %vd, <16 x i16> %vs2, iXLen 10, iXLen %vl) 1705 ret <16 x i32> %0 1706} 1707 1708declare <16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <16 x i32>, <16 x i16>, iXLen, iXLen) 1709 1710define <1 x i64> @test_sf_vc_v_ivw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, iXLen %vl) { 1711; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2: 1712; CHECK: # %bb.0: # %entry 1713; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 1714; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1715; CHECK-NEXT: ret 1716entry: 1717 %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <1 x i64> %vd, <1 x i32> %vs2, iXLen 10, iXLen %vl) 1718 ret <1 x i64> %0 1719} 1720 1721declare <1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <1 x i64>, <1 x i32>, iXLen, iXLen) 1722 1723define <2 x i64> @test_sf_vc_v_ivw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, iXLen %vl) { 1724; CHECK-LABEL: test_sf_vc_v_ivw_e32m1: 1725; CHECK: # %bb.0: # %entry 1726; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 1727; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 1728; CHECK-NEXT: ret 1729entry: 1730 %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <2 x i64> %vd, <2 x i32> %vs2, iXLen 10, iXLen %vl) 1731 ret <2 x i64> %0 1732} 1733 1734declare <2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <2 x i64>, <2 x i32>, iXLen, iXLen) 1735 1736define <4 x i64> @test_sf_vc_v_ivw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, iXLen %vl) { 1737; CHECK-LABEL: test_sf_vc_v_ivw_e32m2: 1738; CHECK: # %bb.0: # %entry 1739; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma 1740; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 1741; CHECK-NEXT: ret 1742entry: 1743 %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <4 x i64> %vd, <4 x i32> %vs2, iXLen 10, iXLen %vl) 1744 ret <4 x i64> %0 1745} 1746 1747declare <4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <4 x i64>, <4 x i32>, iXLen, iXLen) 1748 1749define <8 x i64> @test_sf_vc_v_ivw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, iXLen %vl) { 1750; CHECK-LABEL: test_sf_vc_v_ivw_e32m4: 1751; CHECK: # %bb.0: # %entry 1752; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma 1753; CHECK-NEXT: 
sf.vc.v.ivw 3, v8, v12, 10 1754; CHECK-NEXT: ret 1755entry: 1756 %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <8 x i64> %vd, <8 x i32> %vs2, iXLen 10, iXLen %vl) 1757 ret <8 x i64> %0 1758} 1759 1760declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen) 1761 1762define void @test_sf_vc_fwvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { 1763; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2: 1764; CHECK: # %bb.0: # %entry 1765; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 1766; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 1767; CHECK-NEXT: ret 1768entry: 1769 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) 1770 ret void 1771} 1772 1773declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen) 1774 1775define <1 x float> @test_sf_vc_fw_fwvvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { 1776; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2: 1777; CHECK: # %bb.0: # %entry 1778; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 1779; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 1780; CHECK-NEXT: ret 1781entry: 1782 %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) 1783 ret <1 x float> %0 1784} 1785 1786declare <1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen) 1787 1788define void @test_sf_vc_fwvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { 1789; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1: 1790; CHECK: # %bb.0: # %entry 1791; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 1792; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 1793; CHECK-NEXT: ret 1794entry: 1795 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) 1796 ret void 1797} 1798 1799declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen) 1800 1801define <2 x float> @test_sf_vc_fw_fwvvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { 1802; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1: 1803; CHECK: # %bb.0: # %entry 1804; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 1805; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 1806; CHECK-NEXT: ret 1807entry: 1808 %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) 1809 ret <2 x float> %0 1810} 1811 1812declare <2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen) 1813 1814define void @test_sf_vc_fwvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { 1815; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2: 1816; CHECK: # %bb.0: # %entry 1817; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 1818; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 1819; CHECK-NEXT: ret 1820entry: 1821 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) 1822 ret void 1823} 1824 1825declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen) 1826 1827define <4 x float> 
@test_sf_vc_fw_fwvvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { 1828; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2: 1829; CHECK: # %bb.0: # %entry 1830; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma 1831; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 1832; CHECK-NEXT: ret 1833entry: 1834 %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) 1835 ret <4 x float> %0 1836} 1837 1838declare <4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen) 1839 1840define void @test_sf_vc_fwvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { 1841; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4: 1842; CHECK: # %bb.0: # %entry 1843; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 1844; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11 1845; CHECK-NEXT: ret 1846entry: 1847 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) 1848 ret void 1849} 1850 1851declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen) 1852 1853define <8 x float> @test_sf_vc_fw_fwvvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { 1854; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4: 1855; CHECK: # %bb.0: # %entry 1856; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma 1857; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 1858; CHECK-NEXT: ret 1859entry: 1860 %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) 1861 ret <8 x float> %0 1862} 1863 1864declare <8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen) 1865 1866define void @test_sf_vc_fwvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { 1867; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8: 1868; CHECK: # %bb.0: # %entry 1869; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 1870; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14 1871; CHECK-NEXT: ret 1872entry: 1873 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) 1874 ret void 1875} 1876 1877declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen) 1878 1879define <16 x float> @test_sf_vc_fw_fwvvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { 1880; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8: 1881; CHECK: # %bb.0: # %entry 1882; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma 1883; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 1884; CHECK-NEXT: ret 1885entry: 1886 %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) 1887 ret <16 x float> %0 1888} 1889 1890declare <16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen) 1891 1892define void @test_sf_vc_fwvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { 1893; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1: 1894; CHECK: # %bb.0: # %entry 1895; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 1896; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 1897; CHECK-NEXT: ret 1898entry: 1899 tail call void 
@llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) 1900 ret void 1901} 1902 1903declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen) 1904 1905define <1 x double> @test_sf_vc_fw_fwvvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { 1906; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1: 1907; CHECK: # %bb.0: # %entry 1908; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 1909; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 1910; CHECK-NEXT: ret 1911entry: 1912 %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) 1913 ret <1 x double> %0 1914} 1915 1916declare <1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen) 1917 1918define void @test_sf_vc_fwvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { 1919; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2: 1920; CHECK: # %bb.0: # %entry 1921; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 1922; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10 1923; CHECK-NEXT: ret 1924entry: 1925 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) 1926 ret void 1927} 1928 1929declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen) 1930 1931define <2 x double> @test_sf_vc_fw_fwvvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { 1932; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2: 1933; CHECK: # %bb.0: # %entry 1934; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 1935; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10 1936; CHECK-NEXT: ret 1937entry: 1938 %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) 1939 ret <2 x double> %0 1940} 1941 1942declare <2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen) 1943 1944define void @test_sf_vc_fwvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { 1945; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4: 1946; CHECK: # %bb.0: # %entry 1947; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 1948; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11 1949; CHECK-NEXT: ret 1950entry: 1951 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) 1952 ret void 1953} 1954 1955declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen) 1956 1957define <4 x double> @test_sf_vc_fw_fwvvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { 1958; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4: 1959; CHECK: # %bb.0: # %entry 1960; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma 1961; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11 1962; CHECK-NEXT: ret 1963entry: 1964 %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) 1965 ret <4 x double> %0 1966} 1967 1968declare <4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen) 1969 1970define void @test_sf_vc_fwvv_se_e64m8(<8 
x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { 1971; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8: 1972; CHECK: # %bb.0: # %entry 1973; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 1974; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14 1975; CHECK-NEXT: ret 1976entry: 1977 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) 1978 ret void 1979} 1980 1981declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen) 1982 1983define <8 x double> @test_sf_vc_fw_fwvvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { 1984; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8: 1985; CHECK: # %bb.0: # %entry 1986; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma 1987; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14 1988; CHECK-NEXT: ret 1989entry: 1990 %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) 1991 ret <8 x double> %0 1992} 1993 1994declare <8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen) 1995 1996define void @test_sf_vc_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { 1997; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2: 1998; CHECK: # %bb.0: # %entry 1999; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 2000; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 2001; CHECK-NEXT: ret 2002entry: 2003 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) 2004 ret void 2005} 2006 2007declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen) 2008 2009define <1 x float> @test_sf_vc_w_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { 2010; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2: 2011; CHECK: # %bb.0: # %entry 2012; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma 2013; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 2014; CHECK-NEXT: ret 2015entry: 2016 %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) 2017 ret <1 x float> %0 2018} 2019 2020declare <1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen) 2021 2022define void @test_sf_vc_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { 2023; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1: 2024; CHECK: # %bb.0: # %entry 2025; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 2026; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 2027; CHECK-NEXT: ret 2028entry: 2029 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) 2030 ret void 2031} 2032 2033declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen) 2034 2035define <2 x float> @test_sf_vc_w_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { 2036; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m1: 2037; CHECK: # %bb.0: # %entry 2038; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma 2039; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 2040; CHECK-NEXT: ret 2041entry: 2042 %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) 2043 ret <2 x float> %0 
2044} 2045 2046declare <2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen) 2047 2048define void @test_sf_vc_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { 2049; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2: 2050; CHECK: # %bb.0: # %entry 2051; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 2052; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 2053; CHECK-NEXT: ret 2054entry: 2055 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) 2056 ret void 2057} 2058 2059declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen) 2060 2061define <4 x float> @test_sf_vc_w_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { 2062; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2: 2063; CHECK: # %bb.0: # %entry 2064; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma 2065; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 2066; CHECK-NEXT: ret 2067entry: 2068 %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) 2069 ret <4 x float> %0 2070} 2071 2072declare <4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen) 2073 2074define void @test_sf_vc_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { 2075; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4: 2076; CHECK: # %bb.0: # %entry 2077; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 2078; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 2079; CHECK-NEXT: ret 2080entry: 2081 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) 2082 ret void 2083} 2084 2085declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen) 2086 2087define <8 x float> @test_sf_vc_w_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { 2088; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4: 2089; CHECK: # %bb.0: # %entry 2090; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma 2091; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 2092; CHECK-NEXT: ret 2093entry: 2094 %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) 2095 ret <8 x float> %0 2096} 2097 2098declare <8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen) 2099 2100define void @test_sf_vc_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { 2101; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8: 2102; CHECK: # %bb.0: # %entry 2103; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 2104; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 2105; CHECK-NEXT: ret 2106entry: 2107 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) 2108 ret void 2109} 2110 2111declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen) 2112 2113define <16 x float> @test_sf_vc_w_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { 2114; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8: 2115; CHECK: # %bb.0: # %entry 2116; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma 2117; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 2118; CHECK-NEXT: ret 2119entry: 2120 %0 = tail call <16 x float> 
@llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) 2121 ret <16 x float> %0 2122} 2123 2124declare <16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen) 2125 2126define void @test_sf_vc_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { 2127; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1: 2128; CHECK: # %bb.0: # %entry 2129; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 2130; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 2131; CHECK-NEXT: ret 2132entry: 2133 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 2134 ret void 2135} 2136 2137declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen) 2138 2139define <1 x double> @test_sf_vc_w_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { 2140; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1: 2141; CHECK: # %bb.0: # %entry 2142; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma 2143; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 2144; CHECK-NEXT: ret 2145entry: 2146 %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) 2147 ret <1 x double> %0 2148} 2149 2150declare <1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen) 2151 2152define void @test_sf_vc_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { 2153; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2: 2154; CHECK: # %bb.0: # %entry 2155; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 2156; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0 2157; CHECK-NEXT: ret 2158entry: 2159 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) 2160 ret void 2161} 2162 2163declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, <2 x double>, <2 x i32>, i32, iXLen) 2164 2165define <2 x double> @test_sf_vc_w_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { 2166; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2: 2167; CHECK: # %bb.0: # %entry 2168; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma 2169; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0 2170; CHECK-NEXT: ret 2171entry: 2172 %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) 2173 ret <2 x double> %0 2174} 2175 2176declare <2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x double>, <2 x i32>, i32, iXLen) 2177 2178define void @test_sf_vc_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { 2179; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4: 2180; CHECK: # %bb.0: # %entry 2181; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 2182; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0 2183; CHECK-NEXT: ret 2184entry: 2185 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) 2186 ret void 2187} 2188 2189declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen) 2190 2191define <4 x double> @test_sf_vc_w_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { 2192; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4: 2193; CHECK: # %bb.0: # 
%entry 2194; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma 2195; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0 2196; CHECK-NEXT: ret 2197entry: 2198 %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) 2199 ret <4 x double> %0 2200} 2201 2202declare <4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen) 2203 2204define void @test_sf_vc_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { 2205; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8: 2206; CHECK: # %bb.0: # %entry 2207; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 2208; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0 2209; CHECK-NEXT: ret 2210entry: 2211 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) 2212 ret void 2213} 2214 2215declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen) 2216 2217define <8 x double> @test_sf_vc_w_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { 2218; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8: 2219; CHECK: # %bb.0: # %entry 2220; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma 2221; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0 2222; CHECK-NEXT: ret 2223entry: 2224 %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) 2225 ret <8 x double> %0 2226} 2227 2228declare <8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen) 2229 2230define void @test_sf_vc_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) { 2231; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2: 2232; CHECK: # %bb.0: # %entry 2233; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 2234; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3 2235; CHECK-NEXT: ret 2236entry: 2237 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, iXLen 3, iXLen %vl) 2238 ret void 2239} 2240 2241declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen) 2242 2243define <1 x float> @test_sf_vc_fw_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) { 2244; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2: 2245; CHECK: # %bb.0: # %entry 2246; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 2247; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3 2248; CHECK-NEXT: ret 2249entry: 2250 %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, iXLen 3, iXLen %vl) 2251 ret <1 x float> %0 2252} 2253 2254declare <1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen) 2255 2256define void @test_sf_vc_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) { 2257; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1: 2258; CHECK: # %bb.0: # %entry 2259; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 2260; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3 2261; CHECK-NEXT: ret 2262entry: 2263 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, iXLen 3, iXLen %vl) 2264 ret void 2265} 2266 2267declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen) 2268 2269define <2 x float> 
@test_sf_vc_fw_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) { 2270; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1: 2271; CHECK: # %bb.0: # %entry 2272; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma 2273; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3 2274; CHECK-NEXT: ret 2275entry: 2276 %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, iXLen 3, iXLen %vl) 2277 ret <2 x float> %0 2278} 2279 2280declare <2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen) 2281 2282define void @test_sf_vc_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) { 2283; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2: 2284; CHECK: # %bb.0: # %entry 2285; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 2286; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3 2287; CHECK-NEXT: ret 2288entry: 2289 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, iXLen 3, iXLen %vl) 2290 ret void 2291} 2292 2293declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen) 2294 2295define <4 x float> @test_sf_vc_fw_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) { 2296; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2: 2297; CHECK: # %bb.0: # %entry 2298; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma 2299; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3 2300; CHECK-NEXT: ret 2301entry: 2302 %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, iXLen 3, iXLen %vl) 2303 ret <4 x float> %0 2304} 2305 2306declare <4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen) 2307 2308define void @test_sf_vc_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) { 2309; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4: 2310; CHECK: # %bb.0: # %entry 2311; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 2312; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 3 2313; CHECK-NEXT: ret 2314entry: 2315 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, iXLen 3, iXLen %vl) 2316 ret void 2317} 2318 2319declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen) 2320 2321define <8 x float> @test_sf_vc_fw_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) { 2322; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4: 2323; CHECK: # %bb.0: # %entry 2324; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma 2325; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 3 2326; CHECK-NEXT: ret 2327entry: 2328 %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, iXLen 3, iXLen %vl) 2329 ret <8 x float> %0 2330} 2331 2332declare <8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen) 2333 2334define void @test_sf_vc_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) { 2335; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8: 2336; CHECK: # %bb.0: # %entry 2337; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 2338; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 3 2339; CHECK-NEXT: ret 2340entry: 2341 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, iXLen 3, iXLen %vl) 2342 ret void 2343} 2344 2345declare void 
@llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen) 2346 2347define <16 x float> @test_sf_vc_fw_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) { 2348; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8: 2349; CHECK: # %bb.0: # %entry 2350; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma 2351; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 3 2352; CHECK-NEXT: ret 2353entry: 2354 %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, iXLen 3, iXLen %vl) 2355 ret <16 x float> %0 2356} 2357 2358declare <16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen) 2359 2360define void @test_sf_vc_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) { 2361; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1: 2362; CHECK: # %bb.0: # %entry 2363; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 2364; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3 2365; CHECK-NEXT: ret 2366entry: 2367 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, iXLen 3, iXLen %vl) 2368 ret void 2369} 2370 2371declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen) 2372 2373define <1 x double> @test_sf_vc_fw_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) { 2374; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1: 2375; CHECK: # %bb.0: # %entry 2376; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 2377; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3 2378; CHECK-NEXT: ret 2379entry: 2380 %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, iXLen 3, iXLen %vl) 2381 ret <1 x double> %0 2382} 2383 2384declare <1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen) 2385 2386define void @test_sf_vc_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) { 2387; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2: 2388; CHECK: # %bb.0: # %entry 2389; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 2390; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3 2391; CHECK-NEXT: ret 2392entry: 2393 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, iXLen 3, iXLen %vl) 2394 ret void 2395} 2396 2397declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen) 2398 2399define <2 x double> @test_sf_vc_fw_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) { 2400; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2: 2401; CHECK: # %bb.0: # %entry 2402; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma 2403; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3 2404; CHECK-NEXT: ret 2405entry: 2406 %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, iXLen 3, iXLen %vl) 2407 ret <2 x double> %0 2408} 2409 2410declare <2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen) 2411 2412define void @test_sf_vc_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) { 2413; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4: 2414; CHECK: # %bb.0: # %entry 2415; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 2416; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 3 2417; CHECK-NEXT: ret 2418entry: 2419 tail call void 
@llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen)

define <4 x double> @test_sf_vc_fw_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, iXLen 3, iXLen %vl)
  ret <4 x double> %0
}

declare <4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen)

define <8 x double> @test_sf_vc_fw_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, iXLen 3, iXLen %vl)
  ret <8 x double> %0
}

declare <8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen)

define void @test_sf_vc_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen 1, <1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen)

define <1 x float> @test_sf_vc_fw_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl)
  ret <1 x float> %0
}

declare <1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen)

define void @test_sf_vc_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen 1, <2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen)

define <2 x float> @test_sf_vc_fw_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl)
  ret <2 x float> %0
}

declare <2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen)

define void @test_sf_vc_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen 1, <4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen)

define <4 x float> @test_sf_vc_fw_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl)
  ret <4 x float> %0
}

declare <4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen)

define void @test_sf_vc_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen 1, <8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen)

define <8 x float> @test_sf_vc_fw_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl)
  ret <8 x float> %0
}

declare <8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen)

define void @test_sf_vc_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen 1, <16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen)

define <16 x float> @test_sf_vc_fw_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl)
  ret <16 x float> %0
}

declare <16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen)

define void @test_sf_vc_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen 1, <1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen)

define <1 x double> @test_sf_vc_fw_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl)
  ret <1 x double> %0
}

declare <1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen)

define void @test_sf_vc_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen 1, <2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen)

define <2 x double> @test_sf_vc_fw_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl)
  ret <2 x double> %0
}

declare <2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen)

define void @test_sf_vc_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen 1, <4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen)

define <4 x double> @test_sf_vc_fw_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl)
  ret <4 x double> %0
}

declare <4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen)

define void @test_sf_vc_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen 1, <8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen)

define <8 x double> @test_sf_vc_fw_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl)
  ret <8 x double> %0
}

declare <8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen)