; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+m,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V128,RV32-V128
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+m,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V128,RV64-V128
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+m,+zvl512b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V512,RV32-V512
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+m,+zvl512b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V512,RV64-V512

; Test optimizing interleaves to widening arithmetic.
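;
; The lowering checked below relies on an integer identity on the elements'
; bit patterns (a sketch, reading each SEW-bit element as an unsigned
; integer; vd/vx/vy stand for whatever registers a given test uses):
;
;   vwaddu.vv  vd, vx, vy   ; vd = zext(x) + zext(y), in 2*SEW-bit lanes
;   li         a0, -1
;   vwmaccu.vx vd, a0, vy   ; vd += (2^SEW - 1) * zext(y)
;
; which leaves vd = zext(x) + 2^SEW * zext(y): x in the low half and y in
; the high half of every widened lane. Read back as SEW-bit elements, that
; is exactly the interleave (x0, y0, x1, y1, ...).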

define <4 x half> @interleave_v2f16(<2 x half> %x, <2 x half> %y) {
; CHECK-LABEL: interleave_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %a = shufflevector <2 x half> %x, <2 x half> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x half> %a
}

; Vector order switched for coverage.
define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: interleave_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v9, v8
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %a = shufflevector <2 x float> %x, <2 x float> %y, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
  ret <4 x float> %a
}

; One vXf64 test case to verify that we don't optimize it: RVV widening
; integer ops stop at 2*SEW = 64, so the vwaddu/vwmaccu trick is unavailable
; for 64-bit elements and we fall back to vrgather.
; FIXME: Is there better codegen we can do here?
define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; V128-LABEL: interleave_v2f64:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT:    vmv1r.v v12, v9
; V128-NEXT:    vid.v v9
; V128-NEXT:    vmv.v.i v0, 10
; V128-NEXT:    vsrl.vi v14, v9, 1
; V128-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
; V128-NEXT:    vrgatherei16.vv v10, v8, v14
; V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
; V128-NEXT:    vmv.v.v v8, v10
; V128-NEXT:    ret
;
; RV32-V512-LABEL: interleave_v2f64:
; RV32-V512:       # %bb.0:
; RV32-V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT:    vid.v v10
; RV32-V512-NEXT:    vsrl.vi v11, v10, 1
; RV32-V512-NEXT:    vmv.v.i v0, 10
; RV32-V512-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-V512-NEXT:    vrgatherei16.vv v10, v8, v11
; RV32-V512-NEXT:    vrgatherei16.vv v10, v9, v11, v0.t
; RV32-V512-NEXT:    vmv.v.v v8, v10
; RV32-V512-NEXT:    ret
;
; RV64-V512-LABEL: interleave_v2f64:
; RV64-V512:       # %bb.0:
; RV64-V512-NEXT:    vsetivli zero, 4, e64, m1, ta, mu
; RV64-V512-NEXT:    vid.v v10
; RV64-V512-NEXT:    vsrl.vi v11, v10, 1
; RV64-V512-NEXT:    vmv.v.i v0, 10
; RV64-V512-NEXT:    vrgather.vv v10, v8, v11
; RV64-V512-NEXT:    vrgather.vv v10, v9, v11, v0.t
; RV64-V512-NEXT:    vmv.v.v v8, v10
; RV64-V512-NEXT:    ret
  %a = shufflevector <2 x double> %x, <2 x double> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x double> %a
}

; Undef elements for coverage.
define <8 x half> @interleave_v4f16(<4 x half> %x, <4 x half> %y) {
; V128-LABEL: interleave_v4f16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v10, v8, v9
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v9
; V128-NEXT:    vmv1r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4f16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <4 x half> %x, <4 x half> %y, <8 x i32> <i32 0, i32 4, i32 undef, i32 5, i32 2, i32 undef, i32 3, i32 7>
  ret <8 x half> %a
}

define <8 x float> @interleave_v4f32(<4 x float> %x, <4 x float> %y) {
; V128-LABEL: interleave_v4f32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vwaddu.vv v10, v8, v9
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v9
; V128-NEXT:    vmv2r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4f32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <4 x float> %x, <4 x float> %y, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x float> %a
}

; Vector order switched for coverage.
define <16 x half> @interleave_v8f16(<8 x half> %x, <8 x half> %y) {
; V128-LABEL: interleave_v8f16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; V128-NEXT:    vwaddu.vv v10, v9, v8
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v8
; V128-NEXT:    vmv2r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v8f16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 8, e16, mf4, ta, ma
; V512-NEXT:    vwaddu.vv v10, v9, v8
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v8
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <8 x half> %x, <8 x half> %y, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
  ret <16 x half> %a
}

define <16 x float> @interleave_v8f32(<8 x float> %x, <8 x float> %y) {
; V128-LABEL: interleave_v8f32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; V128-NEXT:    vwaddu.vv v12, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v12, a0, v10
; V128-NEXT:    vmv4r.v v8, v12
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v8f32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 8, e32, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <8 x float> %x, <8 x float> %y, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  ret <16 x float> %a
}

define <32 x half> @interleave_v16f16(<16 x half> %x, <16 x half> %y) {
; V128-LABEL: interleave_v16f16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; V128-NEXT:    vwaddu.vv v12, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v12, a0, v10
; V128-NEXT:    vmv4r.v v8, v12
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v16f16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 16, e16, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <16 x half> %x, <16 x half> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x half> %a
}

define <32 x float> @interleave_v16f32(<16 x float> %x, <16 x float> %y) {
; V128-LABEL: interleave_v16f32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT:    vwaddu.vv v16, v8, v12
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v16, a0, v12
; V128-NEXT:    vmv8r.v v8, v16
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v16f32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv2r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <16 x float> %x, <16 x float> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x float> %a
}

define <64 x half> @interleave_v32f16(<32 x half> %x, <32 x half> %y) {
; V128-LABEL: interleave_v32f16:
; V128:       # %bb.0:
; V128-NEXT:    li a0, 32
; V128-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; V128-NEXT:    vwaddu.vv v16, v8, v12
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v16, a0, v12
; V128-NEXT:    vmv8r.v v8, v16
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v32f16:
; V512:       # %bb.0:
; V512-NEXT:    li a0, 32
; V512-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv2r.v v8, v10
; V512-NEXT:    ret
  %a = shufflevector <32 x half> %x, <32 x half> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x half> %a
}

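; At VLEN=128 the 64 x f32 result needs two m8 register groups, so the V128
; lowering below splits the work: the low halves of the sources still go
; through vwaddu/vwmaccu, while the high halves are interleaved as e64 lanes
; via vzext.vf2 + vsll.vx + vmerge (the same identity written with an
; explicit shift), at the cost of a vector spill and reload.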
define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-LABEL: interleave_v32f32:
; V128:       # %bb.0:
; V128-NEXT:    addi sp, sp, -16
; V128-NEXT:    .cfi_def_cfa_offset 16
; V128-NEXT:    csrr a0, vlenb
; V128-NEXT:    slli a0, a0, 3
; V128-NEXT:    sub sp, sp, a0
; V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; V128-NEXT:    addi a0, sp, 16
; V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; V128-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; V128-NEXT:    vslidedown.vi v24, v16, 16
; V128-NEXT:    li a0, 32
; V128-NEXT:    lui a1, 699051
; V128-NEXT:    vslidedown.vi v0, v8, 16
; V128-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; V128-NEXT:    vzext.vf2 v8, v24
; V128-NEXT:    addi a1, a1, -1366
; V128-NEXT:    vzext.vf2 v24, v0
; V128-NEXT:    vmv.s.x v0, a1
; V128-NEXT:    vsll.vx v8, v8, a0
; V128-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; V128-NEXT:    vmerge.vvm v24, v24, v8, v0
; V128-NEXT:    addi a0, sp, 16
; V128-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT:    vwaddu.vv v0, v8, v16
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v0, a0, v16
; V128-NEXT:    vmv8r.v v8, v0
; V128-NEXT:    vmv8r.v v16, v24
; V128-NEXT:    csrr a0, vlenb
; V128-NEXT:    slli a0, a0, 3
; V128-NEXT:    add sp, sp, a0
; V128-NEXT:    .cfi_def_cfa sp, 16
; V128-NEXT:    addi sp, sp, 16
; V128-NEXT:    .cfi_def_cfa_offset 0
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v32f32:
; V512:       # %bb.0:
; V512-NEXT:    li a0, 32
; V512-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; V512-NEXT:    vwaddu.vv v12, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v12, a0, v10
; V512-NEXT:    vmv4r.v v8, v12
; V512-NEXT:    ret
  %a = shufflevector <32 x float> %x, <32 x float> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x float> %a
}

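; The unary cases interleave the low and high halves of a single source: the
; high half is extracted with vslidedown and the two halves then feed the
; same widening-arithmetic lowering as above.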
define <4 x half> @unary_interleave_v4f16(<4 x half> %x) {
; V128-LABEL: unary_interleave_v4f16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 2
; V128-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v4f16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 2
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
  %a = shufflevector <4 x half> %x, <4 x half> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x half> %a
}

define <4 x float> @unary_interleave_v4f32(<4 x float> %x) {
; V128-LABEL: unary_interleave_v4f32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 2
; V128-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v4f32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 2
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
  %a = shufflevector <4 x float> %x, <4 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x float> %a
}

; FIXME: Is there better codegen we can do here?
define <4 x double> @unary_interleave_v4f64(<4 x double> %x) {
; V128-LABEL: unary_interleave_v4f64:
; V128:       # %bb.0:
; V128-NEXT:    lui a0, 12304
; V128-NEXT:    addi a0, a0, 512
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vmv.s.x v10, a0
; V128-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; V128-NEXT:    vsext.vf2 v12, v10
; V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; V128-NEXT:    vrgatherei16.vv v10, v8, v12
; V128-NEXT:    vmv.v.v v8, v10
; V128-NEXT:    ret
;
; RV32-V512-LABEL: unary_interleave_v4f64:
; RV32-V512:       # %bb.0:
; RV32-V512-NEXT:    lui a0, 12304
; RV32-V512-NEXT:    addi a0, a0, 512
; RV32-V512-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-V512-NEXT:    vmv.s.x v9, a0
; RV32-V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT:    vsext.vf2 v10, v9
; RV32-V512-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-V512-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-V512-NEXT:    vmv.v.v v8, v9
; RV32-V512-NEXT:    ret
;
; RV64-V512-LABEL: unary_interleave_v4f64:
; RV64-V512:       # %bb.0:
; RV64-V512-NEXT:    lui a0, 12304
; RV64-V512-NEXT:    addi a0, a0, 512
; RV64-V512-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
; RV64-V512-NEXT:    vmv.s.x v9, a0
; RV64-V512-NEXT:    vsext.vf8 v10, v9
; RV64-V512-NEXT:    vrgather.vv v9, v8, v10
; RV64-V512-NEXT:    vmv.v.v v8, v9
; RV64-V512-NEXT:    ret
  %a = shufflevector <4 x double> %x, <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x double> %a
}

define <8 x half> @unary_interleave_v8f16(<8 x half> %x) {
; V128-LABEL: unary_interleave_v8f16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 4
; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v8f16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 4
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
  %a = shufflevector <8 x half> %x, <8 x half> poison, <8 x i32> <i32 0, i32 4, i32 undef, i32 5, i32 2, i32 undef, i32 3, i32 7>
  ret <8 x half> %a
}

define <8 x float> @unary_interleave_v8f32(<8 x float> %x) {
; V128-LABEL: unary_interleave_v8f32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
; V128-NEXT:    vslidedown.vi v12, v8, 4
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vwaddu.vv v10, v12, v8
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v8
; V128-NEXT:    vmv2r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v8f32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 4
; V512-NEXT:    vwaddu.vv v9, v10, v8
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v8
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
  %a = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> <i32 4, i32 0, i32 undef, i32 1, i32 6, i32 undef, i32 7, i32 3>
  ret <8 x float> %a
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32-V128: {{.*}}
; RV64-V128: {{.*}}