; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
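
; These tests cover build_vectors whose elements are the results of scalar
; binops. Depending on the operands, the binop either folds into the vector
; domain (a single vector op against a constant-pool vector or an immediate)
; or is performed on the scalar operands before the vector is assembled.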

define <4 x i32> @add_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI0_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

define <8 x i32> @add_constant_rhs_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; CHECK-LABEL: add_constant_rhs_8xi32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI1_0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vslide1down.vx v8, v8, a4
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a5
; CHECK-NEXT:    vslide1down.vx v8, v8, a6
; CHECK-NEXT:    vslide1down.vx v8, v8, a7
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %e4 = add i32 %e, 23
  %e5 = add i32 %f, 23
  %e6 = add i32 %g, 22
  %e7 = add i32 %h, 23
  %v0 = insertelement <8 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <8 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <8 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <8 x i32> %v2, i32 %e3, i32 3
  %v4 = insertelement <8 x i32> %v3, i32 %e4, i32 4
  %v5 = insertelement <8 x i32> %v4, i32 %e5, i32 5
  %v6 = insertelement <8 x i32> %v5, i32 %e6, i32 6
  %v7 = insertelement <8 x i32> %v6, i32 %e7, i32 7
  ret <8 x i32> %v7
}


define <4 x i32> @sub_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: sub_constant_rhs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = sub i32 %a, 23
  %e1 = sub i32 %b, 25
  %e2 = sub i32 %c, 1
  %e3 = sub i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

define <4 x i32> @mul_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: mul_constant_rhs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI3_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = mul i32 %a, 23
  %e1 = mul i32 %b, 25
  %e2 = mul i32 %c, 27
  %e3 = mul i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}
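
; udiv by mixed constants is strength-reduced to a multiply-high sequence
; (vmulhu/vsub/vadd/vsrl); the lane dividing by 1 is passed through unchanged
; by the final vmerge (the v0 mask of 4 selects element 2).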
define <4 x i32> @udiv_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: udiv_constant_rhs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    lui a1, 524288
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    lui a0, %hi(.LCPI4_1)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_1)
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vle32.v v11, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vmulhu.vv v10, v8, v10
; CHECK-NEXT:    vsub.vv v12, v8, v10
; CHECK-NEXT:    vmulhu.vv v9, v12, v9
; CHECK-NEXT:    vadd.vv v9, v9, v10
; CHECK-NEXT:    vmv.v.i v0, 4
; CHECK-NEXT:    vsrl.vv v9, v9, v11
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %e0 = udiv i32 %a, 23
  %e1 = udiv i32 %b, 25
  %e2 = udiv i32 %c, 1
  %e3 = udiv i32 %d, 235
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}
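
; The floating-point variants follow the same pattern using vfslide1down.vf,
; with the constant operands likewise loaded as a vector from the constant
; pool.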
define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: fadd_constant_rhs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI5_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = fadd float %a, 23.0
  %e1 = fadd float %b, 25.0
  %e2 = fadd float %c, 2.0
  %e3 = fadd float %d, 23.0
  %v0 = insertelement <4 x float> poison, float %e0, i32 0
  %v1 = insertelement <4 x float> %v0, float %e1, i32 1
  %v2 = insertelement <4 x float> %v1, float %e2, i32 2
  %v3 = insertelement <4 x float> %v2, float %e3, i32 3
  ret <4 x float> %v3
}

define <4 x float> @fdiv_constant_rhs(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: fdiv_constant_rhs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
; CHECK-NEXT:    vfdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = fdiv float %a, 23.0
  %e1 = fdiv float %b, 25.0
  %e2 = fdiv float %c, 10.0
  %e3 = fdiv float %d, 23.0
  %v0 = insertelement <4 x float> poison, float %e0, i32 0
  %v1 = insertelement <4 x float> %v0, float %e1, i32 1
  %v2 = insertelement <4 x float> %v1, float %e2, i32 2
  %v3 = insertelement <4 x float> %v2, float %e3, i32 3
  ret <4 x float> %v3
}
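
; When every lane adds the same constant, no constant-pool load is needed:
; the splat operand becomes a vadd.vx against a scalar immediate.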
define <4 x i32> @add_constant_rhs_splat(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    li a0, 23
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 23
  %e2 = add i32 %c, 23
  %e3 = add i32 %d, 23
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}
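
; An identity lane (add of 0) breaks the uniform-binop pattern, so the
; remaining adds are done on the scalar operands before the vector is built.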
define <4 x i32> @add_constant_rhs_with_identity(i32 %a, i32 %b, i32 %c, i32 %d) {
; RV32-LABEL: add_constant_rhs_with_identity:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a1, a1, 25
; RV32-NEXT:    addi a2, a2, 1
; RV32-NEXT:    addi a3, a3, 2047
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    addi a0, a3, 308
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: add_constant_rhs_with_identity:
; RV64:       # %bb.0:
; RV64-NEXT:    addiw a1, a1, 25
; RV64-NEXT:    addiw a2, a2, 1
; RV64-NEXT:    addi a3, a3, 2047
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    addiw a0, a3, 308
; RV64-NEXT:    vslide1down.vx v8, v8, a1
; RV64-NEXT:    vslide1down.vx v8, v8, a2
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
  %e0 = add i32 %a, 0
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

define <4 x i32> @add_constant_rhs_identity(i32 %a, i32 %b, i32 %c, i32 %d) {
; RV32-LABEL: add_constant_rhs_identity:
; RV32:       # %bb.0:
; RV32-NEXT:    addi a1, a1, 25
; RV32-NEXT:    addi a2, a2, 1
; RV32-NEXT:    addi a3, a3, 2047
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    addi a0, a3, 308
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: add_constant_rhs_identity:
; RV64:       # %bb.0:
; RV64-NEXT:    addiw a1, a1, 25
; RV64-NEXT:    addiw a2, a2, 1
; RV64-NEXT:    addi a3, a3, 2047
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    addiw a0, a3, 308
; RV64-NEXT:    vslide1down.vx v8, v8, a1
; RV64-NEXT:    vslide1down.vx v8, v8, a2
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
  %e0 = add i32 %a, 0
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

define <4 x i32> @add_constant_rhs_identity2(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_identity2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, 23
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %c, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %d, i32 3
  ret <4 x i32> %v3
}

define <4 x i32> @add_constant_rhs_inverse(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_inverse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI11_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = sub i32 %a, 1
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

define <4 x i32> @add_constant_rhs_commute(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    vslide1down.vx v8, v8, a2
; CHECK-NEXT:    vslide1down.vx v8, v8, a3
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 1, %c
  %e3 = add i32 %d, 2355
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}
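
; With variable (non-constant) operands the adds likewise stay on the scalar
; side, feeding the build_vector directly.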
define <4 x i32> @add_general_rhs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; RV32-LABEL: add_general_rhs:
; RV32:       # %bb.0:
; RV32-NEXT:    add a0, a0, a4
; RV32-NEXT:    add a1, a1, a5
; RV32-NEXT:    add a2, a2, a6
; RV32-NEXT:    add a3, a3, a7
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    ret
;
; RV64-LABEL: add_general_rhs:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a0, a4
; RV64-NEXT:    addw a1, a1, a5
; RV64-NEXT:    addw a2, a2, a6
; RV64-NEXT:    addw a3, a3, a7
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    vslide1down.vx v8, v8, a1
; RV64-NEXT:    vslide1down.vx v8, v8, a2
; RV64-NEXT:    vslide1down.vx v8, v8, a3
; RV64-NEXT:    ret
  %e0 = add i32 %a, %e
  %e1 = add i32 %b, %f
  %e2 = add i32 %c, %g
  %e3 = add i32 %d, %h
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

define <4 x i32> @add_general_splat(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
; RV32-LABEL: add_general_splat:
; RV32:       # %bb.0:
; RV32-NEXT:    add a0, a0, a4
; RV32-NEXT:    add a1, a1, a4
; RV32-NEXT:    add a2, a2, a4
; RV32-NEXT:    add a3, a3, a4
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    ret
;
; RV64-LABEL: add_general_splat:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a0, a4
; RV64-NEXT:    addw a1, a1, a4
; RV64-NEXT:    addw a2, a2, a4
; RV64-NEXT:    addw a3, a3, a4
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    vslide1down.vx v8, v8, a1
; RV64-NEXT:    vslide1down.vx v8, v8, a2
; RV64-NEXT:    vslide1down.vx v8, v8, a3
; RV64-NEXT:    ret
  %e0 = add i32 %a, %e
  %e1 = add i32 %b, %e
  %e2 = add i32 %c, %e
  %e3 = add i32 %d, %e
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}

; This test previously failed with an assertion failure because constant shift
; amounts are type legalized early.
define void @buggy(i32 %0) #0 {
; RV32-LABEL: buggy:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vadd.vv v8, v8, v8
; RV32-NEXT:    vor.vi v8, v8, 1
; RV32-NEXT:    vrgather.vi v9, v8, 0
; RV32-NEXT:    vse32.v v9, (zero)
; RV32-NEXT:    ret
;
; RV64-LABEL: buggy:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    slli a0, a0, 1
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    vor.vi v8, v8, 1
; RV64-NEXT:    vrgather.vi v9, v8, 0
; RV64-NEXT:    vse32.v v9, (zero)
; RV64-NEXT:    ret
entry:
  %mul.us.us.i.3 = shl i32 %0, 1
  %1 = insertelement <4 x i32> zeroinitializer, i32 %mul.us.us.i.3, i64 0
  %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  %3 = shufflevector <4 x i32> %2, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
  store <4 x i32> %3, ptr null, align 16
  ret void
}
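
; When the build_vector has a live-in vector source, the scalar results are
; inserted with vmv.s.x plus vslideup.vi, raising the AVL to cover each insert
; index (tail-undisturbed except where the slideup writes the last element).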
define <8 x i32> @add_constant_rhs_8xi32_vector_in(<8 x i32> %vin, i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_8xi32_vector_in:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, 23
; CHECK-NEXT:    addi a1, a1, 25
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a3, a3, 2047
; CHECK-NEXT:    addi a3, a3, 308
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vmv.s.x v10, a1
; CHECK-NEXT:    vslideup.vi v8, v10, 1
; CHECK-NEXT:    vmv.s.x v10, a2
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 2
; CHECK-NEXT:    vmv.s.x v10, a3
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <8 x i32> %vin, i32 %e0, i32 0
  %v1 = insertelement <8 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <8 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <8 x i32> %v2, i32 %e3, i32 3
  ret <8 x i32> %v3
}

define <8 x i32> @add_constant_rhs_8xi32_vector_in2(<8 x i32> %vin, i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_8xi32_vector_in2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, 23
; CHECK-NEXT:    addi a1, a1, 25
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a3, a3, 2047
; CHECK-NEXT:    addi a3, a3, 308
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 4
; CHECK-NEXT:    vmv.s.x v10, a1
; CHECK-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 5
; CHECK-NEXT:    vmv.s.x v10, a2
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 6
; CHECK-NEXT:    vmv.s.x v10, a3
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 7
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <8 x i32> %vin, i32 %e0, i32 4
  %v1 = insertelement <8 x i32> %v0, i32 %e1, i32 5
  %v2 = insertelement <8 x i32> %v1, i32 %e2, i32 6
  %v3 = insertelement <8 x i32> %v2, i32 %e3, i32 7
  ret <8 x i32> %v3
}

define <8 x i32> @add_constant_rhs_8xi32_vector_in3(<8 x i32> %vin, i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_8xi32_vector_in3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, 23
; CHECK-NEXT:    addi a1, a1, 25
; CHECK-NEXT:    addi a2, a2, 1
; CHECK-NEXT:    addi a3, a3, 2047
; CHECK-NEXT:    addi a3, a3, 308
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vmv.s.x v10, a1
; CHECK-NEXT:    vslideup.vi v8, v10, 2
; CHECK-NEXT:    vmv.s.x v10, a2
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 4
; CHECK-NEXT:    vmv.s.x v10, a3
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 6
; CHECK-NEXT:    ret
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <8 x i32> %vin, i32 %e0, i32 0
  %v1 = insertelement <8 x i32> %v0, i32 %e1, i32 2
  %v2 = insertelement <8 x i32> %v1, i32 %e2, i32 4
  %v3 = insertelement <8 x i32> %v2, i32 %e3, i32 6
  ret <8 x i32> %v3
}
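
; Mixing a vector add over the low lanes with scalar adds on the inserted
; high lanes still folds into a single vadd.vv: note there are no scalar addis
; here, the constants for all eight lanes come from one constant-pool vector.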
define <8 x i32> @add_constant_rhs_8xi32_partial(<8 x i32> %vin, i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_8xi32_partial:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vmv.s.x v12, a1
; CHECK-NEXT:    vslideup.vi v8, v10, 4
; CHECK-NEXT:    vmv.s.x v10, a2
; CHECK-NEXT:    lui a0, %hi(.LCPI19_0)
; CHECK-NEXT:    addi a0, a0, %lo(.LCPI19_0)
; CHECK-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 5
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v12, (a0)
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 6
; CHECK-NEXT:    vmv.s.x v10, a3
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 7
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vadd = add <8 x i32> %vin, <i32 1, i32 2, i32 3, i32 5, i32 undef, i32 undef, i32 undef, i32 undef>
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  %v0 = insertelement <8 x i32> %vadd, i32 %e0, i32 4
  %v1 = insertelement <8 x i32> %v0, i32 %e1, i32 5
  %v2 = insertelement <8 x i32> %v1, i32 %e2, i32 6
  %v3 = insertelement <8 x i32> %v2, i32 %e3, i32 7
  ret <8 x i32> %v3
}

; Here we cannot pull the ashr through into the vector domain due to the
; truncate semantics of the build_vector. Doing so would truncate before the
; ashr instead of after it, so if %a or %b is e.g. UINT32_MAX+1 we get a
; different result.
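; For instance, with %a = UINT32_MAX+1 = 1 << 32, truncating after the ashr
; gives trunc(2^31) = 0x80000000, while truncating first would give
; ashr(0, 1) = 0.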
define <2 x i32> @build_vec_of_trunc_op(i64 %a, i64 %b) {
; RV32-LABEL: build_vec_of_trunc_op:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    slli a1, a1, 31
; RV32-NEXT:    srli a0, a0, 1
; RV32-NEXT:    slli a3, a3, 31
; RV32-NEXT:    srli a2, a2, 1
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    or a2, a2, a3
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: build_vec_of_trunc_op:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    srli a0, a0, 1
; RV64-NEXT:    srli a1, a1, 1
; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    vslide1down.vx v8, v8, a1
; RV64-NEXT:    ret
entry:
  %conv11.i = ashr i64 %a, 1
  %conv11.2 = ashr i64 %b, 1
  %0 = trunc i64 %conv11.i to i32
  %1 = trunc i64 %conv11.2 to i32
  %2 = insertelement <2 x i32> zeroinitializer, i32 %0, i64 0
  %3 = insertelement <2 x i32> %2, i32 %1, i64 1
  ret <2 x i32> %3
}