; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

;; Codegen tests for 256-bit LASX logical shift right (lshr):
;; - vector-by-vector shifts lower to xvsrl.{b,h,w,d}
;; - shifts by a splat constant lower to the immediate forms xvsrli.{b,h,w,d}

define void @lshr_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: lshr_v32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvsrl.b $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = load <32 x i8>, ptr %a1
  %v2 = lshr <32 x i8> %v0, %v1
  store <32 x i8> %v2, ptr %res
  ret void
}

define void @lshr_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: lshr_v16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvsrl.h $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = load <16 x i16>, ptr %a1
  %v2 = lshr <16 x i16> %v0, %v1
  store <16 x i16> %v2, ptr %res
  ret void
}

define void @lshr_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: lshr_v8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvsrl.w $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = load <8 x i32>, ptr %a1
  %v2 = lshr <8 x i32> %v0, %v1
  store <8 x i32> %v2, ptr %res
  ret void
}

define void @lshr_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: lshr_v4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvsrl.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = load <4 x i64>, ptr %a1
  %v2 = lshr <4 x i64> %v0, %v1
  store <4 x i64> %v2, ptr %res
  ret void
}

define void @lshr_v32i8_1(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v32i8_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.b $xr0, $xr0, 1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = lshr <32 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  store <32 x i8> %v1, ptr %res
  ret void
}

define void @lshr_v32i8_7(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v32i8_7:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.b $xr0, $xr0, 7
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = lshr <32 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  store <32 x i8> %v1, ptr %res
  ret void
}

define void @lshr_v16i16_1(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v16i16_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.h $xr0, $xr0, 1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = lshr <16 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  store <16 x i16> %v1, ptr %res
  ret void
}

define void @lshr_v16i16_15(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v16i16_15:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.h $xr0, $xr0, 15
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = lshr <16 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  store <16 x i16> %v1, ptr %res
  ret void
}

define void @lshr_v8i32_1(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v8i32_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.w $xr0, $xr0, 1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = lshr <8 x i32> %v0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  store <8 x i32> %v1, ptr %res
  ret void
}

define void @lshr_v8i32_31(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v8i32_31:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.w $xr0, $xr0, 31
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = lshr <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  store <8 x i32> %v1, ptr %res
  ret void
}

define void @lshr_v4i64_1(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v4i64_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.d $xr0, $xr0, 1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = lshr <4 x i64> %v0, <i64 1, i64 1, i64 1, i64 1>
  store <4 x i64> %v1, ptr %res
  ret void
}

define void @lshr_v4i64_63(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: lshr_v4i64_63:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvsrli.d $xr0, $xr0, 63
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = lshr <4 x i64> %v0, <i64 63, i64 63, i64 63, i64 63>
  store <4 x i64> %v1, ptr %res
  ret void
}