; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

define void @and_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = load <32 x i8>, ptr %a1
  %v2 = and <32 x i8> %v0, %v1
  store <32 x i8> %v2, ptr %res
  ret void
}

define void @and_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = load <16 x i16>, ptr %a1
  %v2 = and <16 x i16> %v0, %v1
  store <16 x i16> %v2, ptr %res
  ret void
}

define void @and_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = load <8 x i32>, ptr %a1
  %v2 = and <8 x i32> %v0, %v1
  store <8 x i32> %v2, ptr %res
  ret void
}

define void @and_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = load <4 x i64>, ptr %a1
  %v2 = and <4 x i64> %v0, %v1
  store <4 x i64> %v2, ptr %res
  ret void
}

define void @and_u_v32i8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvandi.b $xr0, $xr0, 31
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = and <32 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
  store <32 x i8> %v1, ptr %res
  ret void
}

define void @and_u_v16i16(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvrepli.h $xr1, 31
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = and <16 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  store <16 x i16> %v1, ptr %res
  ret void
}

define void @and_u_v8i32(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvrepli.w $xr1, 31
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = and <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  store <8 x i32> %v1, ptr %res
  ret void
}

define void @and_u_v4i64(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvrepli.d $xr1, 31
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = and <4 x i64> %v0, <i64 31, i64 31, i64 31, i64 31>
  store <4 x i64> %v1, ptr %res
  ret void
}