; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

; This file checks that each llvm.loongarch.lasx.xvadd.* intrinsic is lowered
; to the corresponding single LASX vector-add instruction, with the result
; produced in-place in $xr0 from operands $xr0 and $xr1.

declare <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8>, <32 x i8>)

; Byte-element add: lowers to xvadd.b.
define <32 x i8> @lasx_xvadd_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
; CHECK-LABEL: lasx_xvadd_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvadd.b $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8> %va, <32 x i8> %vb)
  ret <32 x i8> %res
}

declare <16 x i16> @llvm.loongarch.lasx.xvadd.h(<16 x i16>, <16 x i16>)

; Halfword-element add: lowers to xvadd.h.
define <16 x i16> @lasx_xvadd_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
; CHECK-LABEL: lasx_xvadd_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvadd.h $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i16> @llvm.loongarch.lasx.xvadd.h(<16 x i16> %va, <16 x i16> %vb)
  ret <16 x i16> %res
}

declare <8 x i32> @llvm.loongarch.lasx.xvadd.w(<8 x i32>, <8 x i32>)

; Word-element add: lowers to xvadd.w.
define <8 x i32> @lasx_xvadd_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
; CHECK-LABEL: lasx_xvadd_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvadd.w $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i32> @llvm.loongarch.lasx.xvadd.w(<8 x i32> %va, <8 x i32> %vb)
  ret <8 x i32> %res
}

declare <4 x i64> @llvm.loongarch.lasx.xvadd.d(<4 x i64>, <4 x i64>)

; Doubleword-element add: lowers to xvadd.d.
define <4 x i64> @lasx_xvadd_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
; CHECK-LABEL: lasx_xvadd_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvadd.d $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i64> @llvm.loongarch.lasx.xvadd.d(<4 x i64> %va, <4 x i64> %vb)
  ret <4 x i64> %res
}

declare <4 x i64> @llvm.loongarch.lasx.xvadd.q(<4 x i64>, <4 x i64>)

; Quadword add: operands are expressed as <4 x i64> at the IR level
; (same type as the .d variant); lowers to xvadd.q.
define <4 x i64> @lasx_xvadd_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
; CHECK-LABEL: lasx_xvadd_q:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvadd.q $xr0, $xr0, $xr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i64> @llvm.loongarch.lasx.xvadd.q(<4 x i64> %va, <4 x i64> %vb)
  ret <4 x i64> %res
}