; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mattr=neon | FileCheck %s

define i32 @sext_inc(i1 zeroext %x) {
; CHECK-LABEL: sext_inc:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    eor r0, r0, #1
; CHECK-NEXT:    mov pc, lr
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}

define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK-LABEL: sext_inc_vec:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i16 d16, #0x1
; CHECK-NEXT:    vmov d17, r0, r1
; CHECK-NEXT:    veor d16, d17, d16
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vcge.s32 q8, q9, q8
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    mov r12, sp
; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vceq.i32 q8, q8, q9
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp ne <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}