; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s
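; With -aarch64-sve-vector-bits-min=256 the backend may assume SVE registers are
; at least 256 bits wide, which allows the 256-bit fixed-length <8 x i32> vectors
; below to be lowered to SVE operations.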

target triple = "aarch64"

;
; NOTE: SVE lowering for the BSP pseudoinst is not currently implemented, so we
;       don't currently expect the code below to lower to BSL/BIT/BIF. Once
;       this is implemented, this test will be fleshed out.
;

define void @fixed_bitselect_v8i32(ptr %pre_cond_ptr, ptr %left_ptr, ptr %right_ptr, ptr %result_ptr) #0 {
; CHECK-LABEL: fixed_bitselect_v8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl8
; CHECK-NEXT:    mov z1.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x1]
; CHECK-NEXT:    ld1w { z3.s }, p0/z, [x2]
; CHECK-NEXT:    add z1.s, z0.s, z1.s
; CHECK-NEXT:    subr z0.s, z0.s, #0 // =0x0
; CHECK-NEXT:    and z0.d, z0.d, z2.d
; CHECK-NEXT:    and z1.d, z1.d, z3.d
; CHECK-NEXT:    orr z0.d, z1.d, z0.d
; CHECK-NEXT:    st1w { z0.s }, p0, [x3]
; CHECK-NEXT:    ret
  %pre_cond = load <8 x i32>, ptr %pre_cond_ptr
  %left = load <8 x i32>, ptr %left_ptr
  %right = load <8 x i32>, ptr %right_ptr

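  ; Classic mask-based bitwise select: assuming each %pre_cond element is 0 or 1,
  ; %neg_cond (0 - %pre_cond) is all-ones where the condition is set and
  ; %min_cond (%pre_cond - 1) is all-ones where it is clear, so or-ing the masked
  ; operands yields "%pre_cond ? %left : %right" per element, which is the
  ; operation a BSL/BIT/BIF instruction implements directly.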
  %neg_cond = sub <8 x i32> zeroinitializer, %pre_cond
  %min_cond = add <8 x i32> %pre_cond, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %left_bits_0 = and <8 x i32> %neg_cond, %left
  %right_bits_0 = and <8 x i32> %min_cond, %right
  %bsl0000 = or <8 x i32> %right_bits_0, %left_bits_0
  store <8 x i32> %bsl0000, ptr %result_ptr
  ret void
}

attributes #0 = { "target-features"="+sve" }