xref: /llvm-project/llvm/test/CodeGen/WebAssembly/simd-bitmask-mask.ll (revision 417e500668621e1275851ccf6e573a39482368b5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+simd128 | FileCheck %s

; Test that redundant masks on the output of bitmask are optimized out.

target triple = "wasm32-unknown-unknown"

; Each bitmask intrinsic packs one bit per lane into the low bits of the
; returned i32, so an `and` with (2^lanes - 1) or wider is a no-op.
declare i32 @llvm.wasm.bitmask.v16i8(<16 x i8>)
declare i32 @llvm.wasm.bitmask.v8i16(<8 x i16>)
declare i32 @llvm.wasm.bitmask.v4i32(<4 x i32>)
declare i32 @llvm.wasm.bitmask.v2i64(<2 x i64>)

; 16 lanes -> 16 significant bits; 0x7fff drops bit 15, so the and must stay.
define i32 @bitmask_v16i8_mask(<16 x i8> %x) {
; CHECK-LABEL: bitmask_v16i8_mask:
; CHECK:         .functype bitmask_v16i8_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i8x16.bitmask
; CHECK-NEXT:    i32.const 32767
; CHECK-NEXT:    i32.and
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v16i8(<16 x i8> %x)
  %v = and i32 %m, 32767 ;; 2^15 - 1
  ret i32 %v
}

; Mask covers all 16 lane bits, so the and is removed from the output.
define i32 @bitmask_v16i8_no_mask(<16 x i8> %x) {
; CHECK-LABEL: bitmask_v16i8_no_mask:
; CHECK:         .functype bitmask_v16i8_no_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i8x16.bitmask
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v16i8(<16 x i8> %x)
  %v = and i32 %m, 65535 ;; 2^16 - 1
  ret i32 %v
}

; 8 lanes -> 8 significant bits; 0x7f drops bit 7, so the and must stay.
define i32 @bitmask_v8i16_mask(<8 x i16> %x) {
; CHECK-LABEL: bitmask_v8i16_mask:
; CHECK:         .functype bitmask_v8i16_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i16x8.bitmask
; CHECK-NEXT:    i32.const 127
; CHECK-NEXT:    i32.and
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v8i16(<8 x i16> %x)
  %v = and i32 %m, 127 ;; 2^7 - 1
  ret i32 %v
}

; Mask covers all 8 lane bits, so the and is removed from the output.
define i32 @bitmask_v8i16_no_mask(<8 x i16> %x) {
; CHECK-LABEL: bitmask_v8i16_no_mask:
; CHECK:         .functype bitmask_v8i16_no_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i16x8.bitmask
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v8i16(<8 x i16> %x)
  %v = and i32 %m, 255 ;; 2^8 - 1
  ret i32 %v
}

; 4 lanes -> 4 significant bits; 0x7 drops bit 3, so the and must stay.
define i32 @bitmask_v4i32_mask(<4 x i32> %x) {
; CHECK-LABEL: bitmask_v4i32_mask:
; CHECK:         .functype bitmask_v4i32_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i32x4.bitmask
; CHECK-NEXT:    i32.const 7
; CHECK-NEXT:    i32.and
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v4i32(<4 x i32> %x)
  %v = and i32 %m, 7 ;; 2^3 - 1
  ret i32 %v
}

; Mask covers all 4 lane bits, so the and is removed from the output.
define i32 @bitmask_v4i32_no_mask(<4 x i32> %x) {
; CHECK-LABEL: bitmask_v4i32_no_mask:
; CHECK:         .functype bitmask_v4i32_no_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i32x4.bitmask
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v4i32(<4 x i32> %x)
  %v = and i32 %m, 15 ;; 2^4 - 1
  ret i32 %v
}

; 2 lanes -> 2 significant bits; 0x1 drops bit 1, so the and must stay.
define i32 @bitmask_v2i64_mask(<2 x i64> %x) {
; CHECK-LABEL: bitmask_v2i64_mask:
; CHECK:         .functype bitmask_v2i64_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i64x2.bitmask
; CHECK-NEXT:    i32.const 1
; CHECK-NEXT:    i32.and
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v2i64(<2 x i64> %x)
  %v = and i32 %m, 1 ;; 2^1 - 1
  ret i32 %v
}

; Mask covers both lane bits, so the and is removed from the output.
define i32 @bitmask_v2i64_no_mask(<2 x i64> %x) {
; CHECK-LABEL: bitmask_v2i64_no_mask:
; CHECK:         .functype bitmask_v2i64_no_mask (v128) -> (i32)
; CHECK-NEXT:  # %bb.0:
; CHECK-NEXT:    local.get 0
; CHECK-NEXT:    i64x2.bitmask
; CHECK-NEXT:    # fallthrough-return
  %m = tail call i32 @llvm.wasm.bitmask.v2i64(<2 x i64> %x)
  %v = and i32 %m, 3 ;; 2^2 - 1
  ret i32 %v
}
