; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 -stop-after finalize-isel -o - %s | FileCheck %s

declare void @llvm.masked.store.v16f32.p0(<16 x float>, ptr, i32, <16 x i1>)
declare <16 x float> @llvm.masked.load.v16f32.p0(ptr, i32, <16 x i1>, <16 x float>)

; A 512-bit masked load/store on AVX2 is legalized into two 256-bit
; VMASKMOVPS pairs; the high halves address the same stack slot at a
; +32-byte offset.
define void @test_v16f(<16 x i32> %x) {
  ; CHECK-LABEL: name: test_v16f
  ; CHECK: bb.0.bb:
  ; CHECK-NEXT:   liveins: $ymm0, $ymm1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr256 = COPY $ymm1
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr256 = COPY $ymm0
  ; CHECK-NEXT:   [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
  ; CHECK-NEXT:   [[VPCMPEQDYrr:%[0-9]+]]:vr256 = VPCMPEQDYrr [[COPY]], [[AVX_SET0_]]
  ; CHECK-NEXT:   [[VPCMPEQDYrr1:%[0-9]+]]:vr256 = VPCMPEQDYrr [[COPY1]], [[AVX_SET0_]]
  ; CHECK-NEXT:   [[VMASKMOVPSYrm:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[VPCMPEQDYrr1]], %stack.0.stack_input_vec, 1, $noreg, 0, $noreg :: (load unknown-size from %ir.stack_input_vec, align 4)
  ; CHECK-NEXT:   [[VMASKMOVPSYrm1:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[VPCMPEQDYrr]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load unknown-size from %ir.stack_input_vec + 32, align 4)
  ; CHECK-NEXT:   VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[VPCMPEQDYrr]], killed [[VMASKMOVPSYrm1]] :: (store unknown-size into %ir.stack_output_vec + 32, align 4)
  ; CHECK-NEXT:   VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 0, $noreg, [[VPCMPEQDYrr1]], killed [[VMASKMOVPSYrm]] :: (store unknown-size into %ir.stack_output_vec, align 4)
  ; CHECK-NEXT:   RET 0
bb:
  %stack_input_vec = alloca <16 x float>, align 64
  %stack_output_vec = alloca <16 x float>, align 64
  %mask = icmp eq <16 x i32> %x, zeroinitializer
  %masked_loaded_vec = call <16 x float> @llvm.masked.load.v16f32.p0(ptr nonnull %stack_input_vec, i32 4, <16 x i1> %mask, <16 x float> undef)
  call void @llvm.masked.store.v16f32.p0(<16 x float> %masked_loaded_vec, ptr nonnull %stack_output_vec, i32 4, <16 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.v8f64.p0(<8 x double>, ptr, i32, <8 x i1>)
declare <8 x double> @llvm.masked.load.v8f64.p0(ptr, i32, <8 x i1>, <8 x double>)

; Same split as test_v16f but for <8 x double>: two 256-bit VMASKMOVPD
; pairs, high halves at a +32-byte offset, with VPCMPEQQ producing the
; 64-bit-element masks.
define void @test_v8d(<8 x i64> %x) {
  ; CHECK-LABEL: name: test_v8d
  ; CHECK: bb.0.bb:
  ; CHECK-NEXT:   liveins: $ymm0, $ymm1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr256 = COPY $ymm1
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr256 = COPY $ymm0
  ; CHECK-NEXT:   [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
  ; CHECK-NEXT:   [[VPCMPEQQYrr:%[0-9]+]]:vr256 = VPCMPEQQYrr [[COPY]], [[AVX_SET0_]]
  ; CHECK-NEXT:   [[VPCMPEQQYrr1:%[0-9]+]]:vr256 = VPCMPEQQYrr [[COPY1]], [[AVX_SET0_]]
  ; CHECK-NEXT:   [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[VPCMPEQQYrr1]], %stack.0.stack_input_vec, 1, $noreg, 0, $noreg :: (load unknown-size from %ir.stack_input_vec, align 4)
  ; CHECK-NEXT:   [[VMASKMOVPDYrm1:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[VPCMPEQQYrr]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load unknown-size from %ir.stack_input_vec + 32, align 4)
  ; CHECK-NEXT:   VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[VPCMPEQQYrr]], killed [[VMASKMOVPDYrm1]] :: (store unknown-size into %ir.stack_output_vec + 32, align 4)
  ; CHECK-NEXT:   VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 0, $noreg, [[VPCMPEQQYrr1]], killed [[VMASKMOVPDYrm]] :: (store unknown-size into %ir.stack_output_vec, align 4)
  ; CHECK-NEXT:   RET 0
bb:
  %stack_input_vec = alloca <8 x double>, align 64
  %stack_output_vec = alloca <8 x double>, align 64
  %mask = icmp eq <8 x i64> %x, zeroinitializer
  %masked_loaded_vec = call <8 x double> @llvm.masked.load.v8f64.p0(ptr nonnull %stack_input_vec, i32 4, <8 x i1> %mask, <8 x double> undef)
  call void @llvm.masked.store.v8f64.p0(<8 x double> %masked_loaded_vec, ptr nonnull %stack_output_vec, i32 4, <8 x i1> %mask)
  ret void
}

; With the constant mask <0,1> only the upper lane is loaded, so the masked
; load is narrowed to a single scalar VMOVHPD from %ir.addr + 8.
define <2 x double> @mload_constmask_v2f64(ptr %addr, <2 x double> %dst) {
  ; CHECK-LABEL: name: mload_constmask_v2f64
  ; CHECK: bb.0 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $rdi, $xmm0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; CHECK-NEXT:   [[VMOVHPDrm:%[0-9]+]]:vr128 = VMOVHPDrm [[COPY]], [[COPY1]], 1, $noreg, 8, $noreg :: (load (s64) from %ir.addr + 8, align 4)
  ; CHECK-NEXT:   $xmm0 = COPY [[VMOVHPDrm]]
  ; CHECK-NEXT:   RET 0, $xmm0
  %res = call <2 x double> @llvm.masked.load.v2f64.p0(ptr %addr, i32 4, <2 x i1> <i1 0, i1 1>, <2 x double> %dst)
  ret <2 x double> %res
}

; With only mask bit 2 set, the masked store is narrowed to one scalar
; VEXTRACTPS store of element 2 into %ir.addr + 8.
define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
  ; CHECK-LABEL: name: one_mask_bit_set2
  ; CHECK: bb.0 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $rdi, $xmm0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; CHECK-NEXT:   VEXTRACTPSmri [[COPY1]], 1, $noreg, 8, $noreg, [[COPY]], 2 :: (store (s32) into %ir.addr + 8)
  ; CHECK-NEXT:   RET 0
  call void @llvm.masked.store.v4f32.p0(<4 x float> %val, ptr %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
  ret void
}

declare <2 x double> @llvm.masked.load.v2f64.p0(ptr, i32, <2 x i1>, <2 x double>)
declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32, <4 x i1>)
