; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Use CPU parameters to ensure that a CPU-specific attribute is not overriding the AVX definition.

; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  -mattr=+avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx             | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2                 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  -mattr=-avx | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2     -mattr=-avx | FileCheck %s --check-prefix=SSE

; With AVX, there is no need to load the unaligned operand from memory using an
; explicit instruction; the operand should be folded into the AND instruction.

; With SSE, folding memory operands into math/logic ops requires 16-byte alignment
; unless specially configured on some CPUs such as AMD Family 10H
; (see the aligned-load sketch after @test1 below).

define <4 x i32> @test1(ptr %p0, <4 x i32> %in1) nounwind {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vandps (%rdi), %xmm0, %xmm0
; CHECK-NEXT:    retq
;
; SSE-LABEL: test1:
; SSE:       # %bb.0:
; SSE-NEXT:    movups (%rdi), %xmm1
; SSE-NEXT:    andps %xmm1, %xmm0
; SSE-NEXT:    retq
  %in0 = load <4 x i32>, ptr %p0, align 2
  %a = and <4 x i32> %in0, %in1
  ret <4 x i32> %a
}
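
; A minimal illustrative sketch, not generated by update_llc_test_checks.py and
; carrying no assertions: the hypothetical @test1_aligned variant below uses a
; 16-byte-aligned load, which is expected to fold directly into the logic op
; even without AVX (e.g. "andps (%rdi), %xmm0").

define <4 x i32> @test1_aligned(ptr %p0, <4 x i32> %in1) nounwind {
  ; With align 16, the SSE memory-operand alignment requirement is satisfied,
  ; so no separate unaligned load (movups) should be needed.
  %in0 = load <4 x i32>, ptr %p0, align 16
  %a = and <4 x i32> %in0, %in1
  ret <4 x i32> %a
}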