xref: /llvm-project/llvm/test/CodeGen/X86/load-local-v3i1.ll (revision 189900eb149bb55ae3787346f57c1ccbdc50fb3c)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s
3
4; Widen a v3i1 to v4i1 to do a vector load/store. We would previously
5; reconstruct the v3i1 from the first element of the vector by filling all
6; the lanes of the vector with that first element, which was obviously wrong.
7; This happened during type legalization in the DAG, when legalizing the load.
8
9; Function Attrs: argmemonly nounwind readonly
10declare <3 x i32> @llvm.masked.load.v3i32.p1(ptr addrspace(1), i32, <3 x i1>, <3 x i32>)
11
12; Function Attrs: argmemonly nounwind
13declare void @llvm.masked.store.v3i32.p1(<3 x i32>, ptr addrspace(1), i32, <3 x i1>)
14
; Masked load of a <3 x i32>. The autogenerated assertions below verify that
; the three i1 mask lanes (arriving in sil, dl, cl) are packed into a single
; mask byte, and that each lane's load is guarded by a testb on bits 1, 2,
; and 4 of that byte — i.e. the mask is scalarized per lane, not replicated
; from lane 0.
15define  <3 x i32> @masked_load_v3(ptr addrspace(1), <3 x i1>) {
16; CHECK-LABEL: masked_load_v3:
17; CHECK:       # %bb.0: # %entry
18; CHECK-NEXT:    andb $1, %sil
19; CHECK-NEXT:    andb $1, %dl
20; CHECK-NEXT:    addb %dl, %dl
21; CHECK-NEXT:    orb %sil, %dl
22; CHECK-NEXT:    andb $1, %cl
23; CHECK-NEXT:    shlb $2, %cl
24; CHECK-NEXT:    orb %dl, %cl
25; CHECK-NEXT:    testb $1, %cl
26; CHECK-NEXT:    # implicit-def: $xmm0
27; CHECK-NEXT:    jne .LBB0_1
28; CHECK-NEXT:  # %bb.2: # %else
29; CHECK-NEXT:    testb $2, %cl
30; CHECK-NEXT:    jne .LBB0_3
31; CHECK-NEXT:  .LBB0_4: # %else2
32; CHECK-NEXT:    testb $4, %cl
33; CHECK-NEXT:    jne .LBB0_5
34; CHECK-NEXT:  .LBB0_6: # %else5
35; CHECK-NEXT:    retq
36; CHECK-NEXT:  .LBB0_1: # %cond.load
37; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
38; CHECK-NEXT:    testb $2, %cl
39; CHECK-NEXT:    je .LBB0_4
40; CHECK-NEXT:  .LBB0_3: # %cond.load1
41; CHECK-NEXT:    pinsrd $1, 4(%rdi), %xmm0
42; CHECK-NEXT:    testb $4, %cl
43; CHECK-NEXT:    je .LBB0_6
44; CHECK-NEXT:  .LBB0_5: # %cond.load4
45; CHECK-NEXT:    pinsrd $2, 8(%rdi), %xmm0
46; CHECK-NEXT:    retq
47entry:
  ; %0 = source pointer (addrspace 1), %1 = per-lane mask; alignment 4,
  ; passthru is undef so only the masked-in lanes matter.
48  %2 = call <3 x i32> @llvm.masked.load.v3i32.p1(ptr addrspace(1) %0, i32 4, <3 x i1> %1, <3 x i32> undef)
49  ret <3 x i32> %2
50}
51
; Masked store of a <3 x i32>. Mirrors masked_load_v3: the autogenerated
; assertions verify the same mask-byte packing (bits 1, 2, 4), with each
; element stored conditionally via movss/extractps under a testb guard.
52define void @masked_store4_v3(<3 x i32>, ptr addrspace(1), <3 x i1>) {
53; CHECK-LABEL: masked_store4_v3:
54; CHECK:       # %bb.0: # %entry
55; CHECK-NEXT:    andb $1, %sil
56; CHECK-NEXT:    andb $1, %dl
57; CHECK-NEXT:    addb %dl, %dl
58; CHECK-NEXT:    orb %sil, %dl
59; CHECK-NEXT:    andb $1, %cl
60; CHECK-NEXT:    shlb $2, %cl
61; CHECK-NEXT:    orb %dl, %cl
62; CHECK-NEXT:    testb $1, %cl
63; CHECK-NEXT:    jne .LBB1_1
64; CHECK-NEXT:  # %bb.2: # %else
65; CHECK-NEXT:    testb $2, %cl
66; CHECK-NEXT:    jne .LBB1_3
67; CHECK-NEXT:  .LBB1_4: # %else2
68; CHECK-NEXT:    testb $4, %cl
69; CHECK-NEXT:    jne .LBB1_5
70; CHECK-NEXT:  .LBB1_6: # %else4
71; CHECK-NEXT:    retq
72; CHECK-NEXT:  .LBB1_1: # %cond.store
73; CHECK-NEXT:    movss %xmm0, (%rdi)
74; CHECK-NEXT:    testb $2, %cl
75; CHECK-NEXT:    je .LBB1_4
76; CHECK-NEXT:  .LBB1_3: # %cond.store1
77; CHECK-NEXT:    extractps $1, %xmm0, 4(%rdi)
78; CHECK-NEXT:    testb $4, %cl
79; CHECK-NEXT:    je .LBB1_6
80; CHECK-NEXT:  .LBB1_5: # %cond.store3
81; CHECK-NEXT:    extractps $2, %xmm0, 8(%rdi)
82; CHECK-NEXT:    retq
83entry:
  ; %0 = value, %1 = destination pointer (addrspace 1), %2 = per-lane mask;
  ; alignment 4, matching the masked load above.
84  call void @llvm.masked.store.v3i32.p1(<3 x i32> %0, ptr addrspace(1) %1, i32 4, <3 x i1> %2)
85  ret void
86}
87
; The regression test for the header comment's bug: a <3 x i1> is loaded from
; memory and passed to both calls. The autogenerated assertions verify that
; the three mask bits are extracted individually from the loaded byte
; (shrb/andb on bits 0, 1, 2, then movzbl into separate argument registers)
; rather than every lane being filled from bit 0.
88define void @local_load_v3i1(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr %predicate_ptr) nounwind {
89; CHECK-LABEL: local_load_v3i1:
90; CHECK:       # %bb.0:
91; CHECK-NEXT:    pushq %rbp
92; CHECK-NEXT:    pushq %r15
93; CHECK-NEXT:    pushq %r14
94; CHECK-NEXT:    pushq %rbx
95; CHECK-NEXT:    pushq %rax
96; CHECK-NEXT:    movq %rdi, %rbx
97; CHECK-NEXT:    movzbl (%rdx), %eax
98; CHECK-NEXT:    movl %eax, %ecx
99; CHECK-NEXT:    shrb %cl
100; CHECK-NEXT:    andb $1, %cl
101; CHECK-NEXT:    movl %eax, %edx
102; CHECK-NEXT:    shrb $2, %dl
103; CHECK-NEXT:    andb $1, %al
104; CHECK-NEXT:    movzbl %al, %ebp
105; CHECK-NEXT:    movzbl %dl, %r14d
106; CHECK-NEXT:    movzbl %cl, %r15d
107; CHECK-NEXT:    movq %rsi, %rdi
108; CHECK-NEXT:    movl %ebp, %esi
109; CHECK-NEXT:    movl %r15d, %edx
110; CHECK-NEXT:    movl %r14d, %ecx
111; CHECK-NEXT:    callq masked_load_v3@PLT
112; CHECK-NEXT:    movq %rbx, %rdi
113; CHECK-NEXT:    movl %ebp, %esi
114; CHECK-NEXT:    movl %r15d, %edx
115; CHECK-NEXT:    movl %r14d, %ecx
116; CHECK-NEXT:    callq masked_store4_v3@PLT
117; CHECK-NEXT:    addq $8, %rsp
118; CHECK-NEXT:    popq %rbx
119; CHECK-NEXT:    popq %r14
120; CHECK-NEXT:    popq %r15
121; CHECK-NEXT:    popq %rbp
122; CHECK-NEXT:    retq
  ; Load the predicate from memory, then use the same value for both the
  ; masked load and the masked store.
123  %predicate = load <3 x i1>, ptr %predicate_ptr
124  %load1 = call <3 x i32> @masked_load_v3(ptr addrspace(1) %in, <3 x i1> %predicate)
125  call void @masked_store4_v3(<3 x i32> %load1, ptr addrspace(1) %out, <3 x i1> %predicate)
126  ret void
127}
127
128