# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=legalizer -o - %s | FileCheck %s

# The G_UNMERGE_VALUES of the G_SEXT of G_BUILD_VECTOR will introduce
# a new G_SEXT for each of the scalars. The sext of %and[4-7] already
# exist, so the CSE MIR builder in the artifact combiner would re-use
# those instructions and introduce dead copies which were never
# deleted, and also kept the illegal %sext[4-7] alive which would fail
# legalization.

---
name: artifact_combiner_sext_already_exists
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: artifact_combiner_sext_already_exists
    ; CHECK: %undef:_(p4) = G_IMPLICIT_DEF
    ; CHECK-NEXT: %load:_(s32) = G_LOAD %undef(p4) :: (dereferenceable invariant load (s8), align 16, addrspace 4)
    ; CHECK-NEXT: %unmerge3_0:_(s1) = G_TRUNC %load(s32)
    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR %load, [[C]](s32)
    ; CHECK-NEXT: %unmerge3_1:_(s1) = G_TRUNC [[LSHR]](s32)
    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR %load, [[C1]](s32)
    ; CHECK-NEXT: %unmerge3_2:_(s1) = G_TRUNC [[LSHR1]](s32)
    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR %load, [[C2]](s32)
    ; CHECK-NEXT: %unmerge3_3:_(s1) = G_TRUNC [[LSHR2]](s32)
    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR %load, [[C3]](s32)
    ; CHECK-NEXT: %unmerge3_4:_(s1) = G_TRUNC [[LSHR3]](s32)
    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR %load, [[C4]](s32)
    ; CHECK-NEXT: %unmerge3_5:_(s1) = G_TRUNC [[LSHR4]](s32)
    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
    ; CHECK-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR %load, [[C5]](s32)
    ; CHECK-NEXT: %unmerge3_6:_(s1) = G_TRUNC [[LSHR5]](s32)
    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
    ; CHECK-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR %load, [[C6]](s32)
    ; CHECK-NEXT: %unmerge3_7:_(s1) = G_TRUNC [[LSHR6]](s32)
    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK-NEXT: %negone:_(s1) = G_CONSTANT i1 true
    ; CHECK-NEXT: %and0:_(s1) = G_XOR %unmerge3_0, %negone
    ; CHECK-NEXT: %and1:_(s1) = G_XOR %unmerge3_1, %negone
    ; CHECK-NEXT: %and2:_(s1) = G_XOR %unmerge3_2, %negone
    ; CHECK-NEXT: %and3:_(s1) = G_XOR %unmerge3_3, %negone
    ; CHECK-NEXT: %and4:_(s1) = G_XOR %unmerge3_4, %negone
    ; CHECK-NEXT: %and5:_(s1) = G_XOR %unmerge3_5, %negone
    ; CHECK-NEXT: %and6:_(s1) = G_XOR %unmerge3_6, %negone
    ; CHECK-NEXT: %and7:_(s1) = G_XOR %unmerge3_7, %negone
    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT %and0(s1)
    ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SEXT]], [[C10]]
    ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT %and1(s1)
    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[SEXT1]], [[C10]]
    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C7]](s32)
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK-NEXT: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT %and2(s1)
    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SEXT2]], [[C10]]
    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C8]](s32)
    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK-NEXT: [[SEXT3:%[0-9]+]]:_(s32) = G_SEXT %and3(s1)
    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[SEXT3]], [[C10]]
    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C9]](s32)
    ; CHECK-NEXT: %merge0:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK-NEXT: [[SEXT4:%[0-9]+]]:_(s32) = G_SEXT %and4(s1)
    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[SEXT4]], [[C10]]
    ; CHECK-NEXT: [[SEXT5:%[0-9]+]]:_(s32) = G_SEXT %and5(s1)
    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[SEXT5]], [[C10]]
    ; CHECK-NEXT: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C7]](s32)
    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL3]]
    ; CHECK-NEXT: [[SEXT6:%[0-9]+]]:_(s32) = G_SEXT %and6(s1)
    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SEXT6]], [[C10]]
    ; CHECK-NEXT: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND6]], [[C8]](s32)
    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[SHL4]]
    ; CHECK-NEXT: [[SEXT7:%[0-9]+]]:_(s32) = G_SEXT %and7(s1)
    ; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[SEXT7]], [[C10]]
    ; CHECK-NEXT: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C9]](s32)
    ; CHECK-NEXT: %merge1:_(s32) = G_OR [[OR3]], [[SHL5]]
    ; CHECK-NEXT: %bv:_(<2 x s32>) = G_BUILD_VECTOR %merge0(s32), %merge1(s32)
    ; CHECK-NEXT: %null:_(p1) = G_CONSTANT i64 0
    ; CHECK-NEXT: G_STORE %bv(<2 x s32>), %null(p1) :: (store (<2 x s32>), addrspace 1)
    ; CHECK-NEXT: S_ENDPGM 0
    %undef:_(p4) = G_IMPLICIT_DEF
    %load:_(s32) = G_LOAD %undef :: (dereferenceable invariant load (s8), align 16, addrspace 4)
    %trunc:_(s8) = G_TRUNC %load
    %unmerge3_0:_(s1), %unmerge3_1:_(s1), %unmerge3_2:_(s1), %unmerge3_3:_(s1), %unmerge3_4:_(s1), %unmerge3_5:_(s1), %unmerge3_6:_(s1), %unmerge3_7:_(s1) = G_UNMERGE_VALUES %trunc
    %negone:_(s1) = G_CONSTANT i1 true
    %and0:_(s1) = G_XOR %unmerge3_0, %negone
    %and1:_(s1) = G_XOR %unmerge3_1, %negone
    %and2:_(s1) = G_XOR %unmerge3_2, %negone
    %and3:_(s1) = G_XOR %unmerge3_3, %negone
    %and4:_(s1) = G_XOR %unmerge3_4, %negone
    %and5:_(s1) = G_XOR %unmerge3_5, %negone
    %and6:_(s1) = G_XOR %unmerge3_6, %negone
    %and7:_(s1) = G_XOR %unmerge3_7, %negone
    %boolvec:_(<8 x s1>) = G_BUILD_VECTOR %and0(s1), %and1(s1), %and2(s1), %and3(s1), %and4(s1), %and5(s1), %and6(s1), %and7(s1)
    %sext:_(<8 x s8>) = G_SEXT %boolvec(<8 x s1>)
    %sext_lo:_(<4 x s8>), %sext_hi:_(<4 x s8>) = G_UNMERGE_VALUES %sext(<8 x s8>)
    %sext0:_(s8), %sext1:_(s8), %sext2:_(s8), %sext3:_(s8) = G_UNMERGE_VALUES %sext_lo(<4 x s8>)
    %merge0:_(s32) = G_MERGE_VALUES %sext0(s8), %sext1(s8), %sext2(s8), %sext3(s8)
    %sext4:_(s8) = G_SEXT %and4(s1)
    %sext5:_(s8) = G_SEXT %and5(s1)
    %sext6:_(s8) = G_SEXT %and6(s1)
    %sext7:_(s8) = G_SEXT %and7(s1)
    %merge1:_(s32) = G_MERGE_VALUES %sext4, %sext5, %sext6, %sext7
    %bv:_(<2 x s32>) = G_BUILD_VECTOR %merge0(s32), %merge1(s32)
    %null:_(p1) = G_CONSTANT i64 0
    G_STORE %bv(<2 x s32>), %null :: (store (<2 x s32>), addrspace 1)
    S_ENDPGM 0

...