# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination  %s -o - | FileCheck -check-prefix=GCN %s
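# Tests folding a scalar immediate into V_ADD_CO_U32_e64 when the fold
# requires shrinking to the e32 form, for various uses of the carry out.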

---

# Uses a carry out in an instruction that can't be shrunk.

name: shrink_scalar_imm_vgpr_v_add_i32_e64_other_carry_out_use
tracksRegLiveness: true

body:             |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_add_i32_e64_other_carry_out_use
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY killed $vcc
    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF

    %4:vgpr_32, %5:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    S_ENDPGM 0, implicit %5

...
---

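# The scalar immediate is used by both adds; the carry out of the first add is
# used, so it is copied out of $vcc before the second add clobbers it.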
name: shrink_scalar_imm_multi_use_with_used_carry
tracksRegLiveness: true

body:             |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_multi_use_with_used_carry
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY killed $vcc
    ; GCN-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF1]], implicit-def $vcc, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]], implicit [[V_ADD_CO_U32_e32_1]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF
    %4:vgpr_32 = IMPLICIT_DEF

    %5:vgpr_32, %6:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    %7:vgpr_32, %8:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %2, 0, implicit $exec
    S_ENDPGM 0, implicit %6, implicit %7

...
---

# TODO: Is it OK to leave the broken use around on the DBG_VALUE?

name: shrink_scalar_imm_vgpr_v_add_i32_e64_dbg_only_carry_out_use
tracksRegLiveness: true

body:             |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_add_i32_e64_dbg_only_carry_out_use
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN-NEXT: DBG_VALUE %5:sreg_64_xexec, $noreg
    ; GCN-NEXT: S_ENDPGM 0, implicit [[V_ADD_CO_U32_e32_]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF

    %4:vgpr_32, %5:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    DBG_VALUE %5, $noreg
    S_ENDPGM 0, implicit %4

...

---

# Uses the carry out in a normal add-with-carry pattern (it feeds a V_ADDC_U32).

name: shrink_scalar_imm_vgpr_v_add_i32_e64_carry_out_use
tracksRegLiveness: true

body:             |
  bb.0:
    ; GCN-LABEL: name: shrink_scalar_imm_vgpr_v_add_i32_e64_carry_out_use
    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 12345
    ; GCN-NEXT: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
    ; GCN-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_MOV_B32_]], [[DEF]], implicit-def $vcc, implicit $exec
    ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY killed $vcc
    ; GCN-NEXT: [[V_ADDC_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADDC_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[DEF1]], [[DEF2]], [[COPY]], 0, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0, implicit [[V_ADDC_U32_e64_]]
    %0:sreg_32_xm0 = S_MOV_B32 12345
    %1:vgpr_32 = IMPLICIT_DEF
    %2:vgpr_32 = IMPLICIT_DEF
    %3:vgpr_32 = IMPLICIT_DEF

    %4:vgpr_32, %5:sreg_64_xexec = V_ADD_CO_U32_e64 %0, %1, 0, implicit $exec
    %6:vgpr_32, %7:sreg_64_xexec = V_ADDC_U32_e64 %2, %3, %5, 0, implicit $exec
    S_ENDPGM 0, implicit %6

...