# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
# RUN: llc -mtriple=amdgcn -run-pass=finalize-isel -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
# RUN: llc -mtriple=amdgcn -passes=finalize-isel -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
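# Check that finalize-isel expands the WAVE_REDUCE_UMIN_PSEUDO_U32 pseudo for
# both a uniform and a divergent input value.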
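# A wave-uniform (SGPR) input needs no cross-lane work: the unsigned-min
# reduction of a uniform value is the value itself, so the pseudo lowers to a
# single S_MOV_B32 of the loaded scalar.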
---
name:            uniform_value
tracksRegLiveness: true
machineFunctionInfo:
  isEntryFunction: true
body:             |
  bb.0.entry:
    liveins: $sgpr0_sgpr1

    ; GCN-LABEL: name: uniform_value
    ; GCN: liveins: $sgpr0_sgpr1
    ; GCN-NEXT: {{  $}}
    ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
    ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
    ; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 44, 0
    ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 [[S_LOAD_DWORD_IMM]]
    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
    ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY1]], killed [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec
    ; GCN-NEXT: S_ENDPGM 0
    %1:sgpr_64(p4) = COPY $sgpr0_sgpr1
    %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %5:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
    %6:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %1(p4), 44, 0
    %7:sgpr_32 = WAVE_REDUCE_UMIN_PSEUDO_U32 killed %6, 1, implicit $exec
    %8:vgpr_32 = COPY %7
    GLOBAL_STORE_DWORD_SADDR killed %4, killed %8, killed %5, 0, 0, implicit $exec
    S_ENDPGM 0

...
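# A divergent (VGPR) input expands to a scalar loop over the active lanes:
# S_FF1_I32_B64 picks the lowest remaining set bit of the saved $exec mask,
# V_READLANE_B32 reads that lane's value, S_MIN_U32 folds it into the
# accumulator (seeded with 0xffffffff, the identity for unsigned min), and
# S_BITSET0_B64 clears the bit until the mask is empty.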
---
name:            divergent_value
machineFunctionInfo:
  isEntryFunction: true
tracksRegLiveness: true
body:             |
  ; GCN-LABEL: name: divergent_value
  ; GCN: bb.0.entry:
  ; GCN-NEXT:   successors: %bb.2(0x80000000)
  ; GCN-NEXT:   liveins: $vgpr0, $sgpr0_sgpr1
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0
  ; GCN-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
  ; GCN-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
  ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 4294967295
  ; GCN-NEXT:   S_BRANCH %bb.2
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.2:
  ; GCN-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[PHI:%[0-9]+]]:sgpr_32 = PHI [[S_MOV_B32_]], %bb.0, %4, %bb.2
  ; GCN-NEXT:   [[PHI1:%[0-9]+]]:sreg_64_xexec = PHI [[S_MOV_B64_]], %bb.0, %11, %bb.2
  ; GCN-NEXT:   [[S_FF1_I32_B64_:%[0-9]+]]:sgpr_32 = S_FF1_I32_B64 [[PHI1]]
  ; GCN-NEXT:   [[V_READLANE_B32_:%[0-9]+]]:sgpr_32 = V_READLANE_B32 [[COPY1]], [[S_FF1_I32_B64_]]
  ; GCN-NEXT:   [[S_MIN_U32_:%[0-9]+]]:sgpr_32 = S_MIN_U32 [[PHI]], [[V_READLANE_B32_]], implicit-def $scc
  ; GCN-NEXT:   [[S_BITSET0_B64_:%[0-9]+]]:sreg_64_xexec = S_BITSET0_B64 [[S_FF1_I32_B64_]], [[PHI1]]
  ; GCN-NEXT:   S_CMP_LG_U64 [[S_BITSET0_B64_]], 0, implicit-def $scc
  ; GCN-NEXT:   S_CBRANCH_SCC1 %bb.2, implicit $scc
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.3:
  ; GCN-NEXT:   successors: %bb.1(0x80000000)
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[S_MIN_U32_]]
  ; GCN-NEXT:   GLOBAL_STORE_DWORD_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY2]], killed [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.1:
  ; GCN-NEXT:   [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY1]], %bb.3
  ; GCN-NEXT:   S_ENDPGM 0
  bb.0.entry:
    liveins: $vgpr0, $sgpr0_sgpr1
    %1:sgpr_64(p4) = COPY $sgpr0_sgpr1
    %0:vgpr_32 = COPY $vgpr0
    %4:sreg_64_xexec_xnull = S_LOAD_DWORDX2_IMM %1(p4), 36, 0
    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %6:sgpr_32 = WAVE_REDUCE_UMIN_PSEUDO_U32 %0, 1, implicit $exec
    %7:vgpr_32 = COPY %6
    GLOBAL_STORE_DWORD_SADDR killed %5, killed %7, killed %4, 0, 0, implicit $exec
  bb.1:
    %8:vgpr_32 = PHI %0, %bb.0
    S_ENDPGM 0

...