; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 | FileCheck %s
; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 | %ptxas-verify %}

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"

@scalar1 = internal addrspace(3) global float 0.000000e+00, align 4
@scalar2 = internal addrspace(3) global float 0.000000e+00, align 4

; We shouldn't sink mul.rn.f32 into BB %merge, because BB %merge post-dominates
; BB %entry; over-sinking would only create more register pressure in this
; example. Without this restriction, the backend would sink the fmuls into BB
; %merge but not the loads, since it is conservative about sinking memory
; accesses. As a result, the loads and the two fmuls would end up in two
; different basic blocks, creating two cross-BB live ranges.
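;
; For illustration only, a hedged sketch of the over-sunk form this test
; guards against (block labels match the function below; the exact shape is
; an assumption):
;
;   entry:
;     %0 = load float, ptr addrspacecast (ptr addrspace(3) @scalar1 to ptr)
;     %1 = load float, ptr addrspacecast (ptr addrspace(3) @scalar2 to ptr)
;     br i1 %cond, label %then, label %merge
;   ...
;   merge:
;     %2 = fmul float %0, %0   ; %0 live across a BB boundary
;     %3 = fmul float %1, %2   ; %1 live across a BB boundary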
define float @post_dominate(float %x, i1 %cond) {
; CHECK-LABEL: post_dominate(
entry:
  %0 = load float, ptr addrspacecast (ptr addrspace(3) @scalar1 to ptr), align 4
  %1 = load float, ptr addrspacecast (ptr addrspace(3) @scalar2 to ptr), align 4
; CHECK: ld.shared.f32
; CHECK: ld.shared.f32
  %2 = fmul float %0, %0
  %3 = fmul float %1, %2
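; No PTX branch (bra) may appear between the loads and the multiplies; the
; fmuls must stay in %entry next to the loads: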
; CHECK-NOT: bra
; CHECK: mul.rn.f32
; CHECK: mul.rn.f32
  br i1 %cond, label %then, label %merge

then:
  %z = fadd float %x, %x
  br label %then2

then2:
  %z2 = fadd float %z, %z
  br label %merge

merge:
  %y = phi float [ 0.0, %entry ], [ %z2, %then2 ]
  %w = fadd float %y, %3
  ret float %w
}