; xref: /llvm-project/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll (revision 6b0e2fa6f0b1045ed616e263c75ee59768e9f7f8)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs -mcpu=znver2 -O2 -frame-pointer=none < %s | FileCheck %s
3
4; Make sure that instructions aren't scheduled after the "callbr". In the
5; example below, we don't want the "shrxq" through "leaq" instructions to be
6; moved after the "callbr".
7
; Types and external globals mirroring Linux-kernel paging/cpuinfo structures
; referenced by the function below (presumably reduced from kernel code —
; only the layouts needed by the GEPs here are retained).
8%struct.cpuinfo_x86 = type { i8, i8, i8, i8, i32, [3 x i32], i8, i8, i8, i8, i32, i32, %union.anon.83, [16 x i8], [64 x i8], i32, i32, i32, i32, i32, i32, i64, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i32, i8, i8 }
9%union.anon.83 = type { i64, [72 x i8] }
10%struct.pgd_t = type { i64 }
11%struct.p4d_t = type { i64 }
12%struct.pud_t = type { i64 }
13
14@boot_cpu_data = external dso_local global %struct.cpuinfo_x86, align 8
15@page_offset_base = external dso_local local_unnamed_addr global i64, align 8
16@pgdir_shift = external dso_local local_unnamed_addr global i32, align 4
; @__force_order appears in this file only as the "=*m" fake output of the
; CR3-reading inline asm below, i.e. an ordering token rather than real data.
17@__force_order = external dso_local global i64, align 8
18@ptrs_per_p4d = external dso_local local_unnamed_addr global i32, align 4
19
20define i64 @early_ioremap_pmd(i64 %addr) {
; The CHECK block below is autogenerated (see NOTE at the top of the file) and
; pins the exact instruction order: everything from "shrxq" through "leaq"
; (the %arrayidx address computation) must be emitted *before* the second
; #APP/#NO_APP region produced by the callbr — scheduling any of it after the
; callbr is exactly the bug this test guards against.
21; CHECK-LABEL: early_ioremap_pmd:
22; CHECK:       # %bb.0: # %entry
23; CHECK-NEXT:    #APP
24; CHECK-NEXT:    movq %cr3, %rax
25; CHECK-EMPTY:
26; CHECK-NEXT:    #NO_APP
27; CHECK-NEXT:    movabsq $9223372036854771712, %rdx # imm = 0x7FFFFFFFFFFFF000
28; CHECK-NEXT:    andq %rax, %rdx
29; CHECK-NEXT:    movzbl pgdir_shift(%rip), %eax
30; CHECK-NEXT:    movq page_offset_base(%rip), %rcx
31; CHECK-NEXT:    shrxq %rax, %rdi, %rax
32; CHECK-NEXT:    addq %rcx, %rdx
33; CHECK-NEXT:    andl $511, %eax # imm = 0x1FF
34; CHECK-NEXT:    leaq (%rdx,%rax,8), %rax
35; CHECK-NEXT:    #APP
36; CHECK-NEXT:  .Ltmp0:
37; CHECK-NEXT:    jmp .Ltmp1
38; CHECK-NEXT:  .Ltmp2:
39; CHECK-NEXT:    .zero (-(((.Ltmp3-.Ltmp4)-(.Ltmp2-.Ltmp0))>0))*((.Ltmp3-.Ltmp4)-(.Ltmp2-.Ltmp0)),144
40; CHECK-NEXT:  .Ltmp5:
41entry:
  ; Read CR3. The "=*m" operand on @__force_order is a fake memory output
  ; (kernel idiom) whose only purpose is to order this asm against others;
  ; the real result is the "=r" value in %0.
42  %0 = tail call i64 asm sideeffect "mov %cr3,$0\0A\09", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) nonnull @__force_order)
  ; Compute the pgd slot address:
  ;   %arrayidx = (page_offset_base + (CR3 & PHYS_MASK)) + ((addr >> pgdir_shift) & 511) * 8
  ; These are the instructions that must stay scheduled before the callbr.
43  %and.i = and i64 %0, 9223372036854771712
44  %1 = load i64, ptr @page_offset_base, align 8
45  %add = add i64 %and.i, %1
46  %2 = inttoptr i64 %add to ptr
47  %3 = load i32, ptr @pgdir_shift, align 4
48  %sh_prom = zext i32 %3 to i64
49  %shr = lshr i64 %addr, %sh_prom
50  %and = and i64 %shr, 511
51  %arrayidx = getelementptr %struct.pgd_t, ptr %2, i64 %and
  ; Kernel-style alternatives/_static_cpu_has sequence: the asm emits
  ; .altinstructions/.altinstr_replacement/.altinstr_aux sections and a
  ; feature test on a @boot_cpu_data capability byte. As a callbr it has a
  ; fall-through successor plus two indirect label successors (!i operands),
  ; so no instruction feeding later blocks may sink past it.
52  callbr void asm sideeffect "1: jmp 6f\0A2:\0A.skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90\0A3:\0A.section .altinstructions,\22a\22\0A .long 1b - .\0A .long 4f - .\0A .word ${1:P}\0A .byte 3b - 1b\0A .byte 5f - 4f\0A .byte 3b - 2b\0A.previous\0A.section .altinstr_replacement,\22ax\22\0A4: jmp ${5:l}\0A5:\0A.previous\0A.section .altinstructions,\22a\22\0A .long 1b - .\0A .long 0\0A .word ${0:P}\0A .byte 3b - 1b\0A .byte 0\0A .byte 0\0A.previous\0A.section .altinstr_aux,\22ax\22\0A6:\0A testb $2,$3\0A jnz ${4:l}\0A jmp ${5:l}\0A.previous\0A", "i,i,i,*m,!i,!i,~{dirflag},~{fpsr},~{flags}"(i16 528, i32 117, i32 1, ptr elementtype(i8) getelementptr inbounds (%struct.cpuinfo_x86, ptr @boot_cpu_data, i64 0, i32 12, i32 1, i64 58))
53          to label %_static_cpu_has.exit.thread.i [label %if.end.i, label %if.then.i]
54
55_static_cpu_has.exit.thread.i:                    ; preds = %entry
56  br label %if.end.i
57
58if.then.i:                                        ; preds = %entry
59  br label %p4d_offset.exit
60
; Feature present: index into the p4d table loaded from the pgd entry
; (%arrayidx), masked to ptrs_per_p4d-relative bounds.
61if.end.i:                                         ; preds = %_static_cpu_has.exit.thread.i, %entry
62  %4 = load i64, ptr %arrayidx, align 8
63  %5 = inttoptr i64 %4 to ptr
64  %6 = load i32, ptr @ptrs_per_p4d, align 4
65  %sub.i.i = add i32 %6, 33554431
66  %7 = and i32 %sub.i.i, 33554431
67  %and.i1.i = zext i32 %7 to i64
68  %add.ptr.i = getelementptr %struct.p4d_t, ptr %5, i64 %and.i1.i
69  br label %p4d_offset.exit
70
; Merge: either the derived p4d pointer or the raw pgd slot, then one more
; table walk (pud level) and a sign-bit select on the loaded entry.
71p4d_offset.exit:                                  ; preds = %if.end.i, %if.then.i
72  %retval.0.i = phi ptr [ %add.ptr.i, %if.end.i ], [ %arrayidx, %if.then.i ]
73  %8 = load i64, ptr %retval.0.i, align 8
74  %and.i.i13 = and i64 %8, 4503599627366400
75  %add.i.i14 = add i64 %and.i.i13, %1
76  %9 = inttoptr i64 %add.i.i14 to ptr
77  %coerce.dive.i16 = getelementptr %struct.pud_t, ptr %9, i64 511, i32 0
78  %10 = load i64, ptr %coerce.dive.i16, align 8
79  %tobool.i.i.i = icmp slt i64 %10, 0
80  %..i.i.i = select i1 %tobool.i.i.i, i64 4503598553628672, i64 4503599627366400
81  ret i64 %..i.i.i
82}
83