; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 \
; RUN:   -pre-RA-sched=source | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 \
; RUN:   -pre-RA-sched=list-hybrid | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
; Radar 7459078
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"

%0 = type { i32, i32 }
%s1 = type { %s3, i32, %s4, ptr, ptr, ptr, ptr, ptr, ptr, i32, i64, [1 x i32] }
%s2 = type { ptr, %s4 }
%s3 = type { %s2, i32, i32, ptr, [4 x i8], float, %s4, ptr, ptr }
%s4 = type { %s5 }
%s5 = type { i32 }

; Make sure the cmp is not scheduled before the InlineAsm that clobbers cc.
; CHECK: bl _f2
; CHECK: clz {{r[0-9]+}}
; CHECK-DAG: lsrs    {{r[0-9]+}}
; CHECK-DAG: lsls    {{r[0-9]+}}
; CHECK-NEXT: orr.w   {{r[0-9]+}}
; CHECK-NEXT: InlineAsm Start
define void @test(ptr %this, i32 %format, i32 %w, i32 %h, i32 %levels, ptr %s, ptr %data, ptr nocapture %rowbytes, ptr %release, ptr %info) nounwind {
entry:
  %tmp1 = getelementptr inbounds %s1, ptr %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
  store volatile i32 1, ptr %tmp1, align 4
  %tmp12 = getelementptr inbounds %s1, ptr %this, i32 0, i32 1
  store i32 %levels, ptr %tmp12, align 4
  %tmp13 = getelementptr inbounds %s1, ptr %this, i32 0, i32 3
  store ptr %data, ptr %tmp13, align 4
  %tmp14 = getelementptr inbounds %s1, ptr %this, i32 0, i32 4
  store ptr %release, ptr %tmp14, align 4
  %tmp15 = getelementptr inbounds %s1, ptr %this, i32 0, i32 5
  store ptr %info, ptr %tmp15, align 4
  %tmp16 = getelementptr inbounds %s1, ptr %this, i32 0, i32 6
  store ptr null, ptr %tmp16, align 4
  %tmp17 = getelementptr inbounds %s1, ptr %this, i32 0, i32 7
  store ptr null, ptr %tmp17, align 4
  %tmp19 = getelementptr inbounds %s1, ptr %this, i32 0, i32 10
  store i64 0, ptr %tmp19, align 4
  tail call  void @f1(ptr %this, ptr %s) nounwind
  %tmp21 = shl i32 %format, 6
  %tmp22 = tail call  zeroext i8 @f2(i32 %format) nounwind
  %toBoolnot = icmp eq i8 %tmp22, 0
  %tmp23 = zext i1 %toBoolnot to i32
  %flags.0 = or i32 %tmp23, %tmp21
  %tmp24 = shl i32 %flags.0, 16
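  ; The asm below is an ldrex/orr/strex retry loop that atomically ORs %tmp24
  ; into the word at %tmp1; it clobbers memory and cc, so the compare of
  ; %levels further down must not be scheduled above it.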
  %asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(ptr %tmp1, i32 %tmp24) nounwind
  %tmp25 = getelementptr inbounds %s1, ptr %this, i32 0, i32 2, i32 0, i32 0
  store volatile i32 1, ptr %tmp25, align 4
  %tmp26 = icmp eq i32 %levels, 0
  br i1 %tmp26, label %return, label %bb4

bb4:
  %l.09 = phi i32 [ %tmp28, %bb4 ], [ 0, %entry ]
  %scevgep = getelementptr %s1, ptr %this, i32 0, i32 11, i32 %l.09
  %scevgep10 = getelementptr i32, ptr %rowbytes, i32 %l.09
  %tmp27 = load i32, ptr %scevgep10, align 4
  store i32 %tmp27, ptr %scevgep, align 4
  %tmp28 = add i32 %l.09, 1
  %exitcond = icmp eq i32 %tmp28, %levels
  br i1 %exitcond, label %return, label %bb4

return:
  ret void
}

declare void @f1(ptr, ptr)
declare zeroext i8 @f2(i32)