; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=atom | FileCheck %s
; <rdar://problem/8006248>

; This randomly started passing after an unrelated change, if it fails again it
; might be worth looking at PR12324: misched bringup.

@llvm.used = appending global [1 x ptr] [ptr @func], section "llvm.metadata"

define void @func(ptr %a, ptr %b, ptr %c, ptr %d) nounwind {
; CHECK-LABEL: func:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movslq (%rsi), %rax
; CHECK-NEXT:    movl $4, %esi
; CHECK-NEXT:    subq %rax, %rsi
; CHECK-NEXT:    movq (%rdx), %rax
; CHECK-NEXT:    movswl 8(%rdi), %edx
; CHECK-NEXT:    movswl (%rax,%rsi,2), %eax
; CHECK-NEXT:    imull %edx, %eax
; CHECK-NEXT:    addl $2138875574, %eax # imm = 0x7F7CA6B6
; CHECK-NEXT:    cmpl $2138875574, %eax # imm = 0x7F7CA6B6
; CHECK-NEXT:    setl %dl
; CHECK-NEXT:    cmpl $-8608074, %eax # imm = 0xFF7CA6B6
; CHECK-NEXT:    setge %sil
; CHECK-NEXT:    andb %dl, %sil
; CHECK-NEXT:    movzbl %sil, %edx
; CHECK-NEXT:    movslq %eax, %rsi
; CHECK-NEXT:    movq %rsi, %rdi
; CHECK-NEXT:    negl %edx
; CHECK-NEXT:    subq %rax, %rdi
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testl $-2, %edx
; CHECK-NEXT:    cmovneq %rax, %rdi
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    cmovnsq %rax, %rdi
; CHECK-NEXT:    movq (%rcx), %rax
; CHECK-NEXT:    subq %rdi, %rsi
; CHECK-NEXT:    leaq -2138875574(%rax,%rsi), %rax
; CHECK-NEXT:    movq %rax, (%rcx)
; CHECK-NEXT:    retq
entry:
  %tmp103 = getelementptr inbounds [40 x i16], ptr %a, i64 0, i64 4
  %tmp104 = load i16, ptr %tmp103, align 2
  %tmp105 = sext i16 %tmp104 to i32
  %tmp106 = load i32, ptr %b, align 4
  %tmp107 = sub nsw i32 4, %tmp106
  %tmp108 = load ptr, ptr %c, align 8
  %tmp109 = sext i32 %tmp107 to i64
  %tmp110 = getelementptr inbounds i16, ptr %tmp108, i64 %tmp109
  %tmp111 = load i16, ptr %tmp110, align 1
  %tmp112 = sext i16 %tmp111 to i32
  %tmp = mul i32 355244649, %tmp112
  %tmp1 = mul i32 %tmp, %tmp105
  %tmp2 = add i32 %tmp1, 2138875574
  %tmp3 = add i32 %tmp2, 1546991088
  %tmp4 = mul i32 %tmp3, 2122487257
  %tmp5 = icmp sge i32 %tmp4, 2138875574
  %tmp6 = icmp slt i32 %tmp4, -8608074
  %tmp7 = or i1 %tmp5, %tmp6
  %outSign = select i1 %tmp7, i32 1, i32 -1
  %tmp8 = icmp slt i32 %tmp4, 0
  %tmp9 = icmp eq i32 %outSign, 1
  %tmp10 = and i1 %tmp8, %tmp9
  %tmp11 = sext i32 %tmp4 to i64
  %tmp12 = add i64 %tmp11, 5089792279245435153
  %tmp13 = sub i64 %tmp12, 2138875574
  %tmp14 = zext i32 %tmp4 to i64
  %tmp15 = sub i64 %tmp11, %tmp14
  %tmp16 = select i1 %tmp10, i64 %tmp15, i64 0
  %tmp17 = sub i64 %tmp13, %tmp16
  %tmp18 = mul i64 %tmp17, 4540133155013554595
  %tmp19 = sub i64 %tmp18, 5386586244038704851
  %tmp20 = add i64 %tmp19, -1368057358110947217
  %tmp21 = mul i64 %tmp20, -422037402840850817
  %tmp115 = load i64, ptr %d, align 8
  %alphaX = mul i64 468858157810230901, %tmp21
  %alphaXbetaY = add i64 %alphaX, %tmp115
  %transformed = add i64 %alphaXbetaY, 9040145182981852475
  store i64 %transformed, ptr %d, align 8
  ret void
}