; RUN: llc -mtriple=thumb-eabi < %s -o - | FileCheck %s

; Check that stack addresses are generated using a single ADD
define void @test1(ptr %p) {
  %x = alloca i8, align 1
  %y = alloca i8, align 1
  %z = alloca i8, align 1
; CHECK: add r1, sp, #8
; CHECK: str r1, [r0]
  store volatile ptr %x, ptr %p, align 4
; CHECK: add r1, sp, #4
; CHECK: str r1, [r0]
  store volatile ptr %y, ptr %p, align 4
; CHECK: mov r1, sp
; CHECK: str r1, [r0]
  store volatile ptr %z, ptr %p, align 4
  ret void
}

; Stack offsets larger than 1020 still need two ADDs
define void @test2(ptr %p) {
  %arr1 = alloca [1024 x i8], align 1
  %arr2 = alloca [1024 x i8], align 1
; CHECK: add r1, sp, #1020
; CHECK: adds r1, #4
; CHECK: str r1, [r0]
  store volatile ptr %arr1, ptr %p, align 4
; CHECK: mov r1, sp
; CHECK: str r1, [r0]
  store volatile ptr %arr2, ptr %p, align 4
  ret void
}

; If possible stack-based ldrb/ldrh are widened to use SP-based addressing
define i32 @test3() #0 {
  %x = alloca i8, align 1
  %y = alloca i8, align 1
; CHECK: ldr r0, [sp]
  %1 = load i8, ptr %x, align 1
; CHECK: ldr r1, [sp, #4]
  %2 = load i8, ptr %y, align 1
  %3 = add nsw i8 %1, %2
  %4 = zext i8 %3 to i32
  ret i32 %4
}

; Same as test3, but with halfword (ldrh) loads
define i32 @test4() #0 {
  %x = alloca i16, align 2
  %y = alloca i16, align 2
; CHECK: ldr r0, [sp]
  %1 = load i16, ptr %x, align 2
; CHECK: ldr r1, [sp, #4]
  %2 = load i16, ptr %y, align 2
  %3 = add nsw i16 %1, %2
  %4 = zext i16 %3 to i32
  ret i32 %4
}

; Don't widen if the value needs to be zero-extended
define zeroext i8 @test5() {
  %x = alloca i8, align 1
; CHECK: mov r0, sp
; CHECK: ldrb r0, [r0]
  %1 = load i8, ptr %x, align 1
  ret i8 %1
}

; Same as test5, but for a zero-extended halfword load
define zeroext i16 @test6() {
  %x = alloca i16, align 2
; CHECK: mov r0, sp
; CHECK: ldrh r0, [r0]
  %1 = load i16, ptr %x, align 2
  ret i16 %1
}

; Accessing the bottom of a large array shouldn't require materializing a base
;
; CHECK: movs [[REG:r[0-9]+]], #1
; CHECK: str [[REG]], [sp, #16]
; CHECK: str [[REG]], [sp, #4]

define void @test7() {
  %arr = alloca [200 x i32], align 4

  %arrayidx = getelementptr inbounds [200 x i32], ptr %arr, i32 0, i32 1
  store i32 1, ptr %arrayidx, align 4

  %arrayidx1 = getelementptr inbounds [200 x i32], ptr %arr, i32 0, i32 4
  store i32 1, ptr %arrayidx1, align 4

  ret void
}

; Check that loads/stores with out-of-range offsets are handled correctly
define void @test8() {
  %arr3 = alloca [224 x i32], align 4
  %arr2 = alloca [224 x i32], align 4
  %arr1 = alloca [224 x i32], align 4

; CHECK: movs [[REG:r[0-9]+]], #1
; CHECK-DAG: str [[REG]], [sp]
  store i32 1, ptr %arr1, align 4

; Offset in range for sp-based store, but not for non-sp-based store
; CHECK-DAG: str [[REG]], [sp, #128]
  %arr1idx2 = getelementptr inbounds [224 x i32], ptr %arr1, i32 0, i32 32
  store i32 1, ptr %arr1idx2, align 4

; CHECK-DAG: str [[REG]], [sp, #896]
  store i32 1, ptr %arr2, align 4

; %arr2 is in range, but this element of it is not
; CHECK-DAG: add [[RA:r[0-9]+]], sp, #900
; CHECK-DAG: str [[REG]], [{{r[0-9]+}}, #124]
  %arr2idx2 = getelementptr inbounds [224 x i32], ptr %arr2, i32 0, i32 32
  store i32 1, ptr %arr2idx2, align 4

; %arr3 is not in range
; CHECK-DAG: ldr [[RB:r[0-9]+]], .LCPI7_2
; CHECK-DAG: add [[RB]], sp
; CHECK-DAG: str [[REG]], [{{r[0-9]+}}]
  store i32 1, ptr %arr3, align 4

; CHECK-DAG: ldr [[RC:r[0-9]+]], .LCPI7_3
; CHECK-DAG: add [[RC]], sp
; CHECK-DAG: str [[REG]], [{{r[0-9]+}}]
  %arr3idx2 = getelementptr inbounds [224 x i32], ptr %arr3, i32 0, i32 32
  store i32 1, ptr %arr3idx2, align 4

  ret void
}