# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel-abort=1 -run-pass=instruction-select %s -o - | FileCheck %s

...
---
name: uaddo_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w0, $w1, $x2

    ; Check that we get ADDSWrr for the add and CSINCWr for the overflow bit.
    ; CHECK-LABEL: name: uaddo_s32
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
    ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $w0 = COPY [[ADDSWrr]]
    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = COPY $w1
    %3:gpr(s32), %4:gpr(s32) = G_UADDO %0, %1
    $w0 = COPY %3
    $w1 = COPY %4
    RET_ReallyLR implicit $w0, implicit $w1

...
---
name: uaddo_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $x0, $x1, $x2

    ; Check that we get ADDSXrr for the add and CSINCWr for the overflow bit.
    ; CHECK-LABEL: name: uaddo_s64
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
    ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
    ; CHECK-NEXT: $x0 = COPY [[ADDSXrr]]
    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = COPY $x1
    %3:gpr(s64), %4:gpr(s32) = G_UADDO %0, %1
    $x0 = COPY %3
    $w1 = COPY %4
    RET_ReallyLR implicit $x0, implicit $w1

...
---
name: uaddo_s32_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWri when we can fold in a constant.
    ;
    ; CHECK-LABEL: name: uaddo_s32_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:gpr32sp = COPY $w0
    ; CHECK-NEXT: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 16
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name: uaddo_s32_shifted
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWrs when we can fold in a shift.
    ;
    ; CHECK-LABEL: name: uaddo_s32_shifted
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy1:gpr32 = COPY $w0
    ; CHECK-NEXT: %copy2:gpr32 = COPY $w1
    ; CHECK-NEXT: %add:gpr32 = ADDSWrs %copy1, %copy2, 16, implicit-def $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy1:gpr(s32) = COPY $w0
    %copy2:gpr(s32) = COPY $w1
    %constant:gpr(s32) = G_CONSTANT i32 16
    %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy1, %shift
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name: uaddo_s32_neg_imm
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get SUBSWri when we can fold in a negative constant.
    ;
    ; CHECK-LABEL: name: uaddo_s32_neg_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %copy:gpr32sp = COPY $w0
    ; CHECK-NEXT: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK-NEXT: $w0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 -16
    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name: uaddo_arith_extended
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $w0, $x0
    ; Check that we get ADDSXrx.
    ; CHECK-LABEL: name: uaddo_arith_extended
    ; CHECK: liveins: $w0, $x0
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
    ; CHECK-NEXT: %reg1:gpr32 = COPY $w0
    ; CHECK-NEXT: %add:gpr64 = ADDSXrx %reg0, %reg1, 18, implicit-def $nzcv
    ; CHECK-NEXT: $x0 = COPY %add
    ; CHECK-NEXT: RET_ReallyLR implicit $x0
    %reg0:gpr(s64) = COPY $x0
    %reg1:gpr(s32) = COPY $w0
    %ext:gpr(s64) = G_ZEXT %reg1(s32)
    %cst:gpr(s64) = G_CONSTANT i64 2
    %shift:gpr(s64) = G_SHL %ext, %cst(s64)
    %add:gpr(s64), %flags:gpr(s32) = G_UADDO %reg0, %shift
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0

...