; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -global-isel -o - \
; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs < %s | FileCheck %s

; Tests for GlobalISel selection of 64-bit integer and double-precision
; loads/stores on ppc64le, including int<->fp conversions feeding or fed by
; the memory access.

; Plain i64 load: selected as a single D-form ld.
define i64 @load_i64(ptr %p) {
; CHECK-LABEL: load_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld r3, 0(r3)
; CHECK-NEXT:    blr
entry:
  %ret = load i64, ptr %p, align 8
  ret i64 %ret
}

; i64 load consumed by integer add.
define i64 @load2_i64(ptr %p, i64 %a) {
; CHECK-LABEL: load2_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld r3, 0(r3)
; CHECK-NEXT:    add r3, r3, r4
; CHECK-NEXT:    blr
entry:
  %load = load i64, ptr %p, align 8
  %ret = add i64 %load, %a
  ret i64 %ret
}

; i64 load feeding signed int-to-float conversion (xscvsxdsp).
define float @load3_i64(ptr %p) {
; CHECK-LABEL: load3_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld r3, 0(r3)
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
entry:
  %load = load i64, ptr %p, align 8
  %ret = sitofp i64 %load to float
  ret float %ret
}

; i64 load feeding signed int-to-double conversion (xscvsxddp).
define double @load4_i64(ptr %p) {
; CHECK-LABEL: load4_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld r3, 0(r3)
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
entry:
  %load = load i64, ptr %p, align 8
  %ret = sitofp i64 %load to double
  ret double %ret
}

; i64 load feeding unsigned int-to-float conversion (xscvuxdsp).
define float @load5_i64(ptr %p) {
; CHECK-LABEL: load5_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld r3, 0(r3)
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
entry:
  %load = load i64, ptr %p, align 8
  %ret = uitofp i64 %load to float
  ret float %ret
}

; i64 load feeding unsigned int-to-double conversion (xscvuxddp).
define double @load6_i64(ptr %p) {
; CHECK-LABEL: load6_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld r3, 0(r3)
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
entry:
  %load = load i64, ptr %p, align 8
  %ret = uitofp i64 %load to double
  ret double %ret
}

; Plain double load: selected as lfd.
define double @load_f64(ptr %p) {
; CHECK-LABEL: load_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f1, 0(r3)
; CHECK-NEXT:    blr
entry:
  %ret = load double, ptr %p, align 8
  ret double %ret
}

; double load consumed by fadd.
define double @load2_f64(ptr %p, double %a) {
; CHECK-LABEL: load2_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xsadddp f1, f0, f1
; CHECK-NEXT:    blr
entry:
  %load = load double, ptr %p, align 8
  %ret = fadd double %load, %a
  ret double %ret
}

; double load feeding fp-to-signed-i64 conversion (xscvdpsxds).
define i64 @load3_f64(ptr %p) {
; CHECK-LABEL: load3_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xscvdpsxds f0, f0
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
entry:
  %load = load double, ptr %p, align 8
  %ret = fptosi double %load to i64
  ret i64 %ret
}

; double load feeding fp-to-unsigned-i64 conversion (xscvdpuxds).
define i64 @load4_f64(ptr %p) {
; CHECK-LABEL: load4_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lfd f0, 0(r3)
; CHECK-NEXT:    xscvdpuxds f0, f0
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
entry:
  %load = load double, ptr %p, align 8
  %ret = fptoui double %load to i64
  ret i64 %ret
}

; Store of an i64 immediate: materialized with li then std.
define void @store_i64(ptr %p) {
; CHECK-LABEL: store_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li r4, 100
; CHECK-NEXT:    std r4, 0(r3)
; CHECK-NEXT:    blr
entry:
  store i64 100, ptr %p, align 8
  ret void
}

; Store of an i64 add result.
define void @store2_i64(ptr %p, i64 %a, i64 %b) {
; CHECK-LABEL: store2_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    add r4, r4, r5
; CHECK-NEXT:    std r4, 0(r3)
; CHECK-NEXT:    blr
entry:
  %add = add i64 %a, %b
  store i64 %add, ptr %p, align 8
  ret void
}

; float -> signed i64 conversion stored to memory.
define void @store3_i64(ptr %p, float %a) {
; CHECK-LABEL: store3_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds f0, f1
; CHECK-NEXT:    mffprd r4, f0
; CHECK-NEXT:    std r4, 0(r3)
; CHECK-NEXT:    blr
entry:
  %conv = fptosi float %a to i64
  store i64 %conv, ptr %p, align 8
  ret void
}

; double -> signed i64 conversion stored to memory.
define void @store4_i64(ptr %p, double %a) {
; CHECK-LABEL: store4_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds f0, f1
; CHECK-NEXT:    mffprd r4, f0
; CHECK-NEXT:    std r4, 0(r3)
; CHECK-NEXT:    blr
entry:
  %conv = fptosi double %a to i64
  store i64 %conv, ptr %p, align 8
  ret void
}

; float -> unsigned i64 conversion stored to memory.
define void @store5_i64(ptr %p, float %a) {
; CHECK-LABEL: store5_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds f0, f1
; CHECK-NEXT:    mffprd r4, f0
; CHECK-NEXT:    std r4, 0(r3)
; CHECK-NEXT:    blr
entry:
  %conv = fptoui float %a to i64
  store i64 %conv, ptr %p, align 8
  ret void
}

; double -> unsigned i64 conversion stored to memory.
define void @store6_i64(ptr %p, double %a) {
; CHECK-LABEL: store6_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds f0, f1
; CHECK-NEXT:    mffprd r4, f0
; CHECK-NEXT:    std r4, 0(r3)
; CHECK-NEXT:    blr
entry:
  %conv = fptoui double %a to i64
  store i64 %conv, ptr %p, align 8
  ret void
}

; Plain double store: selected as stfd.
define void @store_f64(ptr %p, double %a) {
; CHECK-LABEL: store_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    stfd f1, 0(r3)
; CHECK-NEXT:    blr
entry:
  store double %a, ptr %p, align 8
  ret void
}

; Store of an fadd result.
define void @store2_f64(ptr %p, double %a, double %b) {
; CHECK-LABEL: store2_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xsadddp f0, f1, f2
; CHECK-NEXT:    stfd f0, 0(r3)
; CHECK-NEXT:    blr
entry:
  %fadd = fadd double %a, %b
  store double %fadd, ptr %p, align 8
  ret void
}

; signed i64 -> double conversion stored to memory.
define void @store3_f64(ptr %p, i64 %a) {
; CHECK-LABEL: store3_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r4
; CHECK-NEXT:    xscvsxddp f0, f0
; CHECK-NEXT:    stfd f0, 0(r3)
; CHECK-NEXT:    blr
entry:
  %conv = sitofp i64 %a to double
  store double %conv, ptr %p, align 8
  ret void
}

; unsigned i64 -> double conversion stored to memory.
define void @store4_f64(ptr %p, i64 %a) {
; CHECK-LABEL: store4_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r4
; CHECK-NEXT:    xscvuxddp f0, f0
; CHECK-NEXT:    stfd f0, 0(r3)
; CHECK-NEXT:    blr
entry:
  %conv = uitofp i64 %a to double
  store double %conv, ptr %p, align 8
  ret void
}