1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 2; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec \ 3; RUN: -mtriple powerpc-ibm-aix-xcoff < %s | \ 4; RUN: FileCheck --check-prefixes=CHECKASM,ASM32PWR4 %s 5 6; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec \ 7; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s | \ 8; RUN: FileCheck --check-prefixes=CHECKASM,ASM64PWR4 %s 9 10define void @call_test_chars() { 11; ASM32PWR4-LABEL: call_test_chars: 12; ASM32PWR4: # %bb.0: # %entry 13; ASM32PWR4-NEXT: mflr 0 14; ASM32PWR4-NEXT: stwu 1, -64(1) 15; ASM32PWR4-NEXT: li 3, 97 16; ASM32PWR4-NEXT: li 4, 97 17; ASM32PWR4-NEXT: stw 0, 72(1) 18; ASM32PWR4-NEXT: li 5, 97 19; ASM32PWR4-NEXT: li 6, 97 20; ASM32PWR4-NEXT: bl .test_chars 21; ASM32PWR4-NEXT: nop 22; ASM32PWR4-NEXT: addi 1, 1, 64 23; ASM32PWR4-NEXT: lwz 0, 8(1) 24; ASM32PWR4-NEXT: mtlr 0 25; ASM32PWR4-NEXT: blr 26; 27; ASM64PWR4-LABEL: call_test_chars: 28; ASM64PWR4: # %bb.0: # %entry 29; ASM64PWR4-NEXT: mflr 0 30; ASM64PWR4-NEXT: stdu 1, -112(1) 31; ASM64PWR4-NEXT: li 3, 97 32; ASM64PWR4-NEXT: li 4, 97 33; ASM64PWR4-NEXT: std 0, 128(1) 34; ASM64PWR4-NEXT: li 5, 97 35; ASM64PWR4-NEXT: li 6, 97 36; ASM64PWR4-NEXT: bl .test_chars 37; ASM64PWR4-NEXT: nop 38; ASM64PWR4-NEXT: addi 1, 1, 112 39; ASM64PWR4-NEXT: ld 0, 16(1) 40; ASM64PWR4-NEXT: mtlr 0 41; ASM64PWR4-NEXT: blr 42entry: 43 call i8 @test_chars(i8 signext 97, i8 signext 97, i8 signext 97, i8 signext 97) 44 ret void 45} 46 47define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 signext %c4) { 48; CHECKASM-LABEL: test_chars: 49; CHECKASM: # %bb.0: # %entry 50; CHECKASM-NEXT: add 3, 3, 4 51; CHECKASM-NEXT: add 3, 3, 5 52; CHECKASM-NEXT: add 3, 3, 6 53; CHECKASM-NEXT: extsb 3, 3 54; CHECKASM-NEXT: blr 55entry: 56 %conv = sext i8 %c1 to i32 57 %conv1 = sext i8 %c2 to i32 58 %add = add nsw i32 %conv, %conv1 59 %conv2 = sext i8 %c3 to i32 60 %add3 = add nsw i32 %add, %conv2 61 %conv4 = sext i8 %c4 to i32 62 %add5 = add nsw i32 %add3, %conv4 63 %conv6 = trunc i32 %add5 to i8 64 ret i8 %conv6 65} 66 67define void @call_test_chars_mix() { 68; ASM32PWR4-LABEL: call_test_chars_mix: 69; ASM32PWR4: # %bb.0: # %entry 70; ASM32PWR4-NEXT: mflr 0 71; ASM32PWR4-NEXT: stwu 1, -64(1) 72; ASM32PWR4-NEXT: li 3, 97 73; ASM32PWR4-NEXT: li 4, 225 74; ASM32PWR4-NEXT: stw 0, 72(1) 75; ASM32PWR4-NEXT: li 5, 97 76; ASM32PWR4-NEXT: li 6, -31 77; ASM32PWR4-NEXT: bl .test_chars_mix 78; ASM32PWR4-NEXT: nop 79; ASM32PWR4-NEXT: addi 1, 1, 64 80; ASM32PWR4-NEXT: lwz 0, 8(1) 81; ASM32PWR4-NEXT: mtlr 0 82; ASM32PWR4-NEXT: blr 83; 84; ASM64PWR4-LABEL: call_test_chars_mix: 85; ASM64PWR4: # %bb.0: # %entry 86; ASM64PWR4-NEXT: mflr 0 87; ASM64PWR4-NEXT: stdu 1, -112(1) 88; ASM64PWR4-NEXT: li 3, 97 89; ASM64PWR4-NEXT: li 4, 225 90; ASM64PWR4-NEXT: std 0, 128(1) 91; ASM64PWR4-NEXT: li 5, 97 92; ASM64PWR4-NEXT: li 6, -31 93; ASM64PWR4-NEXT: bl .test_chars_mix 94; ASM64PWR4-NEXT: nop 95; ASM64PWR4-NEXT: addi 1, 1, 112 96; ASM64PWR4-NEXT: ld 0, 16(1) 97; ASM64PWR4-NEXT: mtlr 0 98; ASM64PWR4-NEXT: blr 99entry: 100 call i8 @test_chars_mix(i8 signext 97, i8 zeroext -31, i8 zeroext 97, i8 signext -31) 101 ret void 102} 103 104define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3, i8 signext %c4) { 105; CHECKASM-LABEL: test_chars_mix: 106; CHECKASM: # %bb.0: # %entry 107; CHECKASM-NEXT: add 3, 3, 4 108; CHECKASM-NEXT: add 3, 3, 5 109; CHECKASM-NEXT: add 3, 3, 6 110; CHECKASM-NEXT: extsb 3, 3 111; CHECKASM-NEXT: blr 
112entry: 113 %conv = sext i8 %c1 to i32 114 %conv1 = zext i8 %c2 to i32 115 %add = add nsw i32 %conv, %conv1 116 %conv2 = zext i8 %c3 to i32 117 %add3 = add nsw i32 %add, %conv2 118 %conv4 = sext i8 %c4 to i32 119 %add5 = add nsw i32 %add3, %conv4 120 %conv6 = trunc i32 %add5 to i8 121 ret i8 %conv6 122} 123 124@global_i1 = global i8 0, align 1 125 126define void @test_i1(i1 %b) { 127; ASM32PWR4-LABEL: test_i1: 128; ASM32PWR4: # %bb.0: # %entry 129; ASM32PWR4-NEXT: lwz 4, L..C0(2) # @global_i1 130; ASM32PWR4-NEXT: clrlwi 3, 3, 31 131; ASM32PWR4-NEXT: stb 3, 0(4) 132; ASM32PWR4-NEXT: blr 133; 134; ASM64PWR4-LABEL: test_i1: 135; ASM64PWR4: # %bb.0: # %entry 136; ASM64PWR4-NEXT: ld 4, L..C0(2) # @global_i1 137; ASM64PWR4-NEXT: clrlwi 3, 3, 31 138; ASM64PWR4-NEXT: stb 3, 0(4) 139; ASM64PWR4-NEXT: blr 140 entry: 141 %frombool = zext i1 %b to i8 142 store i8 %frombool, ptr @global_i1, align 1 143 ret void 144} 145 146define void @call_test_i1() { 147; ASM32PWR4-LABEL: call_test_i1: 148; ASM32PWR4: # %bb.0: # %entry 149; ASM32PWR4-NEXT: mflr 0 150; ASM32PWR4-NEXT: stwu 1, -64(1) 151; ASM32PWR4-NEXT: li 3, 1 152; ASM32PWR4-NEXT: stw 0, 72(1) 153; ASM32PWR4-NEXT: bl .test_i1 154; ASM32PWR4-NEXT: nop 155; ASM32PWR4-NEXT: addi 1, 1, 64 156; ASM32PWR4-NEXT: lwz 0, 8(1) 157; ASM32PWR4-NEXT: mtlr 0 158; ASM32PWR4-NEXT: blr 159; 160; ASM64PWR4-LABEL: call_test_i1: 161; ASM64PWR4: # %bb.0: # %entry 162; ASM64PWR4-NEXT: mflr 0 163; ASM64PWR4-NEXT: stdu 1, -112(1) 164; ASM64PWR4-NEXT: li 3, 1 165; ASM64PWR4-NEXT: std 0, 128(1) 166; ASM64PWR4-NEXT: bl .test_i1 167; ASM64PWR4-NEXT: nop 168; ASM64PWR4-NEXT: addi 1, 1, 112 169; ASM64PWR4-NEXT: ld 0, 16(1) 170; ASM64PWR4-NEXT: mtlr 0 171; ASM64PWR4-NEXT: blr 172entry: 173 call void @test_i1(i1 1) 174 ret void 175} 176 177define void @test_i1zext(i1 zeroext %b) { 178; ASM32PWR4-LABEL: test_i1zext: 179; ASM32PWR4: # %bb.0: # %entry 180; ASM32PWR4-NEXT: lwz 4, L..C0(2) # @global_i1 181; ASM32PWR4-NEXT: stb 3, 0(4) 182; ASM32PWR4-NEXT: blr 183; 184; ASM64PWR4-LABEL: test_i1zext: 185; ASM64PWR4: # %bb.0: # %entry 186; ASM64PWR4-NEXT: ld 4, L..C0(2) # @global_i1 187; ASM64PWR4-NEXT: stb 3, 0(4) 188; ASM64PWR4-NEXT: blr 189 entry: 190 %frombool = zext i1 %b to i8 191 store i8 %frombool, ptr @global_i1, align 1 192 ret void 193 } 194 195define i32 @test_ints(i32 signext %a, i32 zeroext %b, i32 zeroext %c, i32 signext %d, i32 signext %e, i32 signext %f, i32 signext %g, i32 signext %h) { 196; CHECKASM-LABEL: test_ints: 197; CHECKASM: # %bb.0: # %entry 198; CHECKASM-NEXT: add 3, 3, 4 199; CHECKASM-NEXT: add 3, 3, 5 200; CHECKASM-NEXT: add 3, 3, 6 201; CHECKASM-NEXT: add 3, 3, 7 202; CHECKASM-NEXT: add 3, 3, 8 203; CHECKASM-NEXT: add 3, 3, 9 204; CHECKASM-NEXT: add 3, 3, 10 205; CHECKASM-NEXT: blr 206entry: 207 %add = add i32 %a, %b 208 %add1 = add i32 %add, %c 209 %add2 = add i32 %add1, %d 210 %add3 = add i32 %add2, %e 211 %add4 = add i32 %add3, %f 212 %add5 = add i32 %add4, %g 213 %add6 = add i32 %add5, %h 214 ret i32 %add6 215} 216 217define void @call_test_ints() { 218; ASM32PWR4-LABEL: call_test_ints: 219; ASM32PWR4: # %bb.0: # %entry 220; ASM32PWR4-NEXT: mflr 0 221; ASM32PWR4-NEXT: stwu 1, -64(1) 222; ASM32PWR4-NEXT: li 3, 1 223; ASM32PWR4-NEXT: li 4, 1 224; ASM32PWR4-NEXT: stw 0, 72(1) 225; ASM32PWR4-NEXT: lis 5, -32768 226; ASM32PWR4-NEXT: lis 6, -32768 227; ASM32PWR4-NEXT: li 7, 1 228; ASM32PWR4-NEXT: li 8, 1 229; ASM32PWR4-NEXT: li 9, 1 230; ASM32PWR4-NEXT: li 10, 1 231; ASM32PWR4-NEXT: bl .test_ints 232; ASM32PWR4-NEXT: nop 233; ASM32PWR4-NEXT: addi 1, 1, 64 
234; ASM32PWR4-NEXT: lwz 0, 8(1) 235; ASM32PWR4-NEXT: mtlr 0 236; ASM32PWR4-NEXT: blr 237; 238; ASM64PWR4-LABEL: call_test_ints: 239; ASM64PWR4: # %bb.0: # %entry 240; ASM64PWR4-NEXT: mflr 0 241; ASM64PWR4-NEXT: stdu 1, -112(1) 242; ASM64PWR4-NEXT: li 3, 1 243; ASM64PWR4-NEXT: li 4, 1 244; ASM64PWR4-NEXT: std 0, 128(1) 245; ASM64PWR4-NEXT: rldic 5, 3, 31, 32 246; ASM64PWR4-NEXT: lis 6, -32768 247; ASM64PWR4-NEXT: li 7, 1 248; ASM64PWR4-NEXT: li 8, 1 249; ASM64PWR4-NEXT: li 9, 1 250; ASM64PWR4-NEXT: li 10, 1 251; ASM64PWR4-NEXT: bl .test_ints 252; ASM64PWR4-NEXT: nop 253; ASM64PWR4-NEXT: addi 1, 1, 112 254; ASM64PWR4-NEXT: ld 0, 16(1) 255; ASM64PWR4-NEXT: mtlr 0 256; ASM64PWR4-NEXT: blr 257entry: 258 call i32 @test_ints(i32 signext 1, i32 zeroext 1, i32 zeroext 2147483648, i32 signext -2147483648, i32 signext 1, i32 signext 1, i32 signext 1, i32 signext 1) 259 ret void 260} 261 262define void @call_test_i64() { 263; ASM32PWR4-LABEL: call_test_i64: 264; ASM32PWR4: # %bb.0: # %entry 265; ASM32PWR4-NEXT: mflr 0 266; ASM32PWR4-NEXT: stwu 1, -64(1) 267; ASM32PWR4-NEXT: li 3, 0 268; ASM32PWR4-NEXT: li 4, 1 269; ASM32PWR4-NEXT: stw 0, 72(1) 270; ASM32PWR4-NEXT: li 5, 0 271; ASM32PWR4-NEXT: li 6, 2 272; ASM32PWR4-NEXT: li 7, 0 273; ASM32PWR4-NEXT: li 8, 3 274; ASM32PWR4-NEXT: li 9, 0 275; ASM32PWR4-NEXT: li 10, 4 276; ASM32PWR4-NEXT: bl .test_i64 277; ASM32PWR4-NEXT: nop 278; ASM32PWR4-NEXT: addi 1, 1, 64 279; ASM32PWR4-NEXT: lwz 0, 8(1) 280; ASM32PWR4-NEXT: mtlr 0 281; ASM32PWR4-NEXT: blr 282; 283; ASM64PWR4-LABEL: call_test_i64: 284; ASM64PWR4: # %bb.0: # %entry 285; ASM64PWR4-NEXT: mflr 0 286; ASM64PWR4-NEXT: stdu 1, -112(1) 287; ASM64PWR4-NEXT: li 3, 1 288; ASM64PWR4-NEXT: li 4, 2 289; ASM64PWR4-NEXT: std 0, 128(1) 290; ASM64PWR4-NEXT: li 5, 3 291; ASM64PWR4-NEXT: li 6, 4 292; ASM64PWR4-NEXT: bl .test_i64 293; ASM64PWR4-NEXT: nop 294; ASM64PWR4-NEXT: addi 1, 1, 112 295; ASM64PWR4-NEXT: ld 0, 16(1) 296; ASM64PWR4-NEXT: mtlr 0 297; ASM64PWR4-NEXT: blr 298entry: 299 call i64 @test_i64(i64 1, i64 2, i64 3, i64 4) 300 ret void 301} 302 303define i64 @test_i64(i64 %a, i64 %b, i64 %c, i64 %d) { 304; ASM32PWR4-LABEL: test_i64: 305; ASM32PWR4: # %bb.0: # %entry 306; ASM32PWR4-NEXT: addc 4, 4, 6 307; ASM32PWR4-NEXT: adde 3, 3, 5 308; ASM32PWR4-NEXT: addc 4, 4, 8 309; ASM32PWR4-NEXT: adde 3, 3, 7 310; ASM32PWR4-NEXT: addc 4, 4, 10 311; ASM32PWR4-NEXT: adde 3, 3, 9 312; ASM32PWR4-NEXT: blr 313; 314; ASM64PWR4-LABEL: test_i64: 315; ASM64PWR4: # %bb.0: # %entry 316; ASM64PWR4-NEXT: add 3, 3, 4 317; ASM64PWR4-NEXT: add 3, 3, 5 318; ASM64PWR4-NEXT: add 3, 3, 6 319; ASM64PWR4-NEXT: blr 320entry: 321 %add = add nsw i64 %a, %b 322 %add1 = add nsw i64 %add, %c 323 %add2 = add nsw i64 %add1, %d 324 ret i64 %add2 325} 326 327define void @call_test_int_ptr() { 328; ASM32PWR4-LABEL: call_test_int_ptr: 329; ASM32PWR4: # %bb.0: # %entry 330; ASM32PWR4-NEXT: mflr 0 331; ASM32PWR4-NEXT: stwu 1, -64(1) 332; ASM32PWR4-NEXT: li 3, 0 333; ASM32PWR4-NEXT: stw 0, 72(1) 334; ASM32PWR4-NEXT: stw 3, 60(1) 335; ASM32PWR4-NEXT: addi 3, 1, 60 336; ASM32PWR4-NEXT: bl .test_int_ptr 337; ASM32PWR4-NEXT: nop 338; ASM32PWR4-NEXT: addi 1, 1, 64 339; ASM32PWR4-NEXT: lwz 0, 8(1) 340; ASM32PWR4-NEXT: mtlr 0 341; ASM32PWR4-NEXT: blr 342; 343; ASM64PWR4-LABEL: call_test_int_ptr: 344; ASM64PWR4: # %bb.0: # %entry 345; ASM64PWR4-NEXT: mflr 0 346; ASM64PWR4-NEXT: stdu 1, -128(1) 347; ASM64PWR4-NEXT: li 3, 0 348; ASM64PWR4-NEXT: std 0, 144(1) 349; ASM64PWR4-NEXT: stw 3, 124(1) 350; ASM64PWR4-NEXT: addi 3, 1, 124 351; ASM64PWR4-NEXT: bl 
.test_int_ptr 352; ASM64PWR4-NEXT: nop 353; ASM64PWR4-NEXT: addi 1, 1, 128 354; ASM64PWR4-NEXT: ld 0, 16(1) 355; ASM64PWR4-NEXT: mtlr 0 356; ASM64PWR4-NEXT: blr 357entry: 358 %b = alloca i32, align 4 359 store i32 0, ptr %b, align 4 360 call void @test_int_ptr(ptr %b) 361 ret void 362} 363 364define void @test_int_ptr(ptr %a) { 365; ASM32PWR4-LABEL: test_int_ptr: 366; ASM32PWR4: # %bb.0: # %entry 367; ASM32PWR4-NEXT: stw 3, -8(1) 368; ASM32PWR4-NEXT: blr 369; 370; ASM64PWR4-LABEL: test_int_ptr: 371; ASM64PWR4: # %bb.0: # %entry 372; ASM64PWR4-NEXT: std 3, -8(1) 373; ASM64PWR4-NEXT: blr 374entry: 375 %a.addr = alloca ptr, align 8 376 store ptr %a, ptr %a.addr, align 8 377 ret void 378} 379 380define i32 @caller(i32 %i) { 381; ASM32PWR4-LABEL: caller: 382; ASM32PWR4: # %bb.0: # %entry 383; ASM32PWR4-NEXT: mflr 0 384; ASM32PWR4-NEXT: stwu 1, -64(1) 385; ASM32PWR4-NEXT: stw 0, 72(1) 386; ASM32PWR4-NEXT: stw 3, 60(1) 387; ASM32PWR4-NEXT: cntlzw 3, 3 388; ASM32PWR4-NEXT: not 3, 3 389; ASM32PWR4-NEXT: rlwinm 3, 3, 27, 31, 31 390; ASM32PWR4-NEXT: stb 3, 59(1) 391; ASM32PWR4-NEXT: bl .call_test_bool[PR] 392; ASM32PWR4-NEXT: nop 393; ASM32PWR4-NEXT: addi 1, 1, 64 394; ASM32PWR4-NEXT: lwz 0, 8(1) 395; ASM32PWR4-NEXT: mtlr 0 396; ASM32PWR4-NEXT: blr 397; 398; ASM64PWR4-LABEL: caller: 399; ASM64PWR4: # %bb.0: # %entry 400; ASM64PWR4-NEXT: mflr 0 401; ASM64PWR4-NEXT: stdu 1, -128(1) 402; ASM64PWR4-NEXT: std 0, 144(1) 403; ASM64PWR4-NEXT: stw 3, 124(1) 404; ASM64PWR4-NEXT: cntlzw 3, 3 405; ASM64PWR4-NEXT: srwi 3, 3, 5 406; ASM64PWR4-NEXT: xori 3, 3, 1 407; ASM64PWR4-NEXT: stb 3, 123(1) 408; ASM64PWR4-NEXT: bl .call_test_bool[PR] 409; ASM64PWR4-NEXT: nop 410; ASM64PWR4-NEXT: addi 1, 1, 128 411; ASM64PWR4-NEXT: ld 0, 16(1) 412; ASM64PWR4-NEXT: mtlr 0 413; ASM64PWR4-NEXT: blr 414entry: 415 %i.addr = alloca i32, align 4 416 %b = alloca i8, align 1 417 store i32 %i, ptr %i.addr, align 4 418 %0 = load i32, ptr %i.addr, align 4 419 %cmp = icmp ne i32 %0, 0 420 %frombool = zext i1 %cmp to i8 421 store i8 %frombool, ptr %b, align 1 422 %1 = load i8, ptr %b, align 1 423 %tobool = trunc i8 %1 to i1 424 %call = call i32 @call_test_bool(i1 zeroext %tobool) 425 ret i32 %call 426} 427 428declare i32 @call_test_bool(i1 zeroext) 429 430@f1 = global float 0.000000e+00, align 4 431@d1 = global double 0.000000e+00, align 8 432 433define void @call_test_floats() { 434; ASM32PWR4-LABEL: call_test_floats: 435; ASM32PWR4: # %bb.0: # %entry 436; ASM32PWR4-NEXT: mflr 0 437; ASM32PWR4-NEXT: stwu 1, -64(1) 438; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1 439; ASM32PWR4-NEXT: stw 0, 72(1) 440; ASM32PWR4-NEXT: lfs 1, 0(3) 441; ASM32PWR4-NEXT: fmr 2, 1 442; ASM32PWR4-NEXT: fmr 3, 1 443; ASM32PWR4-NEXT: bl .test_floats 444; ASM32PWR4-NEXT: nop 445; ASM32PWR4-NEXT: addi 1, 1, 64 446; ASM32PWR4-NEXT: lwz 0, 8(1) 447; ASM32PWR4-NEXT: mtlr 0 448; ASM32PWR4-NEXT: blr 449; 450; ASM64PWR4-LABEL: call_test_floats: 451; ASM64PWR4: # %bb.0: # %entry 452; ASM64PWR4-NEXT: mflr 0 453; ASM64PWR4-NEXT: stdu 1, -112(1) 454; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1 455; ASM64PWR4-NEXT: std 0, 128(1) 456; ASM64PWR4-NEXT: lfs 1, 0(3) 457; ASM64PWR4-NEXT: fmr 2, 1 458; ASM64PWR4-NEXT: fmr 3, 1 459; ASM64PWR4-NEXT: bl .test_floats 460; ASM64PWR4-NEXT: nop 461; ASM64PWR4-NEXT: addi 1, 1, 112 462; ASM64PWR4-NEXT: ld 0, 16(1) 463; ASM64PWR4-NEXT: mtlr 0 464; ASM64PWR4-NEXT: blr 465entry: 466 %0 = load float, ptr @f1, align 4 467 call float @test_floats(float %0, float %0, float %0) 468 ret void 469} 470 471define float @test_floats(float %f1, float %f2, float %f3) { 
472; CHECKASM-LABEL: test_floats: 473; CHECKASM: # %bb.0: # %entry 474; CHECKASM-NEXT: fadds 0, 1, 2 475; CHECKASM-NEXT: fadds 1, 0, 3 476; CHECKASM-NEXT: blr 477entry: 478 %add = fadd float %f1, %f2 479 %add1 = fadd float %add, %f3 480 ret float %add1 481} 482 483define void @call_test_fpr_max() { 484; ASM32PWR4-LABEL: call_test_fpr_max: 485; ASM32PWR4: # %bb.0: # %entry 486; ASM32PWR4-NEXT: mflr 0 487; ASM32PWR4-NEXT: stwu 1, -128(1) 488; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1 489; ASM32PWR4-NEXT: stw 0, 136(1) 490; ASM32PWR4-NEXT: lfd 1, 0(3) 491; ASM32PWR4-NEXT: fmr 2, 1 492; ASM32PWR4-NEXT: fmr 3, 1 493; ASM32PWR4-NEXT: stfd 1, 120(1) 494; ASM32PWR4-NEXT: stfd 1, 112(1) 495; ASM32PWR4-NEXT: fmr 4, 1 496; ASM32PWR4-NEXT: fmr 5, 1 497; ASM32PWR4-NEXT: stfd 1, 104(1) 498; ASM32PWR4-NEXT: fmr 6, 1 499; ASM32PWR4-NEXT: fmr 7, 1 500; ASM32PWR4-NEXT: stfd 1, 96(1) 501; ASM32PWR4-NEXT: stfd 1, 88(1) 502; ASM32PWR4-NEXT: fmr 8, 1 503; ASM32PWR4-NEXT: fmr 9, 1 504; ASM32PWR4-NEXT: stfd 1, 80(1) 505; ASM32PWR4-NEXT: fmr 10, 1 506; ASM32PWR4-NEXT: fmr 11, 1 507; ASM32PWR4-NEXT: stfd 1, 72(1) 508; ASM32PWR4-NEXT: stfd 1, 64(1) 509; ASM32PWR4-NEXT: fmr 12, 1 510; ASM32PWR4-NEXT: fmr 13, 1 511; ASM32PWR4-NEXT: stfd 1, 56(1) 512; ASM32PWR4-NEXT: bl .test_fpr_max 513; ASM32PWR4-NEXT: nop 514; ASM32PWR4-NEXT: addi 1, 1, 128 515; ASM32PWR4-NEXT: lwz 0, 8(1) 516; ASM32PWR4-NEXT: mtlr 0 517; ASM32PWR4-NEXT: blr 518; 519; ASM64PWR4-LABEL: call_test_fpr_max: 520; ASM64PWR4: # %bb.0: # %entry 521; ASM64PWR4-NEXT: mflr 0 522; ASM64PWR4-NEXT: stdu 1, -160(1) 523; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1 524; ASM64PWR4-NEXT: std 0, 176(1) 525; ASM64PWR4-NEXT: lfd 1, 0(3) 526; ASM64PWR4-NEXT: fmr 2, 1 527; ASM64PWR4-NEXT: fmr 3, 1 528; ASM64PWR4-NEXT: stfd 1, 144(1) 529; ASM64PWR4-NEXT: stfd 1, 136(1) 530; ASM64PWR4-NEXT: fmr 4, 1 531; ASM64PWR4-NEXT: fmr 5, 1 532; ASM64PWR4-NEXT: stfd 1, 128(1) 533; ASM64PWR4-NEXT: fmr 6, 1 534; ASM64PWR4-NEXT: fmr 7, 1 535; ASM64PWR4-NEXT: stfd 1, 120(1) 536; ASM64PWR4-NEXT: stfd 1, 112(1) 537; ASM64PWR4-NEXT: fmr 8, 1 538; ASM64PWR4-NEXT: fmr 9, 1 539; ASM64PWR4-NEXT: fmr 10, 1 540; ASM64PWR4-NEXT: fmr 11, 1 541; ASM64PWR4-NEXT: fmr 12, 1 542; ASM64PWR4-NEXT: fmr 13, 1 543; ASM64PWR4-NEXT: bl .test_fpr_max 544; ASM64PWR4-NEXT: nop 545; ASM64PWR4-NEXT: addi 1, 1, 160 546; ASM64PWR4-NEXT: ld 0, 16(1) 547; ASM64PWR4-NEXT: mtlr 0 548; ASM64PWR4-NEXT: blr 549entry: 550 %0 = load double, ptr @d1, align 8 551 call double @test_fpr_max(double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0) 552 ret void 553} 554 555define double @test_fpr_max(double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, double %d9, double %d10, double %d11, double %d12, double %d13) { 556; CHECKASM-LABEL: test_fpr_max: 557; CHECKASM: # %bb.0: # %entry 558; CHECKASM-NEXT: fadd 0, 1, 2 559; CHECKASM-NEXT: fadd 0, 0, 3 560; CHECKASM-NEXT: fadd 0, 0, 4 561; CHECKASM-NEXT: fadd 0, 0, 5 562; CHECKASM-NEXT: fadd 0, 0, 6 563; CHECKASM-NEXT: fadd 0, 0, 7 564; CHECKASM-NEXT: fadd 0, 0, 8 565; CHECKASM-NEXT: fadd 0, 0, 9 566; CHECKASM-NEXT: fadd 0, 0, 10 567; CHECKASM-NEXT: fadd 0, 0, 11 568; CHECKASM-NEXT: fadd 0, 0, 12 569; CHECKASM-NEXT: fadd 1, 0, 13 570; CHECKASM-NEXT: blr 571entry: 572 %add = fadd double %d1, %d2 573 %add1 = fadd double %add, %d3 574 %add2 = fadd double %add1, %d4 575 %add3 = fadd double %add2, %d5 576 %add4 = fadd double %add3, %d6 577 %add5 = fadd double %add4, %d7 578 
%add6 = fadd double %add5, %d8 579 %add7 = fadd double %add6, %d9 580 %add8 = fadd double %add7, %d10 581 %add9 = fadd double %add8, %d11 582 %add10 = fadd double %add9, %d12 583 %add11 = fadd double %add10, %d13 584 ret double %add11 585} 586 587define void @call_test_mix() { 588; ASM32PWR4-LABEL: call_test_mix: 589; ASM32PWR4: # %bb.0: # %entry 590; ASM32PWR4-NEXT: mflr 0 591; ASM32PWR4-NEXT: stwu 1, -64(1) 592; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1 593; ASM32PWR4-NEXT: stw 0, 72(1) 594; ASM32PWR4-NEXT: li 4, 1 595; ASM32PWR4-NEXT: li 7, 97 596; ASM32PWR4-NEXT: lfs 1, 0(3) 597; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1 598; ASM32PWR4-NEXT: lfd 2, 0(3) 599; ASM32PWR4-NEXT: bl .test_mix 600; ASM32PWR4-NEXT: nop 601; ASM32PWR4-NEXT: addi 1, 1, 64 602; ASM32PWR4-NEXT: lwz 0, 8(1) 603; ASM32PWR4-NEXT: mtlr 0 604; ASM32PWR4-NEXT: blr 605; 606; ASM64PWR4-LABEL: call_test_mix: 607; ASM64PWR4: # %bb.0: # %entry 608; ASM64PWR4-NEXT: mflr 0 609; ASM64PWR4-NEXT: stdu 1, -112(1) 610; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1 611; ASM64PWR4-NEXT: std 0, 128(1) 612; ASM64PWR4-NEXT: li 4, 1 613; ASM64PWR4-NEXT: li 6, 97 614; ASM64PWR4-NEXT: lfs 1, 0(3) 615; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1 616; ASM64PWR4-NEXT: lfd 2, 0(3) 617; ASM64PWR4-NEXT: bl .test_mix 618; ASM64PWR4-NEXT: nop 619; ASM64PWR4-NEXT: addi 1, 1, 112 620; ASM64PWR4-NEXT: ld 0, 16(1) 621; ASM64PWR4-NEXT: mtlr 0 622; ASM64PWR4-NEXT: blr 623entry: 624 %0 = load float, ptr @f1, align 4 625 %1 = load double, ptr @d1, align 8 626 call i32 @test_mix(float %0, i32 1, double %1, i8 signext 97) 627 ret void 628} 629 630define i32 @test_mix(float %f, i32 signext %i, double %d, i8 signext %c) { 631; ASM32PWR4-LABEL: test_mix: 632; ASM32PWR4: # %bb.0: # %entry 633; ASM32PWR4-NEXT: lis 3, 17200 634; ASM32PWR4-NEXT: fadd 1, 1, 2 635; ASM32PWR4-NEXT: stw 3, -16(1) 636; ASM32PWR4-NEXT: lwz 3, L..C3(2) # %const.0 637; ASM32PWR4-NEXT: frsp 1, 1 638; ASM32PWR4-NEXT: lfs 0, 0(3) 639; ASM32PWR4-NEXT: clrlwi 3, 7, 24 640; ASM32PWR4-NEXT: add 3, 4, 3 641; ASM32PWR4-NEXT: xoris 3, 3, 32768 642; ASM32PWR4-NEXT: stw 3, -12(1) 643; ASM32PWR4-NEXT: addi 3, 1, -4 644; ASM32PWR4-NEXT: lfd 2, -16(1) 645; ASM32PWR4-NEXT: fsub 0, 2, 0 646; ASM32PWR4-NEXT: frsp 0, 0 647; ASM32PWR4-NEXT: fadds 0, 0, 1 648; ASM32PWR4-NEXT: fctiwz 0, 0 649; ASM32PWR4-NEXT: stfiwx 0, 0, 3 650; ASM32PWR4-NEXT: lwz 3, -4(1) 651; ASM32PWR4-NEXT: blr 652; 653; ASM64PWR4-LABEL: test_mix: 654; ASM64PWR4: # %bb.0: # %entry 655; ASM64PWR4-NEXT: clrlwi 5, 6, 24 656; ASM64PWR4-NEXT: fadd 0, 1, 2 657; ASM64PWR4-NEXT: addi 3, 1, -4 658; ASM64PWR4-NEXT: frsp 0, 0 659; ASM64PWR4-NEXT: add 4, 4, 5 660; ASM64PWR4-NEXT: extsw 4, 4 661; ASM64PWR4-NEXT: std 4, -16(1) 662; ASM64PWR4-NEXT: lfd 1, -16(1) 663; ASM64PWR4-NEXT: fcfid 1, 1 664; ASM64PWR4-NEXT: frsp 1, 1 665; ASM64PWR4-NEXT: fadds 0, 1, 0 666; ASM64PWR4-NEXT: fctiwz 0, 0 667; ASM64PWR4-NEXT: stfiwx 0, 0, 3 668; ASM64PWR4-NEXT: lwz 3, -4(1) 669; ASM64PWR4-NEXT: blr 670entry: 671 %conv = fpext float %f to double 672 %add = fadd double %conv, %d 673 %conv1 = fptrunc double %add to float 674 %conv2 = zext i8 %c to i32 675 %add3 = add nsw i32 %i, %conv2 676 %conv4 = sitofp i32 %add3 to float 677 %add5 = fadd float %conv4, %conv1 678 %conv6 = fptosi float %add5 to i32 679 ret i32 %conv6 680} 681 682define i64 @callee_mixed_ints(i32 %a, i8 signext %b, i32 %c, i16 signext %d, i64 %e) { 683; ASM32PWR4-LABEL: callee_mixed_ints: 684; ASM32PWR4: # %bb.0: # %entry 685; ASM32PWR4-NEXT: clrlwi 4, 4, 24 686; ASM32PWR4-NEXT: add 3, 3, 4 687; ASM32PWR4-NEXT: add 3, 3, 5 688; 
ASM32PWR4-NEXT: add 3, 3, 6 689; ASM32PWR4-NEXT: srawi 5, 3, 31 690; ASM32PWR4-NEXT: addc 4, 3, 8 691; ASM32PWR4-NEXT: adde 3, 5, 7 692; ASM32PWR4-NEXT: blr 693; 694; ASM64PWR4-LABEL: callee_mixed_ints: 695; ASM64PWR4: # %bb.0: # %entry 696; ASM64PWR4-NEXT: clrlwi 4, 4, 24 697; ASM64PWR4-NEXT: add 3, 3, 4 698; ASM64PWR4-NEXT: add 3, 3, 5 699; ASM64PWR4-NEXT: add 3, 3, 6 700; ASM64PWR4-NEXT: extsw 3, 3 701; ASM64PWR4-NEXT: add 3, 3, 7 702; ASM64PWR4-NEXT: blr 703entry: 704 %conv = zext i8 %b to i32 705 %add = add nsw i32 %a, %conv 706 %add1 = add nsw i32 %add, %c 707 %conv2 = sext i16 %d to i32 708 %add3 = add nsw i32 %add1, %conv2 709 %conv4 = sext i32 %add3 to i64 710 %add5 = add nsw i64 %conv4, %e 711 ret i64 %add5 712 } 713 714define void @call_test_vararg() { 715; ASM32PWR4-LABEL: call_test_vararg: 716; ASM32PWR4: # %bb.0: # %entry 717; ASM32PWR4-NEXT: mflr 0 718; ASM32PWR4-NEXT: stwu 1, -80(1) 719; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1 720; ASM32PWR4-NEXT: stw 0, 88(1) 721; ASM32PWR4-NEXT: lfs 1, 0(3) 722; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1 723; ASM32PWR4-NEXT: stfd 1, 64(1) 724; ASM32PWR4-NEXT: lfd 2, 0(3) 725; ASM32PWR4-NEXT: li 3, 42 726; ASM32PWR4-NEXT: stfd 2, 72(1) 727; ASM32PWR4-NEXT: lwz 4, 64(1) 728; ASM32PWR4-NEXT: lwz 5, 68(1) 729; ASM32PWR4-NEXT: lwz 6, 72(1) 730; ASM32PWR4-NEXT: lwz 7, 76(1) 731; ASM32PWR4-NEXT: bl .test_vararg[PR] 732; ASM32PWR4-NEXT: nop 733; ASM32PWR4-NEXT: addi 1, 1, 80 734; ASM32PWR4-NEXT: lwz 0, 8(1) 735; ASM32PWR4-NEXT: mtlr 0 736; ASM32PWR4-NEXT: blr 737; 738; ASM64PWR4-LABEL: call_test_vararg: 739; ASM64PWR4: # %bb.0: # %entry 740; ASM64PWR4-NEXT: mflr 0 741; ASM64PWR4-NEXT: stdu 1, -128(1) 742; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1 743; ASM64PWR4-NEXT: std 0, 144(1) 744; ASM64PWR4-NEXT: lfs 1, 0(3) 745; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1 746; ASM64PWR4-NEXT: stfd 1, 112(1) 747; ASM64PWR4-NEXT: lfd 2, 0(3) 748; ASM64PWR4-NEXT: li 3, 42 749; ASM64PWR4-NEXT: stfd 2, 120(1) 750; ASM64PWR4-NEXT: ld 4, 112(1) 751; ASM64PWR4-NEXT: ld 5, 120(1) 752; ASM64PWR4-NEXT: bl .test_vararg[PR] 753; ASM64PWR4-NEXT: nop 754; ASM64PWR4-NEXT: addi 1, 1, 128 755; ASM64PWR4-NEXT: ld 0, 16(1) 756; ASM64PWR4-NEXT: mtlr 0 757; ASM64PWR4-NEXT: blr 758entry: 759 %0 = load float, ptr @f1, align 4 760 %conv = fpext float %0 to double 761 %1 = load double, ptr @d1, align 8 762 call void (i32, ...) @test_vararg(i32 42, double %conv, double %1) 763 ret void 764} 765 766declare void @test_vararg(i32, ...) 
767 768define void @call_test_vararg2() { 769; ASM32PWR4-LABEL: call_test_vararg2: 770; ASM32PWR4: # %bb.0: # %entry 771; ASM32PWR4-NEXT: mflr 0 772; ASM32PWR4-NEXT: stwu 1, -80(1) 773; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1 774; ASM32PWR4-NEXT: stw 0, 88(1) 775; ASM32PWR4-NEXT: li 6, 42 776; ASM32PWR4-NEXT: lfs 1, 0(3) 777; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1 778; ASM32PWR4-NEXT: stfd 1, 64(1) 779; ASM32PWR4-NEXT: lfd 2, 0(3) 780; ASM32PWR4-NEXT: li 3, 42 781; ASM32PWR4-NEXT: stfd 2, 72(1) 782; ASM32PWR4-NEXT: lwz 4, 64(1) 783; ASM32PWR4-NEXT: lwz 5, 68(1) 784; ASM32PWR4-NEXT: lwz 7, 72(1) 785; ASM32PWR4-NEXT: lwz 8, 76(1) 786; ASM32PWR4-NEXT: bl .test_vararg[PR] 787; ASM32PWR4-NEXT: nop 788; ASM32PWR4-NEXT: addi 1, 1, 80 789; ASM32PWR4-NEXT: lwz 0, 8(1) 790; ASM32PWR4-NEXT: mtlr 0 791; ASM32PWR4-NEXT: blr 792; 793; ASM64PWR4-LABEL: call_test_vararg2: 794; ASM64PWR4: # %bb.0: # %entry 795; ASM64PWR4-NEXT: mflr 0 796; ASM64PWR4-NEXT: stdu 1, -128(1) 797; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1 798; ASM64PWR4-NEXT: std 0, 144(1) 799; ASM64PWR4-NEXT: li 5, 42 800; ASM64PWR4-NEXT: lfs 1, 0(3) 801; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1 802; ASM64PWR4-NEXT: stfd 1, 112(1) 803; ASM64PWR4-NEXT: lfd 2, 0(3) 804; ASM64PWR4-NEXT: li 3, 42 805; ASM64PWR4-NEXT: stfd 2, 120(1) 806; ASM64PWR4-NEXT: ld 4, 112(1) 807; ASM64PWR4-NEXT: ld 6, 120(1) 808; ASM64PWR4-NEXT: bl .test_vararg[PR] 809; ASM64PWR4-NEXT: nop 810; ASM64PWR4-NEXT: addi 1, 1, 128 811; ASM64PWR4-NEXT: ld 0, 16(1) 812; ASM64PWR4-NEXT: mtlr 0 813; ASM64PWR4-NEXT: blr 814entry: 815 %0 = load float, ptr @f1, align 4 816 %conv = fpext float %0 to double 817 %1 = load double, ptr @d1, align 8 818 call void (i32, ...) @test_vararg(i32 42, double %conv, i32 42, double %1) 819 ret void 820} 821 822define void @call_test_vararg3() { 823; ASM32PWR4-LABEL: call_test_vararg3: 824; ASM32PWR4: # %bb.0: # %entry 825; ASM32PWR4-NEXT: mflr 0 826; ASM32PWR4-NEXT: stwu 1, -80(1) 827; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1 828; ASM32PWR4-NEXT: stw 0, 88(1) 829; ASM32PWR4-NEXT: li 6, 0 830; ASM32PWR4-NEXT: li 7, 42 831; ASM32PWR4-NEXT: lfs 1, 0(3) 832; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1 833; ASM32PWR4-NEXT: stfd 1, 64(1) 834; ASM32PWR4-NEXT: lfd 2, 0(3) 835; ASM32PWR4-NEXT: li 3, 42 836; ASM32PWR4-NEXT: stfd 2, 72(1) 837; ASM32PWR4-NEXT: lwz 4, 64(1) 838; ASM32PWR4-NEXT: lwz 5, 68(1) 839; ASM32PWR4-NEXT: lwz 8, 72(1) 840; ASM32PWR4-NEXT: lwz 9, 76(1) 841; ASM32PWR4-NEXT: bl .test_vararg[PR] 842; ASM32PWR4-NEXT: nop 843; ASM32PWR4-NEXT: addi 1, 1, 80 844; ASM32PWR4-NEXT: lwz 0, 8(1) 845; ASM32PWR4-NEXT: mtlr 0 846; ASM32PWR4-NEXT: blr 847; 848; ASM64PWR4-LABEL: call_test_vararg3: 849; ASM64PWR4: # %bb.0: # %entry 850; ASM64PWR4-NEXT: mflr 0 851; ASM64PWR4-NEXT: stdu 1, -128(1) 852; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1 853; ASM64PWR4-NEXT: std 0, 144(1) 854; ASM64PWR4-NEXT: li 5, 42 855; ASM64PWR4-NEXT: lfs 1, 0(3) 856; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1 857; ASM64PWR4-NEXT: stfd 1, 112(1) 858; ASM64PWR4-NEXT: lfd 2, 0(3) 859; ASM64PWR4-NEXT: li 3, 42 860; ASM64PWR4-NEXT: stfd 2, 120(1) 861; ASM64PWR4-NEXT: ld 4, 112(1) 862; ASM64PWR4-NEXT: ld 6, 120(1) 863; ASM64PWR4-NEXT: bl .test_vararg[PR] 864; ASM64PWR4-NEXT: nop 865; ASM64PWR4-NEXT: addi 1, 1, 128 866; ASM64PWR4-NEXT: ld 0, 16(1) 867; ASM64PWR4-NEXT: mtlr 0 868; ASM64PWR4-NEXT: blr 869entry: 870 %0 = load float, ptr @f1, align 4 871 %conv = fpext float %0 to double 872 %1 = load double, ptr @d1, align 8 873 call void (i32, ...) 
@test_vararg(i32 42, double %conv, i64 42, double %1) 874 ret void 875} 876 877define void @call_test_vararg4() { 878; ASM32PWR4-LABEL: call_test_vararg4: 879; ASM32PWR4: # %bb.0: # %entry 880; ASM32PWR4-NEXT: mflr 0 881; ASM32PWR4-NEXT: stwu 1, -64(1) 882; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1 883; ASM32PWR4-NEXT: stw 0, 72(1) 884; ASM32PWR4-NEXT: lfs 1, 0(3) 885; ASM32PWR4-NEXT: li 3, 42 886; ASM32PWR4-NEXT: stfs 1, 60(1) 887; ASM32PWR4-NEXT: lwz 4, 60(1) 888; ASM32PWR4-NEXT: bl .test_vararg[PR] 889; ASM32PWR4-NEXT: nop 890; ASM32PWR4-NEXT: addi 1, 1, 64 891; ASM32PWR4-NEXT: lwz 0, 8(1) 892; ASM32PWR4-NEXT: mtlr 0 893; ASM32PWR4-NEXT: blr 894; 895; ASM64PWR4-LABEL: call_test_vararg4: 896; ASM64PWR4: # %bb.0: # %entry 897; ASM64PWR4-NEXT: mflr 0 898; ASM64PWR4-NEXT: stdu 1, -128(1) 899; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1 900; ASM64PWR4-NEXT: std 0, 144(1) 901; ASM64PWR4-NEXT: lfs 1, 0(3) 902; ASM64PWR4-NEXT: li 3, 42 903; ASM64PWR4-NEXT: stfs 1, 124(1) 904; ASM64PWR4-NEXT: lwz 4, 124(1) 905; ASM64PWR4-NEXT: bl .test_vararg[PR] 906; ASM64PWR4-NEXT: nop 907; ASM64PWR4-NEXT: addi 1, 1, 128 908; ASM64PWR4-NEXT: ld 0, 16(1) 909; ASM64PWR4-NEXT: mtlr 0 910; ASM64PWR4-NEXT: blr 911entry: 912 %0 = load float, ptr @f1, align 4 913 call void (i32, ...) @test_vararg(i32 42, float %0) 914 ret void 915} 916 917@c = common global i8 0, align 1 918@si = common global i16 0, align 2 919@i = common global i32 0, align 4 920@lli = common global i64 0, align 8 921@f = common global float 0.000000e+00, align 4 922@d = common global double 0.000000e+00, align 8 923 924; Basic saving of integral type arguments to the parameter save area. 925define void @call_test_stackarg_int() { 926; ASM32PWR4-LABEL: call_test_stackarg_int: 927; ASM32PWR4: # %bb.0: # %entry 928; ASM32PWR4-NEXT: mflr 0 929; ASM32PWR4-NEXT: stwu 1, -80(1) 930; ASM32PWR4-NEXT: lwz 3, L..C4(2) # @si 931; ASM32PWR4-NEXT: stw 0, 88(1) 932; ASM32PWR4-NEXT: lwz 4, L..C5(2) # @i 933; ASM32PWR4-NEXT: li 6, 4 934; ASM32PWR4-NEXT: li 8, 6 935; ASM32PWR4-NEXT: li 9, 7 936; ASM32PWR4-NEXT: li 10, 8 937; ASM32PWR4-NEXT: lha 7, 0(3) 938; ASM32PWR4-NEXT: lwz 3, L..C6(2) # @c 939; ASM32PWR4-NEXT: lbz 11, 0(3) 940; ASM32PWR4-NEXT: lwz 3, L..C7(2) # @lli 941; ASM32PWR4-NEXT: lwz 5, 0(4) 942; ASM32PWR4-NEXT: lwz 4, 0(3) 943; ASM32PWR4-NEXT: lwz 3, 4(3) 944; ASM32PWR4-NEXT: stw 5, 76(1) 945; ASM32PWR4-NEXT: stw 3, 72(1) 946; ASM32PWR4-NEXT: li 3, 1 947; ASM32PWR4-NEXT: stw 4, 68(1) 948; ASM32PWR4-NEXT: li 4, 2 949; ASM32PWR4-NEXT: stw 5, 64(1) 950; ASM32PWR4-NEXT: li 5, 3 951; ASM32PWR4-NEXT: stw 7, 60(1) 952; ASM32PWR4-NEXT: li 7, 5 953; ASM32PWR4-NEXT: stw 11, 56(1) 954; ASM32PWR4-NEXT: bl .test_stackarg_int[PR] 955; ASM32PWR4-NEXT: nop 956; ASM32PWR4-NEXT: addi 1, 1, 80 957; ASM32PWR4-NEXT: lwz 0, 8(1) 958; ASM32PWR4-NEXT: mtlr 0 959; ASM32PWR4-NEXT: blr 960; 961; ASM64PWR4-LABEL: call_test_stackarg_int: 962; ASM64PWR4: # %bb.0: # %entry 963; ASM64PWR4-NEXT: mflr 0 964; ASM64PWR4-NEXT: stdu 1, -160(1) 965; ASM64PWR4-NEXT: ld 3, L..C3(2) # @si 966; ASM64PWR4-NEXT: std 0, 176(1) 967; ASM64PWR4-NEXT: ld 4, L..C4(2) # @i 968; ASM64PWR4-NEXT: li 6, 4 969; ASM64PWR4-NEXT: li 8, 6 970; ASM64PWR4-NEXT: li 9, 7 971; ASM64PWR4-NEXT: li 10, 8 972; ASM64PWR4-NEXT: lha 7, 0(3) 973; ASM64PWR4-NEXT: ld 3, L..C5(2) # @c 974; ASM64PWR4-NEXT: lbz 11, 0(3) 975; ASM64PWR4-NEXT: ld 3, L..C6(2) # @lli 976; ASM64PWR4-NEXT: lwz 5, 0(4) 977; ASM64PWR4-NEXT: li 4, 2 978; ASM64PWR4-NEXT: ld 3, 0(3) 979; ASM64PWR4-NEXT: std 5, 144(1) 980; ASM64PWR4-NEXT: std 3, 136(1) 981; 
ASM64PWR4-NEXT: li 3, 1 982; ASM64PWR4-NEXT: std 5, 128(1) 983; ASM64PWR4-NEXT: li 5, 3 984; ASM64PWR4-NEXT: std 7, 120(1) 985; ASM64PWR4-NEXT: li 7, 5 986; ASM64PWR4-NEXT: std 11, 112(1) 987; ASM64PWR4-NEXT: bl .test_stackarg_int[PR] 988; ASM64PWR4-NEXT: nop 989; ASM64PWR4-NEXT: addi 1, 1, 160 990; ASM64PWR4-NEXT: ld 0, 16(1) 991; ASM64PWR4-NEXT: mtlr 0 992; ASM64PWR4-NEXT: blr 993entry: 994 %0 = load i8, ptr @c, align 1 995 %1 = load i16, ptr @si, align 2 996 %2 = load i32, ptr @i, align 4 997 %3 = load i64, ptr @lli, align 8 998 %4 = load i32, ptr @i, align 4 999 call void @test_stackarg_int(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i8 zeroext %0, i16 signext %1, i32 %2, i64 %3, i32 %4) 1000 ret void 1001} 1002 1003declare void @test_stackarg_int(i32, i32, i32, i32, i32, i32, i32, i32, i8 zeroext, i16 signext, i32, i64, i32) 1004 1005; Basic saving of floating point type arguments to the parameter save area. 1006; The float and double arguments will pass in both fpr as well as parameter save area. 1007define void @call_test_stackarg_float() { 1008; ASM32PWR4-LABEL: call_test_stackarg_float: 1009; ASM32PWR4: # %bb.0: # %entry 1010; ASM32PWR4-NEXT: mflr 0 1011; ASM32PWR4-NEXT: stwu 1, -80(1) 1012; ASM32PWR4-NEXT: lwz 3, L..C8(2) # @f 1013; ASM32PWR4-NEXT: stw 0, 88(1) 1014; ASM32PWR4-NEXT: li 4, 2 1015; ASM32PWR4-NEXT: li 5, 3 1016; ASM32PWR4-NEXT: li 6, 4 1017; ASM32PWR4-NEXT: li 7, 5 1018; ASM32PWR4-NEXT: lfs 1, 0(3) 1019; ASM32PWR4-NEXT: lwz 3, L..C9(2) # @d 1020; ASM32PWR4-NEXT: li 8, 6 1021; ASM32PWR4-NEXT: li 9, 7 1022; ASM32PWR4-NEXT: lfd 2, 0(3) 1023; ASM32PWR4-NEXT: li 3, 1 1024; ASM32PWR4-NEXT: li 10, 8 1025; ASM32PWR4-NEXT: stfd 2, 60(1) 1026; ASM32PWR4-NEXT: stfs 1, 56(1) 1027; ASM32PWR4-NEXT: bl .test_stackarg_float[PR] 1028; ASM32PWR4-NEXT: nop 1029; ASM32PWR4-NEXT: addi 1, 1, 80 1030; ASM32PWR4-NEXT: lwz 0, 8(1) 1031; ASM32PWR4-NEXT: mtlr 0 1032; ASM32PWR4-NEXT: blr 1033; 1034; ASM64PWR4-LABEL: call_test_stackarg_float: 1035; ASM64PWR4: # %bb.0: # %entry 1036; ASM64PWR4-NEXT: mflr 0 1037; ASM64PWR4-NEXT: stdu 1, -128(1) 1038; ASM64PWR4-NEXT: ld 3, L..C7(2) # @f 1039; ASM64PWR4-NEXT: std 0, 144(1) 1040; ASM64PWR4-NEXT: li 4, 2 1041; ASM64PWR4-NEXT: li 5, 3 1042; ASM64PWR4-NEXT: li 6, 4 1043; ASM64PWR4-NEXT: li 7, 5 1044; ASM64PWR4-NEXT: lfs 1, 0(3) 1045; ASM64PWR4-NEXT: ld 3, L..C8(2) # @d 1046; ASM64PWR4-NEXT: li 8, 6 1047; ASM64PWR4-NEXT: li 9, 7 1048; ASM64PWR4-NEXT: lfd 2, 0(3) 1049; ASM64PWR4-NEXT: li 3, 1 1050; ASM64PWR4-NEXT: li 10, 8 1051; ASM64PWR4-NEXT: stfd 2, 120(1) 1052; ASM64PWR4-NEXT: stfs 1, 112(1) 1053; ASM64PWR4-NEXT: bl .test_stackarg_float[PR] 1054; ASM64PWR4-NEXT: nop 1055; ASM64PWR4-NEXT: addi 1, 1, 128 1056; ASM64PWR4-NEXT: ld 0, 16(1) 1057; ASM64PWR4-NEXT: mtlr 0 1058; ASM64PWR4-NEXT: blr 1059entry: 1060 %0 = load float, ptr @f, align 4 1061 %1 = load double, ptr @d, align 8 1062 call void @test_stackarg_float(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, float %0, double %1) 1063 ret void 1064} 1065 1066declare void @test_stackarg_float(i32, i32, i32, i32, i32, i32, i32, i32, float, double) 1067 1068define void @call_test_stackarg_float2() { 1069; ASM32PWR4-LABEL: call_test_stackarg_float2: 1070; ASM32PWR4: # %bb.0: # %entry 1071; ASM32PWR4-NEXT: mflr 0 1072; ASM32PWR4-NEXT: stwu 1, -64(1) 1073; ASM32PWR4-NEXT: lwz 3, L..C9(2) # @d 1074; ASM32PWR4-NEXT: stw 0, 72(1) 1075; ASM32PWR4-NEXT: li 4, 2 1076; ASM32PWR4-NEXT: li 5, 3 1077; ASM32PWR4-NEXT: li 6, 4 1078; ASM32PWR4-NEXT: li 7, 5 1079; ASM32PWR4-NEXT: lfd 1, 0(3) 1080; 
ASM32PWR4-NEXT: li 3, 1 1081; ASM32PWR4-NEXT: li 8, 6 1082; ASM32PWR4-NEXT: stfd 1, 56(1) 1083; ASM32PWR4-NEXT: lwz 9, 56(1) 1084; ASM32PWR4-NEXT: lwz 10, 60(1) 1085; ASM32PWR4-NEXT: bl .test_stackarg_float2[PR] 1086; ASM32PWR4-NEXT: nop 1087; ASM32PWR4-NEXT: addi 1, 1, 64 1088; ASM32PWR4-NEXT: lwz 0, 8(1) 1089; ASM32PWR4-NEXT: mtlr 0 1090; ASM32PWR4-NEXT: blr 1091; 1092; ASM64PWR4-LABEL: call_test_stackarg_float2: 1093; ASM64PWR4: # %bb.0: # %entry 1094; ASM64PWR4-NEXT: mflr 0 1095; ASM64PWR4-NEXT: stdu 1, -128(1) 1096; ASM64PWR4-NEXT: ld 3, L..C8(2) # @d 1097; ASM64PWR4-NEXT: std 0, 144(1) 1098; ASM64PWR4-NEXT: li 4, 2 1099; ASM64PWR4-NEXT: li 5, 3 1100; ASM64PWR4-NEXT: li 6, 4 1101; ASM64PWR4-NEXT: li 7, 5 1102; ASM64PWR4-NEXT: lfd 1, 0(3) 1103; ASM64PWR4-NEXT: li 3, 1 1104; ASM64PWR4-NEXT: li 8, 6 1105; ASM64PWR4-NEXT: stfd 1, 120(1) 1106; ASM64PWR4-NEXT: ld 9, 120(1) 1107; ASM64PWR4-NEXT: bl .test_stackarg_float2[PR] 1108; ASM64PWR4-NEXT: nop 1109; ASM64PWR4-NEXT: addi 1, 1, 128 1110; ASM64PWR4-NEXT: ld 0, 16(1) 1111; ASM64PWR4-NEXT: mtlr 0 1112; ASM64PWR4-NEXT: blr 1113entry: 1114 %0 = load double, ptr @d, align 8 1115 call void (i32, i32, i32, i32, i32, i32, ...) @test_stackarg_float2(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, double %0) 1116 ret void 1117} 1118 1119declare void @test_stackarg_float2(i32, i32, i32, i32, i32, i32, ...) 1120 1121; A double arg will pass on the stack in PPC32 if there is only one available GPR. 1122define void @call_test_stackarg_float3() { 1123; ASM32PWR4-LABEL: call_test_stackarg_float3: 1124; ASM32PWR4: # %bb.0: # %entry 1125; ASM32PWR4-NEXT: mflr 0 1126; ASM32PWR4-NEXT: stwu 1, -80(1) 1127; ASM32PWR4-NEXT: lwz 3, L..C9(2) # @d 1128; ASM32PWR4-NEXT: stw 0, 88(1) 1129; ASM32PWR4-NEXT: li 4, 2 1130; ASM32PWR4-NEXT: li 5, 3 1131; ASM32PWR4-NEXT: li 6, 4 1132; ASM32PWR4-NEXT: li 7, 5 1133; ASM32PWR4-NEXT: lfd 1, 0(3) 1134; ASM32PWR4-NEXT: lwz 3, L..C8(2) # @f 1135; ASM32PWR4-NEXT: li 8, 6 1136; ASM32PWR4-NEXT: li 9, 7 1137; ASM32PWR4-NEXT: stfd 1, 72(1) 1138; ASM32PWR4-NEXT: lwz 10, 72(1) 1139; ASM32PWR4-NEXT: lfs 2, 0(3) 1140; ASM32PWR4-NEXT: li 3, 1 1141; ASM32PWR4-NEXT: stfs 2, 60(1) 1142; ASM32PWR4-NEXT: stfd 1, 52(1) 1143; ASM32PWR4-NEXT: bl .test_stackarg_float3[PR] 1144; ASM32PWR4-NEXT: nop 1145; ASM32PWR4-NEXT: addi 1, 1, 80 1146; ASM32PWR4-NEXT: lwz 0, 8(1) 1147; ASM32PWR4-NEXT: mtlr 0 1148; ASM32PWR4-NEXT: blr 1149; 1150; ASM64PWR4-LABEL: call_test_stackarg_float3: 1151; ASM64PWR4: # %bb.0: # %entry 1152; ASM64PWR4-NEXT: mflr 0 1153; ASM64PWR4-NEXT: stdu 1, -128(1) 1154; ASM64PWR4-NEXT: ld 3, L..C8(2) # @d 1155; ASM64PWR4-NEXT: std 0, 144(1) 1156; ASM64PWR4-NEXT: li 4, 2 1157; ASM64PWR4-NEXT: li 5, 3 1158; ASM64PWR4-NEXT: li 6, 4 1159; ASM64PWR4-NEXT: li 7, 5 1160; ASM64PWR4-NEXT: lfd 1, 0(3) 1161; ASM64PWR4-NEXT: ld 3, L..C7(2) # @f 1162; ASM64PWR4-NEXT: li 8, 6 1163; ASM64PWR4-NEXT: li 9, 7 1164; ASM64PWR4-NEXT: stfd 1, 120(1) 1165; ASM64PWR4-NEXT: ld 10, 120(1) 1166; ASM64PWR4-NEXT: lfs 2, 0(3) 1167; ASM64PWR4-NEXT: li 3, 1 1168; ASM64PWR4-NEXT: stfs 2, 112(1) 1169; ASM64PWR4-NEXT: bl .test_stackarg_float3[PR] 1170; ASM64PWR4-NEXT: nop 1171; ASM64PWR4-NEXT: addi 1, 1, 128 1172; ASM64PWR4-NEXT: ld 0, 16(1) 1173; ASM64PWR4-NEXT: mtlr 0 1174; ASM64PWR4-NEXT: blr 1175entry: 1176 %0 = load double, ptr @d, align 8 1177 %1 = load float, ptr @f, align 4 1178 call void (i32, i32, i32, i32, i32, i32, i32, ...) 
@test_stackarg_float3(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, double %0, float %1) 1179 ret void 1180} 1181 1182declare void @test_stackarg_float3(i32, i32, i32, i32, i32, i32, i32, ...) 1183 1184define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i64 %ll9, i16 signext %s10, i8 zeroext %c11, i32 %ui12, i32 %si13, i64 %ll14, i8 zeroext %uc15, i32 %i16, i8 signext %si8, i1 zeroext %zi1) { 1185; ASM32PWR4-LABEL: test_ints_stack: 1186; ASM32PWR4: # %bb.0: # %entry 1187; ASM32PWR4-NEXT: add 3, 3, 4 1188; ASM32PWR4-NEXT: stw 31, -4(1) # 4-byte Folded Spill 1189; ASM32PWR4-NEXT: add 3, 3, 5 1190; ASM32PWR4-NEXT: add 3, 3, 6 1191; ASM32PWR4-NEXT: add 3, 3, 7 1192; ASM32PWR4-NEXT: lbz 12, 99(1) 1193; ASM32PWR4-NEXT: add 3, 3, 8 1194; ASM32PWR4-NEXT: add 3, 3, 9 1195; ASM32PWR4-NEXT: lwz 0, 92(1) 1196; ASM32PWR4-NEXT: add 3, 3, 10 1197; ASM32PWR4-NEXT: extsb 4, 12 1198; ASM32PWR4-NEXT: srawi 8, 3, 31 1199; ASM32PWR4-NEXT: lwz 31, 76(1) 1200; ASM32PWR4-NEXT: srawi 12, 0, 31 1201; ASM32PWR4-NEXT: lwz 6, 60(1) 1202; ASM32PWR4-NEXT: lha 11, 66(1) 1203; ASM32PWR4-NEXT: lwz 7, 56(1) 1204; ASM32PWR4-NEXT: stw 30, -8(1) # 4-byte Folded Spill 1205; ASM32PWR4-NEXT: srawi 30, 31, 31 1206; ASM32PWR4-NEXT: addc 3, 3, 6 1207; ASM32PWR4-NEXT: adde 7, 8, 7 1208; ASM32PWR4-NEXT: lbz 6, 71(1) 1209; ASM32PWR4-NEXT: srawi 8, 11, 31 1210; ASM32PWR4-NEXT: addc 3, 3, 11 1211; ASM32PWR4-NEXT: adde 7, 7, 8 1212; ASM32PWR4-NEXT: lwz 9, 72(1) 1213; ASM32PWR4-NEXT: addc 3, 3, 6 1214; ASM32PWR4-NEXT: addze 6, 7 1215; ASM32PWR4-NEXT: addc 3, 3, 9 1216; ASM32PWR4-NEXT: lwz 5, 84(1) 1217; ASM32PWR4-NEXT: addze 6, 6 1218; ASM32PWR4-NEXT: addc 3, 3, 31 1219; ASM32PWR4-NEXT: lwz 7, 80(1) 1220; ASM32PWR4-NEXT: adde 6, 6, 30 1221; ASM32PWR4-NEXT: addc 3, 3, 5 1222; ASM32PWR4-NEXT: lbz 8, 91(1) 1223; ASM32PWR4-NEXT: adde 5, 6, 7 1224; ASM32PWR4-NEXT: addc 3, 3, 8 1225; ASM32PWR4-NEXT: lbz 6, 103(1) 1226; ASM32PWR4-NEXT: addze 5, 5 1227; ASM32PWR4-NEXT: addc 3, 3, 0 1228; ASM32PWR4-NEXT: adde 5, 5, 12 1229; ASM32PWR4-NEXT: lwz 31, -4(1) # 4-byte Folded Reload 1230; ASM32PWR4-NEXT: srawi 7, 4, 31 1231; ASM32PWR4-NEXT: addc 3, 3, 4 1232; ASM32PWR4-NEXT: adde 5, 5, 7 1233; ASM32PWR4-NEXT: lwz 30, -8(1) # 4-byte Folded Reload 1234; ASM32PWR4-NEXT: addc 4, 3, 6 1235; ASM32PWR4-NEXT: addze 3, 5 1236; ASM32PWR4-NEXT: blr 1237; 1238; ASM64PWR4-LABEL: test_ints_stack: 1239; ASM64PWR4: # %bb.0: # %entry 1240; ASM64PWR4-NEXT: add 3, 3, 4 1241; ASM64PWR4-NEXT: std 31, -8(1) # 8-byte Folded Spill 1242; ASM64PWR4-NEXT: add 3, 3, 5 1243; ASM64PWR4-NEXT: add 3, 3, 6 1244; ASM64PWR4-NEXT: add 3, 3, 7 1245; ASM64PWR4-NEXT: std 2, -16(1) # 8-byte Folded Spill 1246; ASM64PWR4-NEXT: add 3, 3, 8 1247; ASM64PWR4-NEXT: add 3, 3, 9 1248; ASM64PWR4-NEXT: ld 6, 112(1) 1249; ASM64PWR4-NEXT: add 3, 3, 10 1250; ASM64PWR4-NEXT: extsw 3, 3 1251; ASM64PWR4-NEXT: lha 0, 126(1) 1252; ASM64PWR4-NEXT: add 3, 3, 6 1253; ASM64PWR4-NEXT: add 3, 3, 0 1254; ASM64PWR4-NEXT: lbz 5, 135(1) 1255; ASM64PWR4-NEXT: lwz 7, 140(1) 1256; ASM64PWR4-NEXT: add 3, 3, 5 1257; ASM64PWR4-NEXT: lwa 12, 148(1) 1258; ASM64PWR4-NEXT: add 3, 3, 7 1259; ASM64PWR4-NEXT: add 3, 3, 12 1260; ASM64PWR4-NEXT: ld 31, 152(1) 1261; ASM64PWR4-NEXT: lbz 5, 167(1) 1262; ASM64PWR4-NEXT: add 3, 3, 31 1263; ASM64PWR4-NEXT: lwa 11, 172(1) 1264; ASM64PWR4-NEXT: add 3, 3, 5 1265; ASM64PWR4-NEXT: add 3, 3, 11 1266; ASM64PWR4-NEXT: lbz 2, 183(1) 1267; ASM64PWR4-NEXT: lbz 6, 191(1) 1268; ASM64PWR4-NEXT: extsb 4, 2 1269; ASM64PWR4-NEXT: add 3, 3, 4 1270; 
ASM64PWR4-NEXT: add 3, 3, 6 1271; ASM64PWR4-NEXT: ld 2, -16(1) # 8-byte Folded Reload 1272; ASM64PWR4-NEXT: ld 31, -8(1) # 8-byte Folded Reload 1273; ASM64PWR4-NEXT: blr 1274entry: 1275 %add = add nsw i32 %i1, %i2 1276 %add1 = add nsw i32 %add, %i3 1277 %add2 = add nsw i32 %add1, %i4 1278 %add3 = add nsw i32 %add2, %i5 1279 %add4 = add nsw i32 %add3, %i6 1280 %add5 = add nsw i32 %add4, %i7 1281 %add6 = add nsw i32 %add5, %i8 1282 %conv = sext i32 %add6 to i64 1283 %add7 = add nsw i64 %conv, %ll9 1284 %conv8 = sext i16 %s10 to i64 1285 %add9 = add nsw i64 %add7, %conv8 1286 %conv10 = zext i8 %c11 to i64 1287 %add11 = add nsw i64 %add9, %conv10 1288 %conv12 = zext i32 %ui12 to i64 1289 %add13 = add nsw i64 %add11, %conv12 1290 %conv14 = sext i32 %si13 to i64 1291 %add15 = add nsw i64 %add13, %conv14 1292 %add16 = add nsw i64 %add15, %ll14 1293 %conv17 = zext i8 %uc15 to i64 1294 %add18 = add nsw i64 %add16, %conv17 1295 %conv19 = sext i32 %i16 to i64 1296 %add20 = add nsw i64 %add18, %conv19 1297 %conv21 = sext i8 %si8 to i64 1298 %add22 = add nsw i64 %add20, %conv21 1299 %conv23 = zext i1 %zi1 to i64 1300 %add24 = add nsw i64 %add22, %conv23 1301 ret i64 %add24 1302} 1303 1304@ll1 = common global i64 0, align 8 1305@si1 = common global i16 0, align 2 1306@ch = common global i8 0, align 1 1307@ui = common global i32 0, align 4 1308@sint = common global i32 0, align 4 1309@ll2 = common global i64 0, align 8 1310@uc1 = common global i8 0, align 1 1311@i1 = common global i32 0, align 4 1312 1313define void @caller_ints_stack() { 1314; ASM32PWR4-LABEL: caller_ints_stack: 1315; ASM32PWR4: # %bb.0: # %entry 1316; ASM32PWR4-NEXT: mflr 0 1317; ASM32PWR4-NEXT: stwu 1, -96(1) 1318; ASM32PWR4-NEXT: lwz 3, L..C10(2) # @si1 1319; ASM32PWR4-NEXT: stw 0, 104(1) 1320; ASM32PWR4-NEXT: lwz 4, L..C11(2) # @ch 1321; ASM32PWR4-NEXT: lwz 6, L..C12(2) # @sint 1322; ASM32PWR4-NEXT: lwz 8, L..C13(2) # @ll2 1323; ASM32PWR4-NEXT: lwz 10, L..C14(2) # @uc1 1324; ASM32PWR4-NEXT: lwz 12, L..C15(2) # @i1 1325; ASM32PWR4-NEXT: lha 5, 0(3) 1326; ASM32PWR4-NEXT: lwz 3, L..C16(2) # @ll1 1327; ASM32PWR4-NEXT: lwz 11, 0(3) 1328; ASM32PWR4-NEXT: lwz 7, 4(3) 1329; ASM32PWR4-NEXT: lwz 3, L..C17(2) # @ui 1330; ASM32PWR4-NEXT: lbz 4, 0(4) 1331; ASM32PWR4-NEXT: lwz 3, 0(3) 1332; ASM32PWR4-NEXT: lwz 6, 0(6) 1333; ASM32PWR4-NEXT: lwz 9, 0(8) 1334; ASM32PWR4-NEXT: lwz 8, 4(8) 1335; ASM32PWR4-NEXT: lbz 10, 0(10) 1336; ASM32PWR4-NEXT: lwz 12, 0(12) 1337; ASM32PWR4-NEXT: stw 10, 88(1) 1338; ASM32PWR4-NEXT: li 10, 8 1339; ASM32PWR4-NEXT: stw 8, 84(1) 1340; ASM32PWR4-NEXT: li 8, 6 1341; ASM32PWR4-NEXT: stw 9, 80(1) 1342; ASM32PWR4-NEXT: li 9, 7 1343; ASM32PWR4-NEXT: stw 6, 76(1) 1344; ASM32PWR4-NEXT: li 6, 4 1345; ASM32PWR4-NEXT: stw 3, 72(1) 1346; ASM32PWR4-NEXT: li 3, 1 1347; ASM32PWR4-NEXT: stw 4, 68(1) 1348; ASM32PWR4-NEXT: li 4, 2 1349; ASM32PWR4-NEXT: stw 5, 64(1) 1350; ASM32PWR4-NEXT: li 5, 3 1351; ASM32PWR4-NEXT: stw 7, 60(1) 1352; ASM32PWR4-NEXT: li 7, 5 1353; ASM32PWR4-NEXT: stw 12, 92(1) 1354; ASM32PWR4-NEXT: stw 11, 56(1) 1355; ASM32PWR4-NEXT: bl .test_ints_stack 1356; ASM32PWR4-NEXT: nop 1357; ASM32PWR4-NEXT: addi 1, 1, 96 1358; ASM32PWR4-NEXT: lwz 0, 8(1) 1359; ASM32PWR4-NEXT: mtlr 0 1360; ASM32PWR4-NEXT: blr 1361; 1362; ASM64PWR4-LABEL: caller_ints_stack: 1363; ASM64PWR4: # %bb.0: # %entry 1364; ASM64PWR4-NEXT: mflr 0 1365; ASM64PWR4-NEXT: stdu 1, -176(1) 1366; ASM64PWR4-NEXT: ld 3, L..C9(2) # @si1 1367; ASM64PWR4-NEXT: std 0, 192(1) 1368; ASM64PWR4-NEXT: ld 4, L..C10(2) # @ch 1369; ASM64PWR4-NEXT: ld 6, L..C11(2) # @ll2 1370; 
ASM64PWR4-NEXT: ld 8, L..C12(2) # @uc1 1371; ASM64PWR4-NEXT: ld 9, L..C13(2) # @i1 1372; ASM64PWR4-NEXT: li 10, 8 1373; ASM64PWR4-NEXT: lha 7, 0(3) 1374; ASM64PWR4-NEXT: ld 3, L..C14(2) # @ll1 1375; ASM64PWR4-NEXT: ld 11, 0(3) 1376; ASM64PWR4-NEXT: ld 3, L..C15(2) # @ui 1377; ASM64PWR4-NEXT: lbz 5, 0(4) 1378; ASM64PWR4-NEXT: ld 4, L..C16(2) # @sint 1379; ASM64PWR4-NEXT: lwz 3, 0(3) 1380; ASM64PWR4-NEXT: lwz 4, 0(4) 1381; ASM64PWR4-NEXT: ld 6, 0(6) 1382; ASM64PWR4-NEXT: lbz 8, 0(8) 1383; ASM64PWR4-NEXT: lwz 9, 0(9) 1384; ASM64PWR4-NEXT: std 9, 168(1) 1385; ASM64PWR4-NEXT: li 9, 7 1386; ASM64PWR4-NEXT: std 8, 160(1) 1387; ASM64PWR4-NEXT: li 8, 6 1388; ASM64PWR4-NEXT: std 6, 152(1) 1389; ASM64PWR4-NEXT: li 6, 4 1390; ASM64PWR4-NEXT: std 4, 144(1) 1391; ASM64PWR4-NEXT: li 4, 2 1392; ASM64PWR4-NEXT: std 3, 136(1) 1393; ASM64PWR4-NEXT: li 3, 1 1394; ASM64PWR4-NEXT: std 5, 128(1) 1395; ASM64PWR4-NEXT: li 5, 3 1396; ASM64PWR4-NEXT: std 7, 120(1) 1397; ASM64PWR4-NEXT: li 7, 5 1398; ASM64PWR4-NEXT: std 11, 112(1) 1399; ASM64PWR4-NEXT: bl .test_ints_stack 1400; ASM64PWR4-NEXT: nop 1401; ASM64PWR4-NEXT: addi 1, 1, 176 1402; ASM64PWR4-NEXT: ld 0, 16(1) 1403; ASM64PWR4-NEXT: mtlr 0 1404; ASM64PWR4-NEXT: blr 1405entry: 1406 %0 = load i64, ptr @ll1, align 8 1407 %1 = load i16, ptr @si1, align 2 1408 %2 = load i8, ptr @ch, align 1 1409 %3 = load i32, ptr @ui, align 4 1410 %4 = load i32, ptr @sint, align 4 1411 %5 = load i64, ptr @ll2, align 8 1412 %6 = load i8, ptr @uc1, align 1 1413 %7 = load i32, ptr @i1, align 4 1414 %call = call i64 @test_ints_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i64 %0, i16 signext %1, i8 zeroext %2, i32 %3, i32 %4, i64 %5, i8 zeroext %6, i32 %7) 1415 ret void 1416} 1417 1418@globali1 = global i8 0, align 1 1419 1420define void @test_i1_stack(i32 %a, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 zeroext %b) { 1421; ASM32PWR4-LABEL: test_i1_stack: 1422; ASM32PWR4: # %bb.0: # %entry 1423; ASM32PWR4-NEXT: lbz 3, 59(1) 1424; ASM32PWR4-NEXT: lwz 4, L..C18(2) # @globali1 1425; ASM32PWR4-NEXT: stb 3, 0(4) 1426; ASM32PWR4-NEXT: blr 1427; 1428; ASM64PWR4-LABEL: test_i1_stack: 1429; ASM64PWR4: # %bb.0: # %entry 1430; ASM64PWR4-NEXT: lbz 3, 119(1) 1431; ASM64PWR4-NEXT: ld 4, L..C17(2) # @globali1 1432; ASM64PWR4-NEXT: stb 3, 0(4) 1433; ASM64PWR4-NEXT: blr 1434 entry: 1435 %frombool = zext i1 %b to i8 1436 store i8 %frombool, ptr @globali1, align 1 1437 ret void 1438} 1439 1440define void @call_test_i1_stack() { 1441; ASM32PWR4-LABEL: call_test_i1_stack: 1442; ASM32PWR4: # %bb.0: # %entry 1443; ASM32PWR4-NEXT: mflr 0 1444; ASM32PWR4-NEXT: stwu 1, -64(1) 1445; ASM32PWR4-NEXT: li 11, 1 1446; ASM32PWR4-NEXT: li 3, 1 1447; ASM32PWR4-NEXT: stw 0, 72(1) 1448; ASM32PWR4-NEXT: li 4, 2 1449; ASM32PWR4-NEXT: li 5, 3 1450; ASM32PWR4-NEXT: stw 11, 56(1) 1451; ASM32PWR4-NEXT: li 6, 4 1452; ASM32PWR4-NEXT: li 7, 5 1453; ASM32PWR4-NEXT: li 8, 6 1454; ASM32PWR4-NEXT: li 9, 7 1455; ASM32PWR4-NEXT: li 10, 8 1456; ASM32PWR4-NEXT: bl .test_i1_stack 1457; ASM32PWR4-NEXT: nop 1458; ASM32PWR4-NEXT: addi 1, 1, 64 1459; ASM32PWR4-NEXT: lwz 0, 8(1) 1460; ASM32PWR4-NEXT: mtlr 0 1461; ASM32PWR4-NEXT: blr 1462; 1463; ASM64PWR4-LABEL: call_test_i1_stack: 1464; ASM64PWR4: # %bb.0: # %entry 1465; ASM64PWR4-NEXT: mflr 0 1466; ASM64PWR4-NEXT: stdu 1, -128(1) 1467; ASM64PWR4-NEXT: li 11, 1 1468; ASM64PWR4-NEXT: li 3, 1 1469; ASM64PWR4-NEXT: std 0, 144(1) 1470; ASM64PWR4-NEXT: li 4, 2 1471; ASM64PWR4-NEXT: li 5, 3 1472; ASM64PWR4-NEXT: std 11, 112(1) 1473; ASM64PWR4-NEXT: li 6, 4 1474; 
ASM64PWR4-NEXT: li 7, 5 1475; ASM64PWR4-NEXT: li 8, 6 1476; ASM64PWR4-NEXT: li 9, 7 1477; ASM64PWR4-NEXT: li 10, 8 1478; ASM64PWR4-NEXT: bl .test_i1_stack 1479; ASM64PWR4-NEXT: nop 1480; ASM64PWR4-NEXT: addi 1, 1, 128 1481; ASM64PWR4-NEXT: ld 0, 16(1) 1482; ASM64PWR4-NEXT: mtlr 0 1483; ASM64PWR4-NEXT: blr 1484 entry: 1485 call void @test_i1_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i1 true) 1486 ret void 1487} 1488 1489define double @test_fpr_stack(double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, double %d9, double %s10, double %l11, double %d12, double %d13, float %f14, double %d15, float %f16) { 1490; ASM32PWR4-LABEL: test_fpr_stack: 1491; ASM32PWR4: # %bb.0: # %entry 1492; ASM32PWR4-NEXT: fadd 0, 1, 2 1493; ASM32PWR4-NEXT: lfs 1, 128(1) 1494; ASM32PWR4-NEXT: fadd 0, 0, 3 1495; ASM32PWR4-NEXT: lfd 2, 132(1) 1496; ASM32PWR4-NEXT: fadd 0, 0, 4 1497; ASM32PWR4-NEXT: fadd 0, 0, 5 1498; ASM32PWR4-NEXT: fadd 0, 0, 6 1499; ASM32PWR4-NEXT: fadd 0, 0, 7 1500; ASM32PWR4-NEXT: fadd 0, 0, 8 1501; ASM32PWR4-NEXT: fadd 0, 0, 9 1502; ASM32PWR4-NEXT: fadd 0, 0, 10 1503; ASM32PWR4-NEXT: fadd 0, 0, 11 1504; ASM32PWR4-NEXT: fadd 0, 0, 12 1505; ASM32PWR4-NEXT: fadd 0, 0, 13 1506; ASM32PWR4-NEXT: fadd 0, 0, 13 1507; ASM32PWR4-NEXT: fadd 0, 0, 1 1508; ASM32PWR4-NEXT: lfs 1, 140(1) 1509; ASM32PWR4-NEXT: fadd 0, 0, 2 1510; ASM32PWR4-NEXT: fadd 1, 0, 1 1511; ASM32PWR4-NEXT: blr 1512; 1513; ASM64PWR4-LABEL: test_fpr_stack: 1514; ASM64PWR4: # %bb.0: # %entry 1515; ASM64PWR4-NEXT: fadd 0, 1, 2 1516; ASM64PWR4-NEXT: lfs 1, 152(1) 1517; ASM64PWR4-NEXT: fadd 0, 0, 3 1518; ASM64PWR4-NEXT: lfd 2, 160(1) 1519; ASM64PWR4-NEXT: fadd 0, 0, 4 1520; ASM64PWR4-NEXT: fadd 0, 0, 5 1521; ASM64PWR4-NEXT: fadd 0, 0, 6 1522; ASM64PWR4-NEXT: fadd 0, 0, 7 1523; ASM64PWR4-NEXT: fadd 0, 0, 8 1524; ASM64PWR4-NEXT: fadd 0, 0, 9 1525; ASM64PWR4-NEXT: fadd 0, 0, 10 1526; ASM64PWR4-NEXT: fadd 0, 0, 11 1527; ASM64PWR4-NEXT: fadd 0, 0, 12 1528; ASM64PWR4-NEXT: fadd 0, 0, 13 1529; ASM64PWR4-NEXT: fadd 0, 0, 13 1530; ASM64PWR4-NEXT: fadd 0, 0, 1 1531; ASM64PWR4-NEXT: lfs 1, 168(1) 1532; ASM64PWR4-NEXT: fadd 0, 0, 2 1533; ASM64PWR4-NEXT: fadd 1, 0, 1 1534; ASM64PWR4-NEXT: blr 1535 entry: 1536 %add = fadd double %d1, %d2 1537 %add1 = fadd double %add, %d3 1538 %add2 = fadd double %add1, %d4 1539 %add3 = fadd double %add2, %d5 1540 %add4 = fadd double %add3, %d6 1541 %add5 = fadd double %add4, %d7 1542 %add6 = fadd double %add5, %d8 1543 %add7 = fadd double %add6, %d9 1544 %add8 = fadd double %add7, %s10 1545 %add9 = fadd double %add8, %l11 1546 %add10 = fadd double %add9, %d12 1547 %add11 = fadd double %add10, %d13 1548 %add12 = fadd double %add11, %d13 1549 %conv = fpext float %f14 to double 1550 %add13 = fadd double %add12, %conv 1551 %add14 = fadd double %add13, %d15 1552 %conv15 = fpext float %f16 to double 1553 %add16 = fadd double %add14, %conv15 1554 ret double %add16 1555 } 1556 1557@f14 = common global float 0.000000e+00, align 4 1558@d15 = common global double 0.000000e+00, align 8 1559@f16 = common global float 0.000000e+00, align 4 1560 1561define void @caller_fpr_stack() { 1562; ASM32PWR4-LABEL: caller_fpr_stack: 1563; ASM32PWR4: # %bb.0: # %entry 1564; ASM32PWR4-NEXT: mflr 0 1565; ASM32PWR4-NEXT: stwu 1, -144(1) 1566; ASM32PWR4-NEXT: lwz 3, L..C19(2) # @d15 1567; ASM32PWR4-NEXT: lwz 4, L..C20(2) # @f14 1568; ASM32PWR4-NEXT: lwz 5, L..C21(2) # @f16 1569; ASM32PWR4-NEXT: stw 0, 152(1) 1570; ASM32PWR4-NEXT: lis 6, 16361 1571; ASM32PWR4-NEXT: ori 6, 6, 39321 1572; 
ASM32PWR4-NEXT: lfd 0, 0(3) 1573; ASM32PWR4-NEXT: lwz 3, 0(4) 1574; ASM32PWR4-NEXT: lwz 4, 0(5) 1575; ASM32PWR4-NEXT: li 5, 0 1576; ASM32PWR4-NEXT: stw 5, 60(1) 1577; ASM32PWR4-NEXT: lis 5, 16352 1578; ASM32PWR4-NEXT: stw 5, 56(1) 1579; ASM32PWR4-NEXT: lis 5, 13107 1580; ASM32PWR4-NEXT: ori 5, 5, 13107 1581; ASM32PWR4-NEXT: stw 5, 68(1) 1582; ASM32PWR4-NEXT: lis 5, 16355 1583; ASM32PWR4-NEXT: ori 5, 5, 13107 1584; ASM32PWR4-NEXT: stw 5, 64(1) 1585; ASM32PWR4-NEXT: lis 5, 26214 1586; ASM32PWR4-NEXT: ori 5, 5, 26214 1587; ASM32PWR4-NEXT: stw 5, 76(1) 1588; ASM32PWR4-NEXT: lis 5, 16358 1589; ASM32PWR4-NEXT: ori 5, 5, 26214 1590; ASM32PWR4-NEXT: stw 5, 72(1) 1591; ASM32PWR4-NEXT: lis 5, -26215 1592; ASM32PWR4-NEXT: ori 5, 5, 39322 1593; ASM32PWR4-NEXT: stw 5, 84(1) 1594; ASM32PWR4-NEXT: stw 5, 100(1) 1595; ASM32PWR4-NEXT: lis 5, 16313 1596; ASM32PWR4-NEXT: ori 5, 5, 39321 1597; ASM32PWR4-NEXT: stw 5, 96(1) 1598; ASM32PWR4-NEXT: lis 5, -15729 1599; ASM32PWR4-NEXT: ori 5, 5, 23593 1600; ASM32PWR4-NEXT: stw 5, 108(1) 1601; ASM32PWR4-NEXT: lis 5, 16316 1602; ASM32PWR4-NEXT: ori 5, 5, 10485 1603; ASM32PWR4-NEXT: stw 5, 104(1) 1604; ASM32PWR4-NEXT: lis 5, -5243 1605; ASM32PWR4-NEXT: ori 5, 5, 7864 1606; ASM32PWR4-NEXT: stw 5, 116(1) 1607; ASM32PWR4-NEXT: lis 5, 16318 1608; ASM32PWR4-NEXT: ori 5, 5, 47185 1609; ASM32PWR4-NEXT: stw 6, 80(1) 1610; ASM32PWR4-NEXT: lis 6, -13108 1611; ASM32PWR4-NEXT: ori 6, 6, 52429 1612; ASM32PWR4-NEXT: stw 5, 112(1) 1613; ASM32PWR4-NEXT: lis 5, 2621 1614; ASM32PWR4-NEXT: ori 5, 5, 28836 1615; ASM32PWR4-NEXT: stw 6, 92(1) 1616; ASM32PWR4-NEXT: lis 6, 16364 1617; ASM32PWR4-NEXT: ori 6, 6, 52428 1618; ASM32PWR4-NEXT: stw 5, 124(1) 1619; ASM32PWR4-NEXT: lis 5, 16320 1620; ASM32PWR4-NEXT: ori 5, 5, 41943 1621; ASM32PWR4-NEXT: stw 6, 88(1) 1622; ASM32PWR4-NEXT: lwz 6, L..C22(2) # %const.0 1623; ASM32PWR4-NEXT: stw 5, 120(1) 1624; ASM32PWR4-NEXT: lwz 5, L..C23(2) # %const.1 1625; ASM32PWR4-NEXT: lfd 2, 0(6) 1626; ASM32PWR4-NEXT: lwz 6, L..C24(2) # %const.2 1627; ASM32PWR4-NEXT: lfd 3, 0(5) 1628; ASM32PWR4-NEXT: lwz 5, L..C25(2) # %const.3 1629; ASM32PWR4-NEXT: lfd 4, 0(6) 1630; ASM32PWR4-NEXT: lwz 6, L..C26(2) # %const.4 1631; ASM32PWR4-NEXT: lfd 6, 0(5) 1632; ASM32PWR4-NEXT: lwz 5, L..C27(2) # %const.5 1633; ASM32PWR4-NEXT: lfd 7, 0(6) 1634; ASM32PWR4-NEXT: lwz 6, L..C28(2) # %const.6 1635; ASM32PWR4-NEXT: lfd 8, 0(5) 1636; ASM32PWR4-NEXT: lwz 5, L..C29(2) # %const.7 1637; ASM32PWR4-NEXT: lfd 9, 0(6) 1638; ASM32PWR4-NEXT: lwz 6, L..C30(2) # %const.8 1639; ASM32PWR4-NEXT: lfd 1, 0(5) 1640; ASM32PWR4-NEXT: lwz 5, L..C31(2) # %const.9 1641; ASM32PWR4-NEXT: lfd 11, 0(6) 1642; ASM32PWR4-NEXT: lwz 6, L..C32(2) # %const.10 1643; ASM32PWR4-NEXT: fmr 10, 1 1644; ASM32PWR4-NEXT: lfd 12, 0(5) 1645; ASM32PWR4-NEXT: lwz 5, L..C33(2) # %const.11 1646; ASM32PWR4-NEXT: lfd 13, 0(6) 1647; ASM32PWR4-NEXT: lfs 5, 0(5) 1648; ASM32PWR4-NEXT: stfd 0, 132(1) 1649; ASM32PWR4-NEXT: stw 4, 140(1) 1650; ASM32PWR4-NEXT: stw 3, 128(1) 1651; ASM32PWR4-NEXT: bl .test_fpr_stack 1652; ASM32PWR4-NEXT: nop 1653; ASM32PWR4-NEXT: addi 1, 1, 144 1654; ASM32PWR4-NEXT: lwz 0, 8(1) 1655; ASM32PWR4-NEXT: mtlr 0 1656; ASM32PWR4-NEXT: blr 1657; 1658; ASM64PWR4-LABEL: caller_fpr_stack: 1659; ASM64PWR4: # %bb.0: # %entry 1660; ASM64PWR4-NEXT: mflr 0 1661; ASM64PWR4-NEXT: stdu 1, -176(1) 1662; ASM64PWR4-NEXT: ld 3, L..C18(2) # @f14 1663; ASM64PWR4-NEXT: std 0, 192(1) 1664; ASM64PWR4-NEXT: ld 4, L..C19(2) # @d15 1665; ASM64PWR4-NEXT: ld 5, L..C20(2) # @f16 1666; ASM64PWR4-NEXT: ld 6, L..C21(2) # %const.9 1667; 
ASM64PWR4-NEXT: lis 7, 16313 1668; ASM64PWR4-NEXT: lwz 3, 0(3) 1669; ASM64PWR4-NEXT: ld 4, 0(4) 1670; ASM64PWR4-NEXT: lwz 5, 0(5) 1671; ASM64PWR4-NEXT: stw 3, 152(1) 1672; ASM64PWR4-NEXT: ld 3, L..C22(2) # %const.0 1673; ASM64PWR4-NEXT: std 4, 160(1) 1674; ASM64PWR4-NEXT: ld 4, L..C23(2) # %const.1 1675; ASM64PWR4-NEXT: lfd 2, 0(3) 1676; ASM64PWR4-NEXT: ld 3, L..C24(2) # %const.2 1677; ASM64PWR4-NEXT: lfd 3, 0(4) 1678; ASM64PWR4-NEXT: ld 4, L..C25(2) # %const.3 1679; ASM64PWR4-NEXT: lfd 4, 0(3) 1680; ASM64PWR4-NEXT: ld 3, L..C26(2) # %const.4 1681; ASM64PWR4-NEXT: lfd 6, 0(4) 1682; ASM64PWR4-NEXT: ld 4, L..C27(2) # %const.5 1683; ASM64PWR4-NEXT: lfd 7, 0(3) 1684; ASM64PWR4-NEXT: ld 3, L..C28(2) # %const.6 1685; ASM64PWR4-NEXT: lfd 8, 0(4) 1686; ASM64PWR4-NEXT: ld 4, L..C29(2) # %const.7 1687; ASM64PWR4-NEXT: lfd 9, 0(3) 1688; ASM64PWR4-NEXT: ld 3, L..C30(2) # %const.8 1689; ASM64PWR4-NEXT: lfd 1, 0(4) 1690; ASM64PWR4-NEXT: lis 4, 16320 1691; ASM64PWR4-NEXT: ori 4, 4, 41943 1692; ASM64PWR4-NEXT: rldic 4, 4, 32, 2 1693; ASM64PWR4-NEXT: lfd 11, 0(3) 1694; ASM64PWR4-NEXT: lis 3, 16316 1695; ASM64PWR4-NEXT: fmr 10, 1 1696; ASM64PWR4-NEXT: ori 3, 3, 10485 1697; ASM64PWR4-NEXT: oris 4, 4, 2621 1698; ASM64PWR4-NEXT: stw 5, 168(1) 1699; ASM64PWR4-NEXT: lis 5, 16318 1700; ASM64PWR4-NEXT: rldic 3, 3, 32, 2 1701; ASM64PWR4-NEXT: ori 5, 5, 47185 1702; ASM64PWR4-NEXT: ori 4, 4, 28836 1703; ASM64PWR4-NEXT: lfd 12, 0(6) 1704; ASM64PWR4-NEXT: ld 6, L..C31(2) # %const.10 1705; ASM64PWR4-NEXT: oris 3, 3, 49807 1706; ASM64PWR4-NEXT: ori 3, 3, 23593 1707; ASM64PWR4-NEXT: std 4, 144(1) 1708; ASM64PWR4-NEXT: rldic 4, 5, 32, 2 1709; ASM64PWR4-NEXT: oris 4, 4, 60293 1710; ASM64PWR4-NEXT: ori 4, 4, 7864 1711; ASM64PWR4-NEXT: std 3, 128(1) 1712; ASM64PWR4-NEXT: ld 3, L..C32(2) # %const.11 1713; ASM64PWR4-NEXT: ori 5, 7, 39321 1714; ASM64PWR4-NEXT: rldic 5, 5, 32, 2 1715; ASM64PWR4-NEXT: std 4, 136(1) 1716; ASM64PWR4-NEXT: lis 4, 4091 1717; ASM64PWR4-NEXT: ori 4, 4, 13107 1718; ASM64PWR4-NEXT: rldic 4, 4, 34, 2 1719; ASM64PWR4-NEXT: lfs 5, 0(3) 1720; ASM64PWR4-NEXT: oris 3, 5, 39321 1721; ASM64PWR4-NEXT: ori 3, 3, 39322 1722; ASM64PWR4-NEXT: lfd 13, 0(6) 1723; ASM64PWR4-NEXT: std 3, 120(1) 1724; ASM64PWR4-NEXT: oris 3, 4, 52428 1725; ASM64PWR4-NEXT: ori 3, 3, 52429 1726; ASM64PWR4-NEXT: std 3, 112(1) 1727; ASM64PWR4-NEXT: bl .test_fpr_stack 1728; ASM64PWR4-NEXT: nop 1729; ASM64PWR4-NEXT: addi 1, 1, 176 1730; ASM64PWR4-NEXT: ld 0, 16(1) 1731; ASM64PWR4-NEXT: mtlr 0 1732; ASM64PWR4-NEXT: blr 1733entry: 1734 %0 = load float, ptr @f14, align 4 1735 %1 = load double, ptr @d15, align 8 1736 %2 = load float, ptr @f16, align 4 1737 %call = call double @test_fpr_stack(double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 0x3FE6666666666666, double 8.000000e-01, double 9.000000e-01, double 1.000000e-01, double 1.100000e-01, double 1.200000e-01, double 1.300000e-01, float %0, double %1, float %2) 1738 ret void 1739} 1740 1741define i32 @mix_callee(double %d1, double %d2, double %d3, double %d4, i8 zeroext %c1, i16 signext %s1, i64 %ll1, i32 %i1, i32 %i2, i32 %i3) { 1742; ASM32PWR4-LABEL: mix_callee: 1743; ASM32PWR4: # %bb.0: # %entry 1744; ASM32PWR4-NEXT: lha 3, 62(1) 1745; ASM32PWR4-NEXT: lis 8, 17200 1746; ASM32PWR4-NEXT: fadd 1, 1, 2 1747; ASM32PWR4-NEXT: fadd 1, 1, 3 1748; ASM32PWR4-NEXT: lbz 5, 59(1) 1749; ASM32PWR4-NEXT: fadd 1, 1, 4 1750; ASM32PWR4-NEXT: lwz 4, 68(1) 1751; ASM32PWR4-NEXT: add 3, 5, 3 1752; ASM32PWR4-NEXT: lwz 5, L..C34(2) # 
define i32 @mix_callee(double %d1, double %d2, double %d3, double %d4, i8 zeroext %c1, i16 signext %s1, i64 %ll1, i32 %i1, i32 %i2, i32 %i3) {
; ASM32PWR4-LABEL: mix_callee:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: lha 3, 62(1)
; ASM32PWR4-NEXT: lis 8, 17200
; ASM32PWR4-NEXT: fadd 1, 1, 2
; ASM32PWR4-NEXT: fadd 1, 1, 3
; ASM32PWR4-NEXT: lbz 5, 59(1)
; ASM32PWR4-NEXT: fadd 1, 1, 4
; ASM32PWR4-NEXT: lwz 4, 68(1)
; ASM32PWR4-NEXT: add 3, 5, 3
; ASM32PWR4-NEXT: lwz 5, L..C34(2) # %const.0
; ASM32PWR4-NEXT: lwz 6, 72(1)
; ASM32PWR4-NEXT: add 3, 3, 4
; ASM32PWR4-NEXT: lwz 7, 76(1)
; ASM32PWR4-NEXT: add 3, 3, 6
; ASM32PWR4-NEXT: stw 8, -16(1)
; ASM32PWR4-NEXT: add 3, 3, 7
; ASM32PWR4-NEXT: lwz 8, 80(1)
; ASM32PWR4-NEXT: add 3, 3, 8
; ASM32PWR4-NEXT: lfs 0, 0(5)
; ASM32PWR4-NEXT: xoris 3, 3, 32768
; ASM32PWR4-NEXT: stw 3, -12(1)
; ASM32PWR4-NEXT: addi 3, 1, -4
; ASM32PWR4-NEXT: lfd 2, -16(1)
; ASM32PWR4-NEXT: fsub 0, 2, 0
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: fctiwz 0, 0
; ASM32PWR4-NEXT: stfiwx 0, 0, 3
; ASM32PWR4-NEXT: lwz 3, -4(1)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: mix_callee:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: lwz 3, 116(1)
; ASM64PWR4-NEXT: add 4, 7, 8
; ASM64PWR4-NEXT: fadd 0, 1, 2
; ASM64PWR4-NEXT: add 4, 4, 9
; ASM64PWR4-NEXT: fadd 0, 0, 3
; ASM64PWR4-NEXT: add 4, 4, 10
; ASM64PWR4-NEXT: lwz 5, 124(1)
; ASM64PWR4-NEXT: add 3, 4, 3
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: fadd 0, 0, 4
; ASM64PWR4-NEXT: extsw 3, 3
; ASM64PWR4-NEXT: std 3, -16(1)
; ASM64PWR4-NEXT: addi 3, 1, -4
; ASM64PWR4-NEXT: lfd 1, -16(1)
; ASM64PWR4-NEXT: fcfid 1, 1
; ASM64PWR4-NEXT: fadd 0, 1, 0
; ASM64PWR4-NEXT: fctiwz 0, 0
; ASM64PWR4-NEXT: stfiwx 0, 0, 3
; ASM64PWR4-NEXT: lwz 3, -4(1)
; ASM64PWR4-NEXT: blr
entry:
  %add = fadd double %d1, %d2
  %add1 = fadd double %add, %d3
  %add2 = fadd double %add1, %d4
  %conv = zext i8 %c1 to i32
  %conv3 = sext i16 %s1 to i32
  %add4 = add nsw i32 %conv, %conv3
  %conv5 = sext i32 %add4 to i64
  %add6 = add nsw i64 %conv5, %ll1
  %conv7 = sext i32 %i1 to i64
  %add8 = add nsw i64 %add6, %conv7
  %conv9 = sext i32 %i2 to i64
  %add10 = add nsw i64 %add8, %conv9
  %conv11 = sext i32 %i3 to i64
  %add12 = add nsw i64 %add10, %conv11
  %conv13 = trunc i64 %add12 to i32
  %conv14 = sitofp i32 %conv13 to double
  %add15 = fadd double %conv14, %add2
  %conv16 = fptosi double %add15 to i32
  ret i32 %conv16
}

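; caller_mix writes the stack-passed arguments for mix_callee into the parameter save area
; before the call and passes the remaining arguments in registers.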
define void @caller_mix() {
; ASM32PWR4-LABEL: caller_mix:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -96(1)
; ASM32PWR4-NEXT: li 3, 60
; ASM32PWR4-NEXT: stw 0, 104(1)
; ASM32PWR4-NEXT: stw 3, 80(1)
; ASM32PWR4-NEXT: li 3, 50
; ASM32PWR4-NEXT: stw 3, 76(1)
; ASM32PWR4-NEXT: li 3, 40
; ASM32PWR4-NEXT: stw 3, 72(1)
; ASM32PWR4-NEXT: li 3, 0
; ASM32PWR4-NEXT: stw 3, 64(1)
; ASM32PWR4-NEXT: li 3, 2
; ASM32PWR4-NEXT: stw 3, 60(1)
; ASM32PWR4-NEXT: lwz 3, L..C35(2) # %const.0
; ASM32PWR4-NEXT: lfd 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C36(2) # %const.1
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C37(2) # %const.2
; ASM32PWR4-NEXT: lfd 3, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C38(2) # %const.3
; ASM32PWR4-NEXT: lfd 4, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stw 3, 56(1)
; ASM32PWR4-NEXT: lis 3, 457
; ASM32PWR4-NEXT: ori 3, 3, 50048
; ASM32PWR4-NEXT: stw 3, 68(1)
; ASM32PWR4-NEXT: bl .mix_callee
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 96
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: caller_mix:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C33(2) # %const.0
; ASM64PWR4-NEXT: ld 4, L..C34(2) # %const.1
; ASM64PWR4-NEXT: lis 5, 457
; ASM64PWR4-NEXT: li 7, 1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: ori 9, 5, 50048
; ASM64PWR4-NEXT: li 8, 2
; ASM64PWR4-NEXT: lfd 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C35(2) # %const.2
; ASM64PWR4-NEXT: li 10, 40
; ASM64PWR4-NEXT: lfd 2, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C36(2) # %const.3
; ASM64PWR4-NEXT: lfd 3, 0(3)
; ASM64PWR4-NEXT: li 3, 60
; ASM64PWR4-NEXT: lfd 4, 0(4)
; ASM64PWR4-NEXT: li 4, 50
; ASM64PWR4-NEXT: std 3, 120(1)
; ASM64PWR4-NEXT: std 4, 112(1)
; ASM64PWR4-NEXT: bl .mix_callee
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %call = call i32 @mix_callee(double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, i8 zeroext 1, i16 signext 2, i64 30000000, i32 40, i32 50, i32 60)
  ret void
}

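; mix_floats receives eight integer and fourteen double arguments; only f1-f13 are available
; for the doubles, so %d14 is loaded from the parameter save area (160(1) in 32-bit mode,
; 216(1) in 64-bit mode).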
define i32 @mix_floats(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, double %d9, double %d10, double %d11, double %d12, double %d13, double %d14) {
; ASM32PWR4-LABEL: mix_floats:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: add 3, 3, 4
; ASM32PWR4-NEXT: lwz 4, L..C39(2) # %const.0
; ASM32PWR4-NEXT: lis 11, 17200
; ASM32PWR4-NEXT: stfd 31, -8(1) # 8-byte Folded Spill
; ASM32PWR4-NEXT: add 3, 3, 5
; ASM32PWR4-NEXT: add 3, 3, 6
; ASM32PWR4-NEXT: add 3, 3, 7
; ASM32PWR4-NEXT: stw 11, -24(1)
; ASM32PWR4-NEXT: add 3, 3, 8
; ASM32PWR4-NEXT: add 3, 3, 9
; ASM32PWR4-NEXT: add 3, 3, 10
; ASM32PWR4-NEXT: lfs 0, 0(4)
; ASM32PWR4-NEXT: xoris 3, 3, 32768
; ASM32PWR4-NEXT: stw 3, -20(1)
; ASM32PWR4-NEXT: addi 3, 1, -12
; ASM32PWR4-NEXT: lfd 31, -24(1)
; ASM32PWR4-NEXT: fsub 0, 31, 0
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: lfd 1, 160(1)
; ASM32PWR4-NEXT: fadd 0, 0, 2
; ASM32PWR4-NEXT: fadd 0, 0, 3
; ASM32PWR4-NEXT: fadd 0, 0, 4
; ASM32PWR4-NEXT: fadd 0, 0, 5
; ASM32PWR4-NEXT: fadd 0, 0, 6
; ASM32PWR4-NEXT: fadd 0, 0, 7
; ASM32PWR4-NEXT: fadd 0, 0, 8
; ASM32PWR4-NEXT: fadd 0, 0, 9
; ASM32PWR4-NEXT: fadd 0, 0, 10
; ASM32PWR4-NEXT: fadd 0, 0, 11
; ASM32PWR4-NEXT: fadd 0, 0, 12
; ASM32PWR4-NEXT: fadd 0, 0, 13
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: fctiwz 0, 0
; ASM32PWR4-NEXT: stfiwx 0, 0, 3
; ASM32PWR4-NEXT: lwz 3, -12(1)
; ASM32PWR4-NEXT: lfd 31, -8(1) # 8-byte Folded Reload
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: mix_floats:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: add 3, 3, 6
; ASM64PWR4-NEXT: add 3, 3, 7
; ASM64PWR4-NEXT: add 3, 3, 8
; ASM64PWR4-NEXT: add 3, 3, 9
; ASM64PWR4-NEXT: add 3, 3, 10
; ASM64PWR4-NEXT: extsw 3, 3
; ASM64PWR4-NEXT: std 3, -16(1)
; ASM64PWR4-NEXT: addi 3, 1, -4
; ASM64PWR4-NEXT: lfd 0, -16(1)
; ASM64PWR4-NEXT: fcfid 0, 0
; ASM64PWR4-NEXT: fadd 0, 0, 1
; ASM64PWR4-NEXT: lfd 1, 216(1)
; ASM64PWR4-NEXT: fadd 0, 0, 2
; ASM64PWR4-NEXT: fadd 0, 0, 3
; ASM64PWR4-NEXT: fadd 0, 0, 4
; ASM64PWR4-NEXT: fadd 0, 0, 5
; ASM64PWR4-NEXT: fadd 0, 0, 6
; ASM64PWR4-NEXT: fadd 0, 0, 7
; ASM64PWR4-NEXT: fadd 0, 0, 8
; ASM64PWR4-NEXT: fadd 0, 0, 9
; ASM64PWR4-NEXT: fadd 0, 0, 10
; ASM64PWR4-NEXT: fadd 0, 0, 11
; ASM64PWR4-NEXT: fadd 0, 0, 12
; ASM64PWR4-NEXT: fadd 0, 0, 13
; ASM64PWR4-NEXT: fadd 0, 0, 1
; ASM64PWR4-NEXT: fctiwz 0, 0
; ASM64PWR4-NEXT: stfiwx 0, 0, 3
; ASM64PWR4-NEXT: lwz 3, -4(1)
; ASM64PWR4-NEXT: blr
entry:
  %add = add nsw i32 %i1, %i2
  %add1 = add nsw i32 %add, %i3
  %add2 = add nsw i32 %add1, %i4
  %add3 = add nsw i32 %add2, %i5
  %add4 = add nsw i32 %add3, %i6
  %add5 = add nsw i32 %add4, %i7
  %add6 = add nsw i32 %add5, %i8
  %conv = sitofp i32 %add6 to double
  %add7 = fadd double %conv, %d1
  %add8 = fadd double %add7, %d2
  %add9 = fadd double %add8, %d3
  %add10 = fadd double %add9, %d4
  %add11 = fadd double %add10, %d5
  %add12 = fadd double %add11, %d6
  %add13 = fadd double %add12, %d7
  %add14 = fadd double %add13, %d8
  %add15 = fadd double %add14, %d9
  %add16 = fadd double %add15, %d10
  %add17 = fadd double %add16, %d11
  %add18 = fadd double %add17, %d12
  %add19 = fadd double %add18, %d13
  %add20 = fadd double %add19, %d14
  %conv21 = fptosi double %add20 to i32
  ret i32 %conv21
}

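; mix_floats_caller passes the integers in r3-r10 and the first thirteen doubles in f1-f13,
; and also stores the argument words for all fourteen doubles (including %d14, which has no
; free FPR) to the parameter save area before calling mix_floats.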
define void @mix_floats_caller() {
; ASM32PWR4-LABEL: mix_floats_caller:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -176(1)
; ASM32PWR4-NEXT: li 3, 0
; ASM32PWR4-NEXT: stw 0, 184(1)
; ASM32PWR4-NEXT: lis 4, 16352
; ASM32PWR4-NEXT: lis 5, 16339
; ASM32PWR4-NEXT: lis 6, 16364
; ASM32PWR4-NEXT: stw 3, 92(1)
; ASM32PWR4-NEXT: ori 5, 5, 13107
; ASM32PWR4-NEXT: ori 6, 6, 52428
; ASM32PWR4-NEXT: stw 3, 132(1)
; ASM32PWR4-NEXT: lis 3, 16368
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: li 10, 8
; ASM32PWR4-NEXT: stw 3, 128(1)
; ASM32PWR4-NEXT: lis 3, -26215
; ASM32PWR4-NEXT: ori 3, 3, 39322
; ASM32PWR4-NEXT: stw 4, 88(1)
; ASM32PWR4-NEXT: lis 4, 16313
; ASM32PWR4-NEXT: ori 4, 4, 39321
; ASM32PWR4-NEXT: stw 3, 60(1)
; ASM32PWR4-NEXT: stw 3, 68(1)
; ASM32PWR4-NEXT: stw 3, 84(1)
; ASM32PWR4-NEXT: stw 3, 116(1)
; ASM32PWR4-NEXT: stw 3, 140(1)
; ASM32PWR4-NEXT: lis 3, 16369
; ASM32PWR4-NEXT: ori 3, 3, 39321
; ASM32PWR4-NEXT: stw 4, 56(1)
; ASM32PWR4-NEXT: lis 4, 16329
; ASM32PWR4-NEXT: ori 4, 4, 39321
; ASM32PWR4-NEXT: stw 3, 136(1)
; ASM32PWR4-NEXT: lis 3, 16371
; ASM32PWR4-NEXT: ori 3, 3, 13107
; ASM32PWR4-NEXT: stw 4, 64(1)
; ASM32PWR4-NEXT: lis 4, 13107
; ASM32PWR4-NEXT: ori 4, 4, 13107
; ASM32PWR4-NEXT: stw 3, 144(1)
; ASM32PWR4-NEXT: lis 3, 16372
; ASM32PWR4-NEXT: ori 3, 3, 52428
; ASM32PWR4-NEXT: stw 4, 76(1)
; ASM32PWR4-NEXT: stw 4, 100(1)
; ASM32PWR4-NEXT: stw 4, 148(1)
; ASM32PWR4-NEXT: lwz 4, L..C40(2) # %const.0
; ASM32PWR4-NEXT: stw 3, 152(1)
; ASM32PWR4-NEXT: lwz 3, L..C41(2) # %const.1
; ASM32PWR4-NEXT: lfd 1, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C42(2) # %const.2
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C43(2) # %const.3
; ASM32PWR4-NEXT: stw 5, 72(1)
; ASM32PWR4-NEXT: lis 5, 16345
; ASM32PWR4-NEXT: ori 5, 5, 39321
; ASM32PWR4-NEXT: stw 5, 80(1)
; ASM32PWR4-NEXT: lis 5, 16355
; ASM32PWR4-NEXT: ori 5, 5, 13107
; ASM32PWR4-NEXT: lfd 3, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C44(2) # %const.4
; ASM32PWR4-NEXT: lfd 4, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C45(2) # %const.5
; ASM32PWR4-NEXT: stw 5, 96(1)
; ASM32PWR4-NEXT: lis 5, 26214
; ASM32PWR4-NEXT: ori 7, 5, 26214
; ASM32PWR4-NEXT: lis 5, 16358
; ASM32PWR4-NEXT: lfd 6, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C46(2) # %const.6
; ASM32PWR4-NEXT: ori 5, 5, 26214
; ASM32PWR4-NEXT: lfd 7, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C47(2) # %const.7
; ASM32PWR4-NEXT: stw 5, 104(1)
; ASM32PWR4-NEXT: lis 5, 16361
; ASM32PWR4-NEXT: ori 5, 5, 39321
; ASM32PWR4-NEXT: lfd 8, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C48(2) # %const.8
; ASM32PWR4-NEXT: lfd 9, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C49(2) # %const.9
; ASM32PWR4-NEXT: stw 5, 112(1)
; ASM32PWR4-NEXT: lis 5, -13108
; ASM32PWR4-NEXT: ori 5, 5, 52429
; ASM32PWR4-NEXT: stw 5, 124(1)
; ASM32PWR4-NEXT: stw 5, 156(1)
; ASM32PWR4-NEXT: lwz 5, L..C50(2) # %const.12
; ASM32PWR4-NEXT: lfd 11, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C51(2) # %const.10
; ASM32PWR4-NEXT: lfd 12, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C52(2) # %const.11
; ASM32PWR4-NEXT: lfd 13, 0(4)
; ASM32PWR4-NEXT: lis 4, 16374
; ASM32PWR4-NEXT: ori 11, 4, 26214
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: lfs 5, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: lfs 10, 0(5)
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: stw 7, 108(1)
; ASM32PWR4-NEXT: stw 6, 120(1)
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: stw 7, 164(1)
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: stw 11, 160(1)
; ASM32PWR4-NEXT: bl .mix_floats
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 176
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: mix_floats_caller:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -240(1)
; ASM64PWR4-NEXT: li 3, 1023
; ASM64PWR4-NEXT: std 0, 256(1)
; ASM64PWR4-NEXT: ld 4, L..C37(2) # %const.0
; ASM64PWR4-NEXT: ld 8, L..C38(2) # %const.6
; ASM64PWR4-NEXT: lis 5, 16371
; ASM64PWR4-NEXT: ld 6, L..C39(2) # %const.3
; ASM64PWR4-NEXT: ld 9, L..C40(2) # %const.9
; ASM64PWR4-NEXT: ld 10, L..C41(2) # %const.11
; ASM64PWR4-NEXT: rldic 3, 3, 52, 2
; ASM64PWR4-NEXT: lis 11, 4091
; ASM64PWR4-NEXT: std 3, 184(1)
; ASM64PWR4-NEXT: li 3, 511
; ASM64PWR4-NEXT: lis 12, 16361
; ASM64PWR4-NEXT: rldic 3, 3, 53, 2
; ASM64PWR4-NEXT: lfd 1, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C42(2) # %const.2
; ASM64PWR4-NEXT: lis 0, 16345
; ASM64PWR4-NEXT: std 3, 144(1)
; ASM64PWR4-NEXT: ld 3, L..C43(2) # %const.1
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: lis 3, 16374
; ASM64PWR4-NEXT: ori 7, 3, 26214
; ASM64PWR4-NEXT: ori 3, 5, 13107
; ASM64PWR4-NEXT: ld 5, L..C44(2) # %const.5
; ASM64PWR4-NEXT: lfd 8, 0(8)
; ASM64PWR4-NEXT: ld 8, L..C45(2) # %const.8
; ASM64PWR4-NEXT: rldimi 7, 7, 32, 0
; ASM64PWR4-NEXT: rlwimi 7, 7, 16, 0, 15
; ASM64PWR4-NEXT: rldimi 3, 3, 32, 0
; ASM64PWR4-NEXT: lfd 3, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C46(2) # %const.4
; ASM64PWR4-NEXT: rlwimi 3, 3, 16, 0, 15
; ASM64PWR4-NEXT: lfd 4, 0(6)
; ASM64PWR4-NEXT: lis 6, 16355
; ASM64PWR4-NEXT: lfd 7, 0(5)
; ASM64PWR4-NEXT: ori 5, 6, 13107
; ASM64PWR4-NEXT: ld 6, L..C47(2) # %const.7
; ASM64PWR4-NEXT: rldimi 5, 5, 32, 0
; ASM64PWR4-NEXT: rlwimi 5, 5, 16, 0, 15
; ASM64PWR4-NEXT: lfd 11, 0(8)
; ASM64PWR4-NEXT: ld 8, L..C48(2) # %const.10
; ASM64PWR4-NEXT: lfd 6, 0(4)
; ASM64PWR4-NEXT: lis 4, 16358
; ASM64PWR4-NEXT: ori 4, 4, 26214
; ASM64PWR4-NEXT: rldimi 4, 4, 32, 0
; ASM64PWR4-NEXT: lfd 9, 0(6)
; ASM64PWR4-NEXT: lis 6, 16339
; ASM64PWR4-NEXT: rlwimi 4, 4, 16, 0, 15
; ASM64PWR4-NEXT: ori 6, 6, 13107
; ASM64PWR4-NEXT: lfd 12, 0(9)
; ASM64PWR4-NEXT: lis 9, 4093
; ASM64PWR4-NEXT: ori 9, 9, 13107
; ASM64PWR4-NEXT: lfd 13, 0(8)
; ASM64PWR4-NEXT: lis 8, 16369
; ASM64PWR4-NEXT: ori 8, 8, 39321
; ASM64PWR4-NEXT: rldimi 6, 6, 32, 0
; ASM64PWR4-NEXT: std 31, 232(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: ld 31, L..C49(2) # %const.12
; ASM64PWR4-NEXT: rldic 9, 9, 34, 2
; ASM64PWR4-NEXT: rlwimi 6, 6, 16, 0, 15
; ASM64PWR4-NEXT: oris 9, 9, 52428
; ASM64PWR4-NEXT: lfs 5, 0(10)
; ASM64PWR4-NEXT: lis 10, 16329
; ASM64PWR4-NEXT: ori 10, 10, 39321
; ASM64PWR4-NEXT: std 7, 216(1)
; ASM64PWR4-NEXT: ori 7, 11, 13107
; ASM64PWR4-NEXT: ori 11, 12, 39321
; ASM64PWR4-NEXT: ori 12, 0, 39321
; ASM64PWR4-NEXT: std 4, 160(1)
; ASM64PWR4-NEXT: rldic 4, 8, 32, 2
; ASM64PWR4-NEXT: rldic 7, 7, 34, 2
; ASM64PWR4-NEXT: oris 4, 4, 39321
; ASM64PWR4-NEXT: std 30, 224(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: lis 30, 16313
; ASM64PWR4-NEXT: rldic 8, 11, 32, 2
; ASM64PWR4-NEXT: rldic 11, 12, 32, 2
; ASM64PWR4-NEXT: std 3, 200(1)
; ASM64PWR4-NEXT: ori 3, 30, 39321
; ASM64PWR4-NEXT: ori 4, 4, 39322
; ASM64PWR4-NEXT: rldic 3, 3, 32, 2
; ASM64PWR4-NEXT: std 5, 152(1)
; ASM64PWR4-NEXT: rldic 5, 10, 32, 2
; ASM64PWR4-NEXT: oris 5, 5, 39321
; ASM64PWR4-NEXT: oris 3, 3, 39321
; ASM64PWR4-NEXT: std 6, 128(1)
; ASM64PWR4-NEXT: oris 6, 7, 52428
; ASM64PWR4-NEXT: ori 7, 9, 52429
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: lfs 10, 0(31)
; ASM64PWR4-NEXT: li 10, 8
; ASM64PWR4-NEXT: std 7, 208(1)
; ASM64PWR4-NEXT: oris 7, 8, 39321
; ASM64PWR4-NEXT: oris 8, 11, 39321
; ASM64PWR4-NEXT: ori 11, 3, 39322
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: std 4, 192(1)
; ASM64PWR4-NEXT: ori 4, 6, 52429
; ASM64PWR4-NEXT: ori 6, 8, 39322
; ASM64PWR4-NEXT: std 4, 176(1)
; ASM64PWR4-NEXT: ori 4, 7, 39322
; ASM64PWR4-NEXT: ori 7, 5, 39322
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: std 4, 168(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: std 6, 136(1)
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: std 7, 120(1)
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: std 11, 112(1)
; ASM64PWR4-NEXT: bl .mix_floats
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: ld 31, 232(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: ld 30, 224(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: addi 1, 1, 240
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %call = call i32 @mix_floats(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 0x3FE6666666666666, double 8.000000e-01, double 9.000000e-01, double 1.000000e+00, double 1.100000e+00, double 1.200000e+00, double 1.300000e+00, double 1.400000e+00)
  ret void
}