; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f \
; RUN:   -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
; RUN:   -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; This file contains tests that should have identical output for the lp64,
; lp64f, and lp64d ABIs, i.e. where no arguments are passed according to
; the floating-point ABI. It doesn't check codegen when frame pointer
; elimination is disabled, as there is sufficient coverage for this case in
; other files.

; Check that on RV64, i128 is passed in a pair of registers. Unlike
; the convention for varargs, this need not be an aligned pair.

define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
; RV64I-LABEL: callee_i128_in_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
  %b_trunc = trunc i128 %b to i64
  %1 = add i64 %a, %b_trunc
  ret i64 %1
}

define i64 @caller_i128_in_regs() nounwind {
; RV64I-LABEL: caller_i128_in_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 0
; RV64I-NEXT:    call callee_i128_in_regs
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
  ret i64 %1
}

; Check that the stack is used once the GPRs are exhausted

define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f, i128 %g, i32 %h) nounwind {
; RV64I-LABEL: callee_many_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lw t0, 8(sp)
; RV64I-NEXT:    ld t1, 0(sp)
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    xor a3, a3, a7
; RV64I-NEXT:    srli a1, a1, 48
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    add a0, a0, a5
; RV64I-NEXT:    xor a1, a4, t1
; RV64I-NEXT:    add a0, a0, a6
; RV64I-NEXT:    or a1, a3, a1
; RV64I-NEXT:    seqz a1, a1
; RV64I-NEXT:    add a0, a0, t0
; RV64I-NEXT:    addw a0, a1, a0
; RV64I-NEXT:    ret
  %a_ext = zext i8 %a to i32
  %b_ext = zext i16 %b to i32
  %1 = add i32 %a_ext, %b_ext
  %2 = add i32 %1, %c
  %3 = icmp eq i128 %d, %g
  %4 = zext i1 %3 to i32
  %5 = add i32 %4, %2
  %6 = add i32 %5, %e
  %7 = add i32 %6, %f
  %8 = add i32 %7, %h
  ret i32 %8
}

define i32 @caller_many_scalars() nounwind {
; RV64I-LABEL: caller_many_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a4, 8
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a5, 5
; RV64I-NEXT:    li a6, 6
; RV64I-NEXT:    li a7, 7
; RV64I-NEXT:    sd zero, 0(sp)
; RV64I-NEXT:    sd a4, 8(sp)
; RV64I-NEXT:    li a4, 0
; RV64I-NEXT:    call callee_many_scalars
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i128 4, i32 5, i32 6, i128 7, i32 8)
  ret i32 %1
}

; Check that i256 is passed indirectly.
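; As an informal sketch of what the checks below verify (not an extra test):
; an i256 does not fit in any register pair, so the caller materializes each
; value in a stack temporary and passes only its address, e.g.
;
;   addi a0, sp, 32    # a0 = address of the temporary holding i256 1
;   mv   a1, sp        # a1 = address of the temporary holding i256 2
;
; and the callee loads the four xlen-sized words through those pointers.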

define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
; RV64I-LABEL: callee_large_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a2, 0(a1)
; RV64I-NEXT:    ld a3, 8(a1)
; RV64I-NEXT:    ld a4, 16(a1)
; RV64I-NEXT:    ld a1, 24(a1)
; RV64I-NEXT:    ld a5, 24(a0)
; RV64I-NEXT:    ld a6, 8(a0)
; RV64I-NEXT:    ld a7, 16(a0)
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    xor a1, a5, a1
; RV64I-NEXT:    xor a3, a6, a3
; RV64I-NEXT:    xor a4, a7, a4
; RV64I-NEXT:    xor a0, a0, a2
; RV64I-NEXT:    or a1, a3, a1
; RV64I-NEXT:    or a0, a0, a4
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = icmp eq i256 %a, %b
  %2 = zext i1 %1 to i64
  ret i64 %2
}

define i64 @caller_large_scalars() nounwind {
; RV64I-LABEL: caller_large_scalars:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -80
; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a2, 2
; RV64I-NEXT:    li a3, 1
; RV64I-NEXT:    addi a0, sp, 32
; RV64I-NEXT:    mv a1, sp
; RV64I-NEXT:    sd a2, 0(sp)
; RV64I-NEXT:    sd zero, 8(sp)
; RV64I-NEXT:    sd zero, 16(sp)
; RV64I-NEXT:    sd zero, 24(sp)
; RV64I-NEXT:    sd a3, 32(sp)
; RV64I-NEXT:    sd zero, 40(sp)
; RV64I-NEXT:    sd zero, 48(sp)
; RV64I-NEXT:    sd zero, 56(sp)
; RV64I-NEXT:    call callee_large_scalars
; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 80
; RV64I-NEXT:    ret
  %1 = call i64 @callee_large_scalars(i256 1, i256 2)
  ret i64 %1
}

; Check that arguments larger than 2*xlen are handled correctly when their
; address is passed on the stack rather than in a register

; Must keep define on a single line due to an update_llc_test_checks.py limitation
define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i256 %h, i64 %i, i256 %j) nounwind {
; RV64I-LABEL: callee_large_scalars_exhausted_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a0, 8(sp)
; RV64I-NEXT:    ld a1, 0(a7)
; RV64I-NEXT:    ld a2, 8(a7)
; RV64I-NEXT:    ld a3, 16(a7)
; RV64I-NEXT:    ld a4, 24(a7)
; RV64I-NEXT:    ld a5, 24(a0)
; RV64I-NEXT:    ld a6, 8(a0)
; RV64I-NEXT:    ld a7, 16(a0)
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    xor a4, a4, a5
; RV64I-NEXT:    xor a2, a2, a6
; RV64I-NEXT:    xor a3, a3, a7
; RV64I-NEXT:    xor a0, a1, a0
; RV64I-NEXT:    or a2, a2, a4
; RV64I-NEXT:    or a0, a0, a3
; RV64I-NEXT:    or a0, a0, a2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = icmp eq i256 %h, %j
  %2 = zext i1 %1 to i64
  ret i64 %2
}

define i64 @caller_large_scalars_exhausted_regs() nounwind {
; RV64I-LABEL: caller_large_scalars_exhausted_regs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -96
; RV64I-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi a7, sp, 16
; RV64I-NEXT:    li t0, 9
; RV64I-NEXT:    li t1, 10
; RV64I-NEXT:    li t2, 8
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a4, 5
; RV64I-NEXT:    li a5, 6
; RV64I-NEXT:    li a6, 7
; RV64I-NEXT:    sd t0, 0(sp)
; RV64I-NEXT:    sd a7, 8(sp)
; RV64I-NEXT:    addi a7, sp, 48
; RV64I-NEXT:    sd t1, 16(sp)
; RV64I-NEXT:    sd zero, 24(sp)
; RV64I-NEXT:    sd zero, 32(sp)
; RV64I-NEXT:    sd zero, 40(sp)
; RV64I-NEXT:    sd t2, 48(sp)
; RV64I-NEXT:    sd zero, 56(sp)
; RV64I-NEXT:    sd zero, 64(sp)
; RV64I-NEXT:    sd zero, 72(sp)
; RV64I-NEXT:    call callee_large_scalars_exhausted_regs
; RV64I-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 96
; RV64I-NEXT:    ret
  %1 = call i64 @callee_large_scalars_exhausted_regs(
      i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i256 8, i64 9,
      i256 10)
  ret i64 %1
}

; Ensure that libcalls generated in the middle-end obey the calling convention
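; fp128 is not a native type on RV64I, so the sitofp in the function below is
; lowered to a call to the compiler-rt/libgcc helper __floatditf. Reading the
; checks informally: the i64 argument is already in a0, the fp128 result
; comes back in the a0/a1 pair like any other 2*xlen scalar, and truncating
; the bitcast i128 to i64 therefore needs no extra code before returning a0.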

define i64 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
; RV64I-LABEL: caller_mixed_scalar_libcalls:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatditf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i64 %a to fp128
  %2 = bitcast fp128 %1 to i128
  %3 = trunc i128 %2 to i64
  ret i64 %3
}

; Check passing of coerced integer arrays

%struct.small = type { i64, ptr }

define i64 @callee_small_coerced_struct([2 x i64] %a.coerce) nounwind {
; RV64I-LABEL: callee_small_coerced_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ret
  %1 = extractvalue [2 x i64] %a.coerce, 0
  %2 = extractvalue [2 x i64] %a.coerce, 1
  %3 = icmp eq i64 %1, %2
  %4 = zext i1 %3 to i64
  ret i64 %4
}

define i64 @caller_small_coerced_struct() nounwind {
; RV64I-LABEL: caller_small_coerced_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    call callee_small_coerced_struct
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @callee_small_coerced_struct([2 x i64] [i64 1, i64 2])
  ret i64 %1
}

; Check large struct arguments, which are passed byval

%struct.large = type { i64, i64, i64, i64 }

define i64 @callee_large_struct(ptr byval(%struct.large) align 8 %a) nounwind {
; RV64I-LABEL: callee_large_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a1, 0(a0)
; RV64I-NEXT:    ld a0, 24(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 3
  %2 = load i64, ptr %a
  %3 = load i64, ptr %1
  %4 = add i64 %2, %3
  ret i64 %4
}

define i64 @caller_large_struct() nounwind {
; RV64I-LABEL: caller_large_struct:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -80
; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    sd a0, 40(sp)
; RV64I-NEXT:    sd a1, 48(sp)
; RV64I-NEXT:    sd a2, 56(sp)
; RV64I-NEXT:    sd a3, 64(sp)
; RV64I-NEXT:    sd a0, 8(sp)
; RV64I-NEXT:    sd a1, 16(sp)
; RV64I-NEXT:    sd a2, 24(sp)
; RV64I-NEXT:    sd a3, 32(sp)
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee_large_struct
; RV64I-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 80
; RV64I-NEXT:    ret
  %ls = alloca %struct.large, align 8
  store i64 1, ptr %ls
  %b = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 1
  store i64 2, ptr %b
  %c = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 2
  store i64 3, ptr %c
  %d = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 3
  store i64 4, ptr %d
  %1 = call i64 @callee_large_struct(ptr byval(%struct.large) align 8 %ls)
  ret i64 %1
}

; Check 2x xlen values are aligned appropriately when passed on the stack
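; Informally: a 2*xlen scalar (i128) that is passed on the stack is placed at
; a 16-byte-aligned offset, so the caller below skips the slot at 8(sp) as
; padding and stores the i128 value 9 at 16(sp), while the [2 x i64] only
; needs its natural 8-byte alignment and lands at 40(sp)/48(sp).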
; Must keep define on a single line due to an update_llc_test_checks.py limitation
define i64 @callee_aligned_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i128 %f, i64 %g, i64 %h, i128 %i, i64 %j, [2 x i64] %k) nounwind {
; The i128 should be 16-byte aligned on the stack, but the two-element array
; should only be 8-byte aligned
; RV64I-LABEL: callee_aligned_stack:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a0, 32(sp)
; RV64I-NEXT:    ld a1, 0(sp)
; RV64I-NEXT:    ld a2, 16(sp)
; RV64I-NEXT:    ld a3, 40(sp)
; RV64I-NEXT:    add a5, a5, a7
; RV64I-NEXT:    add a1, a5, a1
; RV64I-NEXT:    add a0, a2, a0
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    add a0, a0, a3
; RV64I-NEXT:    ret
  %f_trunc = trunc i128 %f to i64
  %1 = add i64 %f_trunc, %g
  %2 = add i64 %1, %h
  %3 = trunc i128 %i to i64
  %4 = add i64 %2, %3
  %5 = add i64 %4, %j
  %6 = extractvalue [2 x i64] %k, 0
  %7 = add i64 %5, %6
  ret i64 %7
}

define void @caller_aligned_stack() nounwind {
; The i128 should be 16-byte aligned on the stack, but the two-element array
; should only be 8-byte aligned
; RV64I-LABEL: caller_aligned_stack:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -64
; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a6, 12
; RV64I-NEXT:    li a7, 11
; RV64I-NEXT:    li t0, 10
; RV64I-NEXT:    li t1, 9
; RV64I-NEXT:    li t2, 8
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 2
; RV64I-NEXT:    li a2, 3
; RV64I-NEXT:    li a3, 4
; RV64I-NEXT:    li a4, 5
; RV64I-NEXT:    li a5, 6
; RV64I-NEXT:    sd a7, 40(sp)
; RV64I-NEXT:    sd a6, 48(sp)
; RV64I-NEXT:    li a7, 7
; RV64I-NEXT:    sd t2, 0(sp)
; RV64I-NEXT:    sd t1, 16(sp)
; RV64I-NEXT:    sd zero, 24(sp)
; RV64I-NEXT:    sd t0, 32(sp)
; RV64I-NEXT:    li a6, 0
; RV64I-NEXT:    call callee_aligned_stack
; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 64
; RV64I-NEXT:    ret
  %1 = call i64 @callee_aligned_stack(i64 1, i64 2, i64 3, i64 4, i64 5,
      i128 6, i64 7, i64 8, i128 9, i64 10, [2 x i64] [i64 11, i64 12])
  ret void
}

; Check return of 2x xlen scalars

define i128 @callee_small_scalar_ret() nounwind {
; RV64I-LABEL: callee_small_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, -1
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    ret
  ret i128 -1
}

define i64 @caller_small_scalar_ret() nounwind {
; RV64I-LABEL: caller_small_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call callee_small_scalar_ret
; RV64I-NEXT:    not a1, a1
; RV64I-NEXT:    xori a0, a0, -2
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i128 @callee_small_scalar_ret()
  %2 = icmp eq i128 -2, %1
  %3 = zext i1 %2 to i64
  ret i64 %3
}

; Check return of 2x xlen structs

define %struct.small @callee_small_struct_ret() nounwind {
; RV64I-LABEL: callee_small_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    ret
  ret %struct.small { i64 1, ptr null }
}

define i64 @caller_small_struct_ret() nounwind {
; RV64I-LABEL: caller_small_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call callee_small_struct_ret
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call %struct.small @callee_small_struct_ret()
  %2 = extractvalue %struct.small %1, 0
  %3 = extractvalue %struct.small %1, 1
  %4 = ptrtoint ptr %3 to i64
  %5 = add i64 %2, %4
  ret i64 %5
}

; Check return of >2x xlen scalars
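; Informal sketch of the indirect return verified below: i256 is too large to
; return in a0/a1, so the caller reserves 32 bytes of stack and passes the
; address of that buffer as a hidden argument in a0 ("mv a0, sp"); the callee
; then stores the result through a0 instead of using return registers.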

define i256 @callee_large_scalar_ret() nounwind {
; RV64I-LABEL: callee_large_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    lui a2, 1018435
; RV64I-NEXT:    addiw a2, a2, 747
; RV64I-NEXT:    sd a2, 0(a0)
; RV64I-NEXT:    sd a1, 8(a0)
; RV64I-NEXT:    sd a1, 16(a0)
; RV64I-NEXT:    sd a1, 24(a0)
; RV64I-NEXT:    ret
  ret i256 -123456789
}

define void @caller_large_scalar_ret() nounwind {
; RV64I-LABEL: caller_large_scalar_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a0, sp
; RV64I-NEXT:    call callee_large_scalar_ret
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
  %1 = call i256 @callee_large_scalar_ret()
  ret void
}

; Check return of >2x xlen structs

define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
; RV64I-LABEL: callee_large_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    li a2, 2
; RV64I-NEXT:    li a3, 3
; RV64I-NEXT:    li a4, 4
; RV64I-NEXT:    sw a1, 0(a0)
; RV64I-NEXT:    sw zero, 4(a0)
; RV64I-NEXT:    sw a2, 8(a0)
; RV64I-NEXT:    sw zero, 12(a0)
; RV64I-NEXT:    sw a3, 16(a0)
; RV64I-NEXT:    sw zero, 20(a0)
; RV64I-NEXT:    sw a4, 24(a0)
; RV64I-NEXT:    sw zero, 28(a0)
; RV64I-NEXT:    ret
  store i64 1, ptr %agg.result, align 4
  %b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
  store i64 2, ptr %b, align 4
  %c = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 2
  store i64 3, ptr %c, align 4
  %d = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 3
  store i64 4, ptr %d, align 4
  ret void
}

define i64 @caller_large_struct_ret() nounwind {
; RV64I-LABEL: caller_large_struct_ret:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee_large_struct_ret
; RV64I-NEXT:    ld a0, 8(sp)
; RV64I-NEXT:    ld a1, 32(sp)
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
  %1 = alloca %struct.large
  call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
  %2 = load i64, ptr %1
  %3 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 3
  %4 = load i64, ptr %3
  %5 = add i64 %2, %4
  ret i64 %5
}
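
; Note (informal summary of the two functions above, not itself a check): the
; hidden sret pointer for %struct.large behaves like the indirect i256 return.
; The caller passes the address of its own stack slot in a0 ("addi a0, sp, 8")
; and, since the callee is not expected to hand that pointer back, reloads the
; struct fields directly from 8(sp) and 32(sp) after the call.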