; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IM %s

define signext i32 @square(i32 %a) nounwind {
; RV32I-LABEL: square:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: square:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mul a0, a0, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: square:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: square:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mulw a0, a0, a0
; RV64IM-NEXT: ret
  %1 = mul i32 %a, %a
  ret i32 %1
}

define signext i32 @mul(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mul:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, %b
  ret i32 %1
}

define signext i32 @mul_constant(i32 %a) nounwind {
; RV32I-LABEL: mul_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 2
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 2
; RV32IM-NEXT: add a0, a1, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: addw a0, a1, a0
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 5
  ret i32 %1
}

define i32 @mul_pow2(i32 %a) nounwind {
; RV32I-LABEL: mul_pow2:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 3
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul_pow2:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 3
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul_pow2:
; RV64I: # %bb.0:
; RV64I-NEXT: slliw a0, a0, 3
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul_pow2:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slliw a0, a0, 3
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 8
  ret i32 %1
}

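; A full 64-bit multiply on RV32 needs the low product plus the cross terms:
; lo(a*b) = a.lo*b.lo and hi(a*b) = mulhu(a.lo, b.lo) + a.lo*b.hi + a.hi*b.lo,
; which is what the RV32IM expansion below computes. Without M it becomes a
; __muldi3 libcall.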
define i64 @mul64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: mul64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul64:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mul a3, a0, a3
; RV32IM-NEXT: mulhu a4, a0, a2
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: add a3, a4, a3
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: mul a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul64:
; RV64I: # %bb.0:
; RV64I-NEXT: tail __muldi3
;
; RV64IM-LABEL: mul64:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, %b
  ret i64 %1
}

define i64 @mul64_constant(i64 %a) nounwind {
; RV32I-LABEL: mul64_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 2
; RV32I-NEXT: srli a3, a0, 30
; RV32I-NEXT: slli a4, a1, 2
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sltu a2, a0, a2
; RV32I-NEXT: add a1, a3, a1
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mul64_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a2, 5
; RV32IM-NEXT: slli a3, a1, 2
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: slli a3, a0, 2
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: add a0, a3, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mul64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mul64_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: ret
  %1 = mul i64 %a, 5
  ret i64 %1
}

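; The mulh* tests below take the high 32 bits of a widened 64-bit product.
; With the M extension this maps directly onto mulh/mulhu/mulhsu; without it,
; the i64 multiply is expanded with shifts or turned into a libcall first.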
define i32 @mulhs(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mulhs:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srai a3, a2, 31
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhs:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mulh a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhs:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhs:
; RV64IM: # %bb.0:
; RV64IM-NEXT: sext.w a0, a0
; RV64IM-NEXT: sext.w a1, a1
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
  %1 = sext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = mul i64 %1, %2
  %4 = lshr i64 %3, 32
  %5 = trunc i64 %4 to i32
  ret i32 %5
}

define i32 @mulhs_positive_constant(i32 %a) nounwind {
; RV32I-LABEL: mulhs_positive_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: slli a2, a0, 2
; RV32I-NEXT: srli a3, a0, 30
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: sltu a0, a0, a2
; RV32I-NEXT: slli a2, a1, 2
; RV32I-NEXT: or a2, a2, a3
; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhs_positive_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a1, 5
; RV32IM-NEXT: mulh a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhs_positive_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhs_positive_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: sext.w a0, a0
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
  %1 = sext i32 %a to i64
  %2 = mul i64 %1, 5
  %3 = lshr i64 %2, 32
  %4 = trunc i64 %3 to i32
  ret i32 %4
}

define i32 @mulhs_negative_constant(i32 %a) nounwind {
; RV32I-LABEL: mulhs_negative_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: slli a2, a0, 2
; RV32I-NEXT: srli a3, a0, 30
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: slli a4, a1, 2
; RV32I-NEXT: sltu a2, a0, a2
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: add a1, a3, a1
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhs_negative_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a1, -5
; RV32IM-NEXT: mulh a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhs_negative_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhs_negative_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: sext.w a0, a0
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: neg a0, a0
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
  %1 = sext i32 %a to i64
  %2 = mul i64 %1, -5
  %3 = lshr i64 %2, 32
  %4 = trunc i64 %3 to i32
  ret i32 %4
}

define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
; RV32I-LABEL: mulhu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhu:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mulhu a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhu:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
  %1 = zext i32 %a to i64
  %2 = zext i32 %b to i64
  %3 = mul i64 %1, %2
  %4 = lshr i64 %3, 32
  %5 = trunc i64 %4 to i32
  ret i32 %5
}

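; MULHSU multiplies a signed rs1 by an unsigned rs2. Here %a is the zero-extended
; (unsigned) operand and %b the sign-extended one, so the RV32IM check expects
; the operands swapped: mulhsu a0, a1, a0.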
define i32 @mulhsu(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mulhsu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: srai a3, a1, 31
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhsu:
; RV32IM: # %bb.0:
; RV32IM-NEXT: mulhsu a0, a1, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhsu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhsu:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: sext.w a1, a1
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
  %1 = zext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = mul i64 %1, %2
  %4 = lshr i64 %3, 32
  %5 = trunc i64 %4 to i32
  ret i32 %5
}

define i32 @mulhu_constant(i32 %a) nounwind {
; RV32I-LABEL: mulhu_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 2
; RV32I-NEXT: add a2, a1, a0
; RV32I-NEXT: sltu a1, a2, a1
; RV32I-NEXT: srli a0, a0, 30
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhu_constant:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a1, 5
; RV32IM-NEXT: mulhu a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhu_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a1, a0, 32
; RV64I-NEXT: srli a0, a0, 30
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhu_constant:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 32
; RV64IM-NEXT: srli a1, a0, 32
; RV64IM-NEXT: srli a0, a0, 30
; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: srli a0, a0, 32
; RV64IM-NEXT: ret
  %1 = zext i32 %a to i64
  %2 = mul i64 %1, 5
  %3 = lshr i64 %2, 32
  %4 = trunc i64 %3 to i32
  ret i32 %4
}

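; Constants of the form 2^m - 2^n (14, 28, 30, 56, 60, 62, ...) can be lowered
; to two shifts and a subtract. RV32I still prefers a __mulsi3 libcall for
; these, while RV64I and the M-extension configurations use the shift/sub
; sequence.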
define i32 @muli32_p14(i32 %a) nounwind {
; RV32I-LABEL: muli32_p14:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 14
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p14:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 1
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p14:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p14:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 1
; RV64IM-NEXT: slli a0, a0, 4
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 14
  ret i32 %1
}

define i32 @muli32_p28(i32 %a) nounwind {
; RV32I-LABEL: muli32_p28:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 28
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p28:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 2
; RV32IM-NEXT: slli a0, a0, 5
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p28:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p28:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: slli a0, a0, 5
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 28
  ret i32 %1
}

define i32 @muli32_p30(i32 %a) nounwind {
; RV32I-LABEL: muli32_p30:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 30
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p30:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 1
; RV32IM-NEXT: slli a0, a0, 5
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p30:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p30:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 1
; RV64IM-NEXT: slli a0, a0, 5
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 30
  ret i32 %1
}

define i32 @muli32_p56(i32 %a) nounwind {
; RV32I-LABEL: muli32_p56:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 56
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p56:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 3
; RV32IM-NEXT: slli a0, a0, 6
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p56:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 3
; RV64I-NEXT: slli a0, a0, 6
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p56:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 3
; RV64IM-NEXT: slli a0, a0, 6
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 56
  ret i32 %1
}

define i32 @muli32_p60(i32 %a) nounwind {
; RV32I-LABEL: muli32_p60:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 60
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p60:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 2
; RV32IM-NEXT: slli a0, a0, 6
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p60:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 2
; RV64I-NEXT: slli a0, a0, 6
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p60:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: slli a0, a0, 6
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 60
  ret i32 %1
}

define i32 @muli32_p62(i32 %a) nounwind {
; RV32I-LABEL: muli32_p62:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 62
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p62:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 1
; RV32IM-NEXT: slli a0, a0, 6
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p62:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 1
; RV64I-NEXT: slli a0, a0, 6
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p62:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 1
; RV64IM-NEXT: slli a0, a0, 6
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 62
  ret i32 %1
}

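; 2^n +/- 1 multiplies (65 and 63 here) need only a single shift plus an add or
; sub, so even RV32I avoids the libcall; the i64 versions additionally carry the
; low-word overflow into the high word.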
define i32 @muli32_p65(i32 %a) nounwind {
; RV32I-LABEL: muli32_p65:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 6
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_p65:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 6
; RV32IM-NEXT: add a0, a1, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p65:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p65:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: addw a0, a1, a0
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 65
  ret i32 %1
}

define i32 @muli32_p63(i32 %a) nounwind {
; RV32I-LABEL: muli32_p63:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 6
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_p63:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 6
; RV32IM-NEXT: sub a0, a1, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p63:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p63:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: subw a0, a1, a0
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 63
  ret i32 %1
}

define i64 @muli64_p65(i64 %a) nounwind {
; RV32I-LABEL: muli64_p65:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 6
; RV32I-NEXT: srli a3, a0, 26
; RV32I-NEXT: slli a4, a1, 6
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sltu a2, a0, a2
; RV32I-NEXT: add a1, a3, a1
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_p65:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a2, 65
; RV32IM-NEXT: slli a3, a1, 6
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: slli a3, a0, 6
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: add a0, a3, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_p65:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_p65:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: ret
  %1 = mul i64 %a, 65
  ret i64 %1
}

define i64 @muli64_p63(i64 %a) nounwind {
; RV32I-LABEL: muli64_p63:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 6
; RV32I-NEXT: srli a3, a0, 26
; RV32I-NEXT: slli a4, a1, 6
; RV32I-NEXT: sltu a5, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a5
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_p63:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a2, 63
; RV32IM-NEXT: slli a3, a1, 6
; RV32IM-NEXT: sub a1, a3, a1
; RV32IM-NEXT: slli a3, a0, 6
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: sub a0, a3, a0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_p63:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_p63:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: sub a0, a1, a0
; RV64IM-NEXT: ret
  %1 = mul i64 %a, 63
  ret i64 %1
}

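; Negative counterparts: -63 is x - (x << 6) and -65 is -x - (x << 6), so these
; also stay shift/add based rather than materializing the constant.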
define i32 @muli32_m63(i32 %a) nounwind {
; RV32I-LABEL: muli32_m63:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 6
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_m63:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 6
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_m63:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m63:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, -63
  ret i32 %1
}

define i32 @muli32_m65(i32 %a) nounwind {
; RV32I-LABEL: muli32_m65:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 6
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_m65:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 6
; RV32IM-NEXT: neg a0, a0
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_m65:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: negw a0, a0
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m65:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: negw a0, a0
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, -65
  ret i32 %1
}

define i64 @muli64_m63(i64 %a) nounwind {
; RV32I-LABEL: muli64_m63:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 6
; RV32I-NEXT: srli a3, a0, 26
; RV32I-NEXT: slli a4, a1, 6
; RV32I-NEXT: sltu a5, a0, a2
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a5
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_m63:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a2, a1, 6
; RV32IM-NEXT: sub a1, a1, a2
; RV32IM-NEXT: li a2, -63
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: sub a2, a2, a0
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: slli a2, a0, 6
; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_m63:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_m63:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, -63
  ret i64 %1
}

define i64 @muli64_m65(i64 %a) nounwind {
; RV32I-LABEL: muli64_m65:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a0, 6
; RV32I-NEXT: srli a3, a0, 26
; RV32I-NEXT: slli a4, a1, 6
; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: or a3, a4, a3
; RV32I-NEXT: sltu a2, a0, a2
; RV32I-NEXT: add a1, a3, a1
; RV32I-NEXT: snez a3, a0
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a2, a3
; RV32I-NEXT: sub a1, a2, a1
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_m65:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a2, a1, 6
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: li a2, -65
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: sub a2, a2, a0
; RV32IM-NEXT: sub a1, a2, a1
; RV32IM-NEXT: slli a2, a0, 6
; RV32IM-NEXT: neg a0, a0
; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_m65:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 6
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_m65:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 6
; RV64IM-NEXT: neg a0, a0
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, -65
  ret i64 %1
}

define i32 @muli32_p384(i32 %a) nounwind {
; RV32I-LABEL: muli32_p384:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 384
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p384:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 7
; RV32IM-NEXT: slli a0, a0, 9
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p384:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 7
; RV64I-NEXT: slli a0, a0, 9
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p384:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 7
; RV64IM-NEXT: slli a0, a0, 9
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 384
  ret i32 %1
}

define i32 @muli32_p12288(i32 %a) nounwind {
; RV32I-LABEL: muli32_p12288:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 3
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p12288:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 12
; RV32IM-NEXT: slli a0, a0, 14
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p12288:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: slli a0, a0, 14
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p12288:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 12
; RV64IM-NEXT: slli a0, a0, 14
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 12288
  ret i32 %1
}

define i32 @muli32_p4352(i32 %a) nounwind {
; RV32I-LABEL: muli32_p4352:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 8
; RV32I-NEXT: slli a0, a0, 12
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_p4352:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 8
; RV32IM-NEXT: slli a0, a0, 12
; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p4352:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p4352:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 8
; RV64IM-NEXT: slli a0, a0, 12
; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 4352
  ret i32 %1
}

define i32 @muli32_p3840(i32 %a) nounwind {
; RV32I-LABEL: muli32_p3840:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 8
; RV32I-NEXT: slli a0, a0, 12
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_p3840:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 8
; RV32IM-NEXT: slli a0, a0, 12
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p3840:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p3840:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 8
; RV64IM-NEXT: slli a0, a0, 12
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, 3840
  ret i32 %1
}

define i32 @muli32_m3840(i32 %a) nounwind {
; RV32I-LABEL: muli32_m3840:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 12
; RV32I-NEXT: slli a0, a0, 8
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli32_m3840:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 12
; RV32IM-NEXT: slli a0, a0, 8
; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_m3840:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m3840:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 12
; RV64IM-NEXT: slli a0, a0, 8
; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, -3840
  ret i32 %1
}

define i32 @muli32_m4352(i32 %a) nounwind {
; RV32I-LABEL: muli32_m4352:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, -17
; RV32I-NEXT: slli a1, a1, 8
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_m4352:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a1, -17
; RV32IM-NEXT: slli a1, a1, 8
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_m4352:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, -17
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_m4352:
; RV64IM: # %bb.0:
; RV64IM-NEXT: li a1, -17
; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i32 %a, -4352
  ret i32 %1
}

define i64 @muli64_p4352(i64 %a) nounwind {
; RV32I-LABEL: muli64_p4352:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a2, a0, 24
; RV32I-NEXT: slli a3, a1, 8
; RV32I-NEXT: or a2, a3, a2
; RV32I-NEXT: srli a3, a0, 20
; RV32I-NEXT: slli a1, a1, 12
; RV32I-NEXT: or a1, a1, a3
; RV32I-NEXT: slli a3, a0, 8
; RV32I-NEXT: slli a4, a0, 12
; RV32I-NEXT: add a0, a4, a3
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: sltu a2, a0, a4
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_p4352:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a2, 17
; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: mul a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_p4352:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_p4352:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 8
; RV64IM-NEXT: slli a0, a0, 12
; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, 4352
  ret i64 %1
}

define i64 @muli64_p3840(i64 %a) nounwind {
; RV32I-LABEL: muli64_p3840:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a2, a0, 24
; RV32I-NEXT: slli a3, a1, 8
; RV32I-NEXT: or a2, a3, a2
; RV32I-NEXT: srli a3, a0, 20
; RV32I-NEXT: slli a1, a1, 12
; RV32I-NEXT: or a1, a1, a3
; RV32I-NEXT: slli a3, a0, 8
; RV32I-NEXT: slli a0, a0, 12
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: sltu a2, a0, a3
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: sub a0, a0, a3
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_p3840:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a2, a1, 8
; RV32IM-NEXT: slli a1, a1, 12
; RV32IM-NEXT: sub a1, a1, a2
; RV32IM-NEXT: li a2, 15
; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: slli a2, a0, 8
; RV32IM-NEXT: slli a0, a0, 12
; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_p3840:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 8
; RV64I-NEXT: slli a0, a0, 12
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_p3840:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 8
; RV64IM-NEXT: slli a0, a0, 12
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, 3840
  ret i64 %1
}

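; -4352 = -17 << 8 is not a simple shift/add pattern, so RV32I falls back to a
; __muldi3 libcall and RV64I tail-calls it, while the M configurations
; materialize the constant and use mul/mulhu.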
define i64 @muli64_m4352(i64 %a) nounwind {
; RV32I-LABEL: muli64_m4352:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, -17
; RV32I-NEXT: slli a2, a2, 8
; RV32I-NEXT: li a3, -1
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_m4352:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a2, -17
; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: sub a3, a3, a0
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: mul a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_m4352:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, -17
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: tail __muldi3
;
; RV64IM-LABEL: muli64_m4352:
; RV64IM: # %bb.0:
; RV64IM-NEXT: li a1, -17
; RV64IM-NEXT: slli a1, a1, 8
; RV64IM-NEXT: mul a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, -4352
  ret i64 %1
}

define i64 @muli64_m3840(i64 %a) nounwind {
; RV32I-LABEL: muli64_m3840:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a2, a0, 20
; RV32I-NEXT: slli a3, a1, 12
; RV32I-NEXT: or a2, a3, a2
; RV32I-NEXT: srli a3, a0, 24
; RV32I-NEXT: slli a1, a1, 8
; RV32I-NEXT: or a1, a1, a3
; RV32I-NEXT: slli a3, a0, 12
; RV32I-NEXT: slli a0, a0, 8
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: sltu a2, a0, a3
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: sub a0, a0, a3
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli64_m3840:
; RV32IM: # %bb.0:
; RV32IM-NEXT: li a2, -15
; RV32IM-NEXT: slli a2, a2, 8
; RV32IM-NEXT: mul a1, a1, a2
; RV32IM-NEXT: mulhu a3, a0, a2
; RV32IM-NEXT: sub a3, a3, a0
; RV32IM-NEXT: add a1, a3, a1
; RV32IM-NEXT: mul a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_m3840:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 12
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli64_m3840:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 12
; RV64IM-NEXT: slli a0, a0, 8
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
  %1 = mul i64 %a, -3840
  ret i64 %1
}

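; i128 multiplies are not legal on either target, so they are expanded inline.
; For -3840 = (1 << 8) - (1 << 12), RV32I/RV64I use shifts and subtracts with
; borrow propagation across the words; the M configurations build the result
; from mul/mulhu on the parts.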
define i128 @muli128_m3840(i128 %a) nounwind {
; RV32I-LABEL: muli128_m3840:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 4(a1)
; RV32I-NEXT: lw a2, 8(a1)
; RV32I-NEXT: lw a5, 0(a1)
; RV32I-NEXT: lw a4, 12(a1)
; RV32I-NEXT: srli a1, a3, 20
; RV32I-NEXT: slli a6, a2, 12
; RV32I-NEXT: srli a7, a3, 24
; RV32I-NEXT: slli t0, a2, 8
; RV32I-NEXT: srli t1, a2, 20
; RV32I-NEXT: or a1, a6, a1
; RV32I-NEXT: slli a6, a4, 12
; RV32I-NEXT: srli t2, a2, 24
; RV32I-NEXT: slli a4, a4, 8
; RV32I-NEXT: or a2, t0, a7
; RV32I-NEXT: srli a7, a5, 20
; RV32I-NEXT: or a6, a6, t1
; RV32I-NEXT: slli t0, a3, 12
; RV32I-NEXT: or t1, a4, t2
; RV32I-NEXT: srli t2, a5, 24
; RV32I-NEXT: slli t3, a3, 8
; RV32I-NEXT: or a3, t0, a7
; RV32I-NEXT: slli a4, a5, 12
; RV32I-NEXT: slli a5, a5, 8
; RV32I-NEXT: or t0, t3, t2
; RV32I-NEXT: sltu t2, a2, a1
; RV32I-NEXT: sub a6, t1, a6
; RV32I-NEXT: sltu a7, a5, a4
; RV32I-NEXT: sub a6, a6, t2
; RV32I-NEXT: mv t1, a7
; RV32I-NEXT: beq t0, a3, .LBB36_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t1, t0, a3
; RV32I-NEXT: .LBB36_2:
; RV32I-NEXT: sub a2, a2, a1
; RV32I-NEXT: sub a1, t0, a3
; RV32I-NEXT: sub a5, a5, a4
; RV32I-NEXT: sltu a3, a2, t1
; RV32I-NEXT: sub a2, a2, t1
; RV32I-NEXT: sub a1, a1, a7
; RV32I-NEXT: sub a3, a6, a3
; RV32I-NEXT: sw a5, 0(a0)
; RV32I-NEXT: sw a1, 4(a0)
; RV32I-NEXT: sw a2, 8(a0)
; RV32I-NEXT: sw a3, 12(a0)
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli128_m3840:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -16
; RV32IM-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a3, 0(a1)
; RV32IM-NEXT: lw a2, 4(a1)
; RV32IM-NEXT: lw a4, 8(a1)
; RV32IM-NEXT: lw a1, 12(a1)
; RV32IM-NEXT: li a5, -15
; RV32IM-NEXT: li a6, -1
; RV32IM-NEXT: slli a5, a5, 8
; RV32IM-NEXT: mulhu a7, a3, a5
; RV32IM-NEXT: mul t0, a2, a5
; RV32IM-NEXT: mulhu t1, a2, a5
; RV32IM-NEXT: neg t2, a3
; RV32IM-NEXT: mulhu t3, a3, a6
; RV32IM-NEXT: mul t4, a4, a5
; RV32IM-NEXT: neg t5, a2
; RV32IM-NEXT: mulhu a6, a2, a6
; RV32IM-NEXT: mul a1, a1, a5
; RV32IM-NEXT: mulhu t6, a4, a5
; RV32IM-NEXT: add s0, a3, a2
; RV32IM-NEXT: mul a5, a3, a5
; RV32IM-NEXT: add a7, t0, a7
; RV32IM-NEXT: sub t4, t4, a3
; RV32IM-NEXT: sub a4, t6, a4
; RV32IM-NEXT: sub t6, t3, s0
; RV32IM-NEXT: sltu t0, a7, t0
; RV32IM-NEXT: sub a3, a7, a3
; RV32IM-NEXT: sltu a7, t4, t2
; RV32IM-NEXT: add a1, a4, a1
; RV32IM-NEXT: add t0, t1, t0
; RV32IM-NEXT: sltu a4, a3, t2
; RV32IM-NEXT: add a1, t6, a1
; RV32IM-NEXT: add a4, t3, a4
; RV32IM-NEXT: add a1, a1, a7
; RV32IM-NEXT: add a4, t0, a4
; RV32IM-NEXT: sub a2, a4, a2
; RV32IM-NEXT: sltu a4, a4, t0
; RV32IM-NEXT: add t4, a2, t4
; RV32IM-NEXT: sltu a7, a2, t5
; RV32IM-NEXT: add a4, a6, a4
; RV32IM-NEXT: sltu a2, t4, a2
; RV32IM-NEXT: add a4, a4, a7
; RV32IM-NEXT: add a1, a4, a1
; RV32IM-NEXT: add a1, a1, a2
; RV32IM-NEXT: sw a5, 0(a0)
; RV32IM-NEXT: sw a3, 4(a0)
; RV32IM-NEXT: sw t4, 8(a0)
; RV32IM-NEXT: sw a1, 12(a0)
; RV32IM-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli128_m3840:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a2, a0, 52
; RV64I-NEXT: slli a3, a1, 12
; RV64I-NEXT: or a2, a3, a2
; RV64I-NEXT: srli a3, a0, 56
; RV64I-NEXT: slli a1, a1, 8
; RV64I-NEXT: or a1, a1, a3
; RV64I-NEXT: slli a3, a0, 12
; RV64I-NEXT: slli a0, a0, 8
; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: sltu a2, a0, a3
; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: sub a0, a0, a3
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli128_m3840:
; RV64IM: # %bb.0:
; RV64IM-NEXT: li a2, -15
; RV64IM-NEXT: slli a2, a2, 8
; RV64IM-NEXT: mul a1, a1, a2
; RV64IM-NEXT: mulhu a3, a0, a2
; RV64IM-NEXT: sub a3, a3, a0
; RV64IM-NEXT: add a1, a3, a1
; RV64IM-NEXT: mul a0, a0, a2
; RV64IM-NEXT: ret
  %1 = mul i128 %a, -3840
  ret i128 %1
}

define i128 @muli128_m63(i128 %a) nounwind {
; RV32I-LABEL: muli128_m63:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a2, 8(a1)
; RV32I-NEXT: lw a1, 12(a1)
; RV32I-NEXT: slli a6, a3, 6
; RV32I-NEXT: srli a5, a3, 26
; RV32I-NEXT: slli t0, a4, 6
; RV32I-NEXT: sltu a7, a3, a6
; RV32I-NEXT: or t0, t0, a5
; RV32I-NEXT: mv a5, a7
; RV32I-NEXT: beq a4, t0, .LBB37_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a5, a4, t0
; RV32I-NEXT: .LBB37_2:
; RV32I-NEXT: srli t1, a4, 26
; RV32I-NEXT: slli t2, a2, 6
; RV32I-NEXT: srli t3, a2, 26
; RV32I-NEXT: slli t4, a1, 6
; RV32I-NEXT: sub a4, a4, t0
; RV32I-NEXT: sub a3, a3, a6
; RV32I-NEXT: or a6, t2, t1
; RV32I-NEXT: or t0, t4, t3
; RV32I-NEXT: sub a4, a4, a7
; RV32I-NEXT: sub a7, a2, a6
; RV32I-NEXT: sltu a2, a2, a6
; RV32I-NEXT: sub a1, a1, t0
; RV32I-NEXT: sltu a6, a7, a5
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: sub a2, a7, a5
; RV32I-NEXT: sub a1, a1, a6
; RV32I-NEXT: sw a3, 0(a0)
; RV32I-NEXT: sw a4, 4(a0)
; RV32I-NEXT: sw a2, 8(a0)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muli128_m63:
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -16
; RV32IM-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; RV32IM-NEXT: lw a3, 0(a1)
; RV32IM-NEXT: lw a2, 4(a1)
; RV32IM-NEXT: lw a4, 8(a1)
; RV32IM-NEXT: lw a1, 12(a1)
; RV32IM-NEXT: li a5, -63
; RV32IM-NEXT: li a6, -1
; RV32IM-NEXT: mulhu a7, a3, a5
; RV32IM-NEXT: slli t0, a2, 6
; RV32IM-NEXT: mulhu t1, a2, a5
; RV32IM-NEXT: neg t2, a3
; RV32IM-NEXT: mulhu t3, a3, a6
; RV32IM-NEXT: slli t4, a4, 6
; RV32IM-NEXT: sub t5, a4, a3
; RV32IM-NEXT: neg t6, a2
; RV32IM-NEXT: mulhu a6, a2, a6
; RV32IM-NEXT: slli s0, a1, 6
; RV32IM-NEXT: mulhu a5, a4, a5
; RV32IM-NEXT: add s1, a3, a2
; RV32IM-NEXT: sub t4, t5, t4
; RV32IM-NEXT: slli t5, a3, 6
; RV32IM-NEXT: sub t0, a2, t0
; RV32IM-NEXT: sub a1, a1, s0
; RV32IM-NEXT: sub a5, a5, a4
; RV32IM-NEXT: sub a4, t3, s1
; RV32IM-NEXT: sub t5, a3, t5
; RV32IM-NEXT: add a7, t0, a7
; RV32IM-NEXT: sltu s0, t4, t2
; RV32IM-NEXT: add a1, a5, a1
; RV32IM-NEXT: sltu a5, a7, t0
; RV32IM-NEXT: sub a3, a7, a3
; RV32IM-NEXT: add a1, a4, a1
; RV32IM-NEXT: add a5, t1, a5
; RV32IM-NEXT: sltu a4, a3, t2
; RV32IM-NEXT: add a1, a1, s0
; RV32IM-NEXT: add a4, t3, a4
; RV32IM-NEXT: add a4, a5, a4
; RV32IM-NEXT: sub a2, a4, a2
; RV32IM-NEXT: sltu a4, a4, a5
; RV32IM-NEXT: add t4, a2, t4
; RV32IM-NEXT: sltu a5, a2, t6
; RV32IM-NEXT: add a4, a6, a4
; RV32IM-NEXT: sltu a2, t4, a2
; RV32IM-NEXT: add a4, a4, a5
; RV32IM-NEXT: add a1, a4, a1
; RV32IM-NEXT: add a1, a1, a2
; RV32IM-NEXT: sw t5, 0(a0)
; RV32IM-NEXT: sw a3, 4(a0)
; RV32IM-NEXT: sw t4, 8(a0)
; RV32IM-NEXT: sw a1, 12(a0)
; RV32IM-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli128_m63:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a0, 6
; RV64I-NEXT: srli a3, a0, 58
; RV64I-NEXT: slli a4, a1, 6
; RV64I-NEXT: sltu a5, a0, a2
; RV64I-NEXT: or a3, a4, a3
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a5
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli128_m63:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a2, a1, 6
; RV64IM-NEXT: sub a1, a1, a2
; RV64IM-NEXT: li a2, -63
; RV64IM-NEXT: mulhu a2, a0, a2
; RV64IM-NEXT: sub a2, a2, a0
; RV64IM-NEXT: add a1, a2, a1
; RV64IM-NEXT: slli a2, a0, 6
; RV64IM-NEXT: sub a0, a0, a2
; RV64IM-NEXT: ret
  %1 = mul i128 %a, -63
  ret i128 %1
}

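; High half of a 64x64-bit unsigned-by-signed product. RV64IM can use a single
; mulhsu; RV32 has no 64-bit mulh*, so the product is built from 32-bit
; mul/mulhu pieces (or repeated __muldi3 calls without M), and RV64I calls
; __multi3.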
define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: mulhsu_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -48
; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s9, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s2, a3
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: srai s4, a3, 31
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: add s5, a0, s5
; RV32I-NEXT: sltu a0, s5, a0
; RV32I-NEXT: add s7, a1, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: add s5, a0, s5
; RV32I-NEXT: sltu a0, s5, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: add s8, s7, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv s5, a0
; RV32I-NEXT: mv s6, a1
; RV32I-NEXT: add s9, a0, s8
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s4
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: add s2, a0, s2
; RV32I-NEXT: sltu a3, s9, s5
; RV32I-NEXT: sltu a4, s8, s7
; RV32I-NEXT: add a1, a1, s3
; RV32I-NEXT: add a2, s9, s2
; RV32I-NEXT: add a4, s6, a4
; RV32I-NEXT: sltu a0, s2, a0
; RV32I-NEXT: sltu a5, a2, s9
; RV32I-NEXT: add a3, a4, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: add a0, a3, a0
; RV32I-NEXT: add a1, a0, a5
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s9, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 48
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulhsu_i64:
; RV32IM: # %bb.0:
; RV32IM-NEXT: srai a4, a3, 31
; RV32IM-NEXT: mulhu a5, a0, a2
; RV32IM-NEXT: mul a6, a1, a2
; RV32IM-NEXT: mulhu a2, a1, a2
; RV32IM-NEXT: mul a7, a0, a3
; RV32IM-NEXT: mulhu t0, a0, a3
; RV32IM-NEXT: mul t1, a1, a3
; RV32IM-NEXT: mulhu a3, a1, a3
; RV32IM-NEXT: add a5, a6, a5
; RV32IM-NEXT: mul t2, a4, a0
; RV32IM-NEXT: mul a1, a4, a1
; RV32IM-NEXT: mulhu a0, a4, a0
; RV32IM-NEXT: sltu a4, a5, a6
; RV32IM-NEXT: add a5, a7, a5
; RV32IM-NEXT: add a1, a0, a1
; RV32IM-NEXT: add a2, a2, a4
; RV32IM-NEXT: sltu a0, a5, a7
; RV32IM-NEXT: add a0, t0, a0
; RV32IM-NEXT: add a0, a2, a0
; RV32IM-NEXT: add a4, t1, a0
; RV32IM-NEXT: sltu a2, a0, a2
; RV32IM-NEXT: add a0, a4, t2
; RV32IM-NEXT: sltu a5, a4, t1
; RV32IM-NEXT: add a2, a3, a2
; RV32IM-NEXT: sltu a3, a0, a4
; RV32IM-NEXT: add a2, a2, a5
; RV32IM-NEXT: add a1, a1, t2
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: add a1, a1, a3
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulhsu_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: srai a3, a1, 63
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __multi3
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulhsu_i64:
; RV64IM: # %bb.0:
; RV64IM-NEXT: mulhsu a0, a1, a0
; RV64IM-NEXT: ret
  %1 = zext i64 %a to i128
  %2 = sext i64 %b to i128
  %3 = mul i128 %1, %2
  %4 = lshr i128 %3, 64
  %5 = trunc i128 %4 to i64
  ret i64 %5
}

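; Demanded-bits tests: only the low four bits of the result are observable
; (masked by the andi or forced by the ori), so the multiply by 14 folds down
; to a single shift by 1 and an add or sub.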
define i8 @muladd_demand(i8 %x, i8 %y) nounwind {
; RV32I-LABEL: muladd_demand:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: andi a0, a0, 15
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muladd_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: sub a0, a1, a0
; RV32IM-NEXT: andi a0, a0, 15
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muladd_demand:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: andi a0, a0, 15
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muladd_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 1
; RV64IM-NEXT: subw a0, a1, a0
; RV64IM-NEXT: andi a0, a0, 15
; RV64IM-NEXT: ret
  %m = mul i8 %x, 14
  %a = add i8 %y, %m
  %r = and i8 %a, 15
  ret i8 %r
}

define i8 @mulsub_demand(i8 %x, i8 %y) nounwind {
; RV32I-LABEL: mulsub_demand:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: andi a0, a0, 15
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulsub_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: add a0, a1, a0
; RV32IM-NEXT: andi a0, a0, 15
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulsub_demand:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: andi a0, a0, 15
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulsub_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 1
; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: andi a0, a0, 15
; RV64IM-NEXT: ret
  %m = mul i8 %x, 14
  %a = sub i8 %y, %m
  %r = and i8 %a, 15
  ret i8 %r
}

define i8 @muladd_demand_2(i8 %x, i8 %y) nounwind {
; RV32I-LABEL: muladd_demand_2:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: sub a1, a1, a0
; RV32I-NEXT: ori a0, a1, -16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muladd_demand_2:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: sub a1, a1, a0
; RV32IM-NEXT: ori a0, a1, -16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muladd_demand_2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: subw a1, a1, a0
; RV64I-NEXT: ori a0, a1, -16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muladd_demand_2:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 1
; RV64IM-NEXT: subw a1, a1, a0
; RV64IM-NEXT: ori a0, a1, -16
; RV64IM-NEXT: ret
  %m = mul i8 %x, 14
  %a = add i8 %y, %m
  %r = or i8 %a, 240
  ret i8 %r
}

define i8 @mulsub_demand_2(i8 %x, i8 %y) nounwind {
; RV32I-LABEL: mulsub_demand_2:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ori a0, a0, -16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulsub_demand_2:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a0, a0, 1
; RV32IM-NEXT: add a0, a1, a0
; RV32IM-NEXT: ori a0, a0, -16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulsub_demand_2:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ori a0, a0, -16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulsub_demand_2:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a0, a0, 1
; RV64IM-NEXT: add a0, a1, a0
; RV64IM-NEXT: ori a0, a0, -16
; RV64IM-NEXT: ret
  %m = mul i8 %x, 14
  %a = sub i8 %y, %m
  %r = or i8 %a, 240
  ret i8 %r
}

define i64 @muland_demand(i64 %x) nounwind {
; RV32I-LABEL: muland_demand:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a0, a0, -8
; RV32I-NEXT: slli a1, a1, 2
; RV32I-NEXT: srli a1, a1, 2
; RV32I-NEXT: li a2, 12
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: muland_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: andi a0, a0, -8
; RV32IM-NEXT: slli a2, a1, 2
; RV32IM-NEXT: slli a1, a1, 4
; RV32IM-NEXT: sub a1, a1, a2
; RV32IM-NEXT: li a2, 12
; RV32IM-NEXT: mulhu a2, a0, a2
; RV32IM-NEXT: add a1, a2, a1
; RV32IM-NEXT: slli a2, a0, 2
; RV32IM-NEXT: slli a0, a0, 4
; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muland_demand:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, -29
; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: li a1, 12
; RV64I-NEXT: tail __muldi3
;
; RV64IM-LABEL: muland_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: andi a0, a0, -8
; RV64IM-NEXT: slli a1, a0, 2
; RV64IM-NEXT: slli a0, a0, 4
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
  %and = and i64 %x, 4611686018427387896
  %mul = mul i64 %and, 12
  ret i64 %mul
}

define i64 @mulzext_demand(i32 signext %x) nounwind {
; RV32I-LABEL: mulzext_demand:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IM-LABEL: mulzext_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: slli a1, a0, 1
; RV32IM-NEXT: add a1, a1, a0
; RV32IM-NEXT: li a0, 0
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulzext_demand:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 3
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: tail __muldi3
;
; RV64IM-LABEL: mulzext_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: slli a1, a0, 32
; RV64IM-NEXT: slli a0, a0, 34
; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
  %ext = zext i32 %x to i64
  %mul = mul i64 %ext, 12884901888
  ret i64 %mul
}

define i32 @mulfshl_demand(i32 signext %x) nounwind {
; RV32I-LABEL: mulfshl_demand:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a0, a0, 11
; RV32I-NEXT: lui a1, 92808
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: mulfshl_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: srli a0, a0, 11
; RV32IM-NEXT: lui a1, 92808
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulfshl_demand:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a0, a0, 11
; RV64I-NEXT: lui a1, 92808
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulfshl_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: srliw a0, a0, 11
; RV64IM-NEXT: lui a1, 92808
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
  %fshl = tail call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 21)
  %mul = mul i32 %fshl, 380141568
  ret i32 %mul
}

define i32 @mulor_demand(i32 signext %x, i32 signext %y) nounwind {
; RV32I-LABEL: mulor_demand:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 92808
; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: mulor_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: lui a1, 92808
; RV32IM-NEXT: mul a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: mulor_demand:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a1, 92808
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IM-LABEL: mulor_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: lui a1, 92808
; RV64IM-NEXT: mulw a0, a0, a1
; RV64IM-NEXT: ret
  %mul1 = mul i32 %y, 10485760
  %or = or disjoint i32 %mul1, %x
  %mul2 = mul i32 %or, 380141568
  ret i32 %mul2
}