; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS
; RUN: llc -mtriple=riscv64 -mattr=+xtheadcondmov < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefix=RV32ZICOND
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefix=RV64ZICOND

define i32 @shl32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: shl32:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB0_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sll a0, a0, a1
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: shl32:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB0_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sllw a0, a0, a1
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: shl32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: sllw a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: shl32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: sllw a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: shl32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: sll a1, a0, a1
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: or a0, a1, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: shl32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: sllw a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = shl i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}

define i32 @ashr32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: ashr32:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB1_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sra a0, a0, a1
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: ashr32:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sraw a0, a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: ashr32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: sraw a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: ashr32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: sraw a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: ashr32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: sra a1, a0, a1
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: or a0, a1, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: ashr32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: sraw a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = ashr i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}

define i32 @lshr32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: lshr32:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB2_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srl a0, a0, a1
; RV32I-NEXT: .LBB2_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: lshr32:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB2_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: srlw a0, a0, a1
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: lshr32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: srlw a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: lshr32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: srlw a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: lshr32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: srl a1, a0, a1
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: or a0, a1, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: lshr32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: srlw a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = lshr i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}

define i32 @sub32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: sub32:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a2, 31
; RV32I-NEXT: srai a2, a2, 31
; RV32I-NEXT: and a1, a2, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: sub32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: sub32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: subw a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: sub32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: subw a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: sub32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: sub a0, a0, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: sub32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: subw a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = sub i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}

define i32 @and32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: and32:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB4_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: and32:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB4_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: and32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: and a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: and32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: and a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: and32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: and a1, a0, a1
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: or a0, a1, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: and32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: and a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = and i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}


define i32 @add32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: add32:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a2, 31
; RV32I-NEXT: srai a2, a2, 31
; RV32I-NEXT: and a1, a2, a1
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: add32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: add32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: addw a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: add32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: addw a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: add32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: add a0, a0, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: add32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: addw a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = add i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}


define i32 @or32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: or32:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a2, 31
; RV32I-NEXT: srai a2, a2, 31
; RV32I-NEXT: and a1, a2, a1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: or32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: or32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: or32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: or a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: or32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: or a0, a0, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: or32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = or i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}

define i32 @xor32(i32 %x, i32 %y, i1 %c) {
; RV32I-LABEL: xor32:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a2, a2, 31
; RV32I-NEXT: srai a2, a2, 31
; RV32I-NEXT: and a1, a2, a1
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: xor32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: xor32:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: xor32:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: xor32:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: xor a0, a0, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: xor32:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: xor a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = xor i32 %x, %y
  %select_ = select i1 %c, i32 %binop, i32 %x
  ret i32 %select_
}

define i64 @shl64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: shl64:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a4, a4, 1
; RV32I-NEXT: addi a5, a2, -32
; RV32I-NEXT: sll a3, a0, a2
; RV32I-NEXT: bltz a5, .LBB8_3
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: beqz a4, .LBB8_4
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: srai a0, a5, 31
; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB8_3:
; RV32I-NEXT: sll a6, a1, a2
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: srli a7, a0, 1
; RV32I-NEXT: srl a2, a7, a2
; RV32I-NEXT: or a2, a6, a2
; RV32I-NEXT: bnez a4, .LBB8_2
; RV32I-NEXT: .LBB8_4:
; RV32I-NEXT: ret
;
; RV64I-LABEL: shl64:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB8_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sll a0, a0, a1
; RV64I-NEXT: .LBB8_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: shl64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: sll a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: shl64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: sll a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: shl64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: sll a3, a1, a2
; RV32ZICOND-NEXT: not a5, a2
; RV32ZICOND-NEXT: srli a6, a0, 1
; RV32ZICOND-NEXT: srl a5, a6, a5
; RV32ZICOND-NEXT: or a3, a3, a5
; RV32ZICOND-NEXT: addi a5, a2, -32
; RV32ZICOND-NEXT: slti a5, a5, 0
; RV32ZICOND-NEXT: czero.eqz a3, a3, a5
; RV32ZICOND-NEXT: sll a2, a0, a2
; RV32ZICOND-NEXT: czero.nez a6, a2, a5
; RV32ZICOND-NEXT: or a3, a3, a6
; RV32ZICOND-NEXT: czero.eqz a2, a2, a5
; RV32ZICOND-NEXT: czero.nez a0, a0, a4
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: or a0, a2, a0
; RV32ZICOND-NEXT: czero.eqz a2, a3, a4
; RV32ZICOND-NEXT: czero.nez a1, a1, a4
; RV32ZICOND-NEXT: or a1, a2, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: shl64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: sll a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = shl i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}

define i64 @ashr64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: ashr64:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a5, a4, 1
; RV32I-NEXT: addi a3, a2, -32
; RV32I-NEXT: sra a4, a1, a2
; RV32I-NEXT: bltz a3, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srai a2, a1, 31
; RV32I-NEXT: mv a3, a4
; RV32I-NEXT: mv a4, a2
; RV32I-NEXT: beqz a5, .LBB9_3
; RV32I-NEXT: j .LBB9_4
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: srl a3, a0, a2
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: slli a6, a1, 1
; RV32I-NEXT: sll a2, a6, a2
; RV32I-NEXT: or a3, a3, a2
; RV32I-NEXT: bnez a5, .LBB9_4
; RV32I-NEXT: .LBB9_3:
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: mv a4, a1
; RV32I-NEXT: .LBB9_4:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: mv a1, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: ashr64:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB9_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sra a0, a0, a1
; RV64I-NEXT: .LBB9_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: ashr64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: sra a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: ashr64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: sra a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: ashr64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: srl a3, a0, a2
; RV32ZICOND-NEXT: not a5, a2
; RV32ZICOND-NEXT: slli a6, a1, 1
; RV32ZICOND-NEXT: sll a5, a6, a5
; RV32ZICOND-NEXT: or a3, a3, a5
; RV32ZICOND-NEXT: addi a5, a2, -32
; RV32ZICOND-NEXT: slti a5, a5, 0
; RV32ZICOND-NEXT: czero.eqz a3, a3, a5
; RV32ZICOND-NEXT: sra a2, a1, a2
; RV32ZICOND-NEXT: czero.nez a6, a2, a5
; RV32ZICOND-NEXT: or a3, a3, a6
; RV32ZICOND-NEXT: srai a6, a1, 31
; RV32ZICOND-NEXT: czero.nez a6, a6, a5
; RV32ZICOND-NEXT: czero.eqz a2, a2, a5
; RV32ZICOND-NEXT: or a2, a2, a6
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: czero.nez a1, a1, a4
; RV32ZICOND-NEXT: or a1, a2, a1
; RV32ZICOND-NEXT: czero.eqz a2, a3, a4
; RV32ZICOND-NEXT: czero.nez a0, a0, a4
; RV32ZICOND-NEXT: or a0, a2, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: ashr64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: sra a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = ashr i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}

define i64 @lshr64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: lshr64:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a4, a4, 1
; RV32I-NEXT: addi a5, a2, -32
; RV32I-NEXT: srl a3, a1, a2
; RV32I-NEXT: bltz a5, .LBB10_3
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: beqz a4, .LBB10_4
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: srai a1, a5, 31
; RV32I-NEXT: and a1, a1, a3
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB10_3:
; RV32I-NEXT: srl a6, a0, a2
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: slli a7, a1, 1
; RV32I-NEXT: sll a2, a7, a2
; RV32I-NEXT: or a2, a6, a2
; RV32I-NEXT: bnez a4, .LBB10_2
; RV32I-NEXT: .LBB10_4:
; RV32I-NEXT: ret
;
; RV64I-LABEL: lshr64:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB10_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: srl a0, a0, a1
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: lshr64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: srl a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: lshr64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: srl a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: lshr64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: srl a3, a0, a2
; RV32ZICOND-NEXT: not a5, a2
; RV32ZICOND-NEXT: slli a6, a1, 1
; RV32ZICOND-NEXT: sll a5, a6, a5
; RV32ZICOND-NEXT: or a3, a3, a5
; RV32ZICOND-NEXT: addi a5, a2, -32
; RV32ZICOND-NEXT: slti a5, a5, 0
; RV32ZICOND-NEXT: czero.eqz a3, a3, a5
; RV32ZICOND-NEXT: srl a2, a1, a2
; RV32ZICOND-NEXT: czero.nez a6, a2, a5
; RV32ZICOND-NEXT: or a3, a3, a6
; RV32ZICOND-NEXT: czero.eqz a2, a2, a5
; RV32ZICOND-NEXT: czero.nez a1, a1, a4
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: or a1, a2, a1
; RV32ZICOND-NEXT: czero.eqz a2, a3, a4
; RV32ZICOND-NEXT: czero.nez a0, a0, a4
; RV32ZICOND-NEXT: or a0, a2, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: lshr64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: srl a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = lshr i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}

define i64 @sub64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: sub64:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a4, a4, 31
; RV32I-NEXT: srai a4, a4, 31
; RV32I-NEXT: and a2, a4, a2
; RV32I-NEXT: sltu a5, a0, a2
; RV32I-NEXT: and a3, a4, a3
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a5
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: sub64:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: sub64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: sub a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: sub64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: sub a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: sub64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: sltu a5, a0, a2
; RV32ZICOND-NEXT: czero.eqz a3, a3, a4
; RV32ZICOND-NEXT: sub a1, a1, a3
; RV32ZICOND-NEXT: sub a1, a1, a5
; RV32ZICOND-NEXT: sub a0, a0, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: sub64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: sub a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = sub i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}

define i64 @and64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: and64:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a4, a4, 1
; RV32I-NEXT: beqz a4, .LBB12_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: and a1, a1, a3
; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: .LBB12_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: and64:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a2, a2, 1
; RV64I-NEXT: beqz a2, .LBB12_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: .LBB12_2:
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: and64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: and a1, a0, a1
; RV64XVENTANACONDOPS-NEXT: vt.maskcn a0, a0, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a1, a0
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: and64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: and a1, a0, a1
; RV64XTHEADCONDMOV-NEXT: th.mvnez a0, a1, a2
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: and64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: and a3, a1, a3
; RV32ZICOND-NEXT: and a2, a0, a2
; RV32ZICOND-NEXT: czero.nez a0, a0, a4
; RV32ZICOND-NEXT: or a0, a2, a0
; RV32ZICOND-NEXT: czero.nez a1, a1, a4
; RV32ZICOND-NEXT: or a1, a3, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: and64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: and a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
  %binop = and i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}


define i64 @add64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: add64:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a4, a4, 31
; RV32I-NEXT: srai a4, a4, 31
; RV32I-NEXT: and a3, a4, a3
; RV32I-NEXT: add a1, a1, a3
; RV32I-NEXT: and a2, a4, a2
; RV32I-NEXT: add a2, a0, a2
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: add a1, a1, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: add64:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: add64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: add a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: add64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: add a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: add64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: czero.eqz a3, a3, a4
; RV32ZICOND-NEXT: add a1, a1, a3
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: add a2, a0, a2
; RV32ZICOND-NEXT: sltu a0, a2, a0
; RV32ZICOND-NEXT: add a1, a1, a0
; RV32ZICOND-NEXT: mv a0, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: add64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: add a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = add i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}


define i64 @or64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: or64:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a4, a4, 31
; RV32I-NEXT: srai a4, a4, 31
; RV32I-NEXT: and a2, a4, a2
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: and a3, a4, a3
; RV32I-NEXT: or a1, a1, a3
; RV32I-NEXT: ret
;
; RV64I-LABEL: or64:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: or64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: or a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: or64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: or a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: or64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: or a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a2, a3, a4
; RV32ZICOND-NEXT: or a1, a1, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: or64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = or i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}

define i64 @xor64(i64 %x, i64 %y, i1 %c) {
; RV32I-LABEL: xor64:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a4, a4, 31
; RV32I-NEXT: srai a4, a4, 31
; RV32I-NEXT: and a2, a4, a2
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: and a3, a4, a3
; RV32I-NEXT: xor a1, a1, a3
; RV32I-NEXT: ret
;
; RV64I-LABEL: xor64:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a2, a2, 63
; RV64I-NEXT: srai a2, a2, 63
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XVENTANACONDOPS-LABEL: xor64:
; RV64XVENTANACONDOPS: # %bb.0:
; RV64XVENTANACONDOPS-NEXT: andi a2, a2, 1
; RV64XVENTANACONDOPS-NEXT: vt.maskc a1, a1, a2
; RV64XVENTANACONDOPS-NEXT: xor a0, a0, a1
; RV64XVENTANACONDOPS-NEXT: ret
;
; RV64XTHEADCONDMOV-LABEL: xor64:
; RV64XTHEADCONDMOV: # %bb.0:
; RV64XTHEADCONDMOV-NEXT: andi a2, a2, 1
; RV64XTHEADCONDMOV-NEXT: th.mveqz a1, zero, a2
; RV64XTHEADCONDMOV-NEXT: xor a0, a0, a1
; RV64XTHEADCONDMOV-NEXT: ret
;
; RV32ZICOND-LABEL: xor64:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: xor a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a2, a3, a4
; RV32ZICOND-NEXT: xor a1, a1, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: xor64:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: xor a0, a0, a1
; RV64ZICOND-NEXT: ret
  %binop = xor i64 %x, %y
  %select_ = select i1 %c, i64 %binop, i64 %x
  ret i64 %select_
}