; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

;; Exercise the 'and' LLVM IR: https://llvm.org/docs/LangRef.html#and-instruction

define i1 @and_i1(i1 %a, i1 %b) {
; LA32-LABEL: and_i1:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i1:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i1 %a, %b
  ret i1 %r
}

define i8 @and_i8(i8 %a, i8 %b) {
; LA32-LABEL: and_i8:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i8:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i8 %a, %b
  ret i8 %r
}

define i16 @and_i16(i16 %a, i16 %b) {
; LA32-LABEL: and_i16:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i16:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i16 %a, %b
  ret i16 %r
}

define i32 @and_i32(i32 %a, i32 %b) {
; LA32-LABEL: and_i32:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i32:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i32 %a, %b
  ret i32 %r
}

define i64 @and_i64(i64 %a, i64 %b) {
; LA32-LABEL: and_i64:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    and $a0, $a0, $a2
; LA32-NEXT:    and $a1, $a1, $a3
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i64 %a, %b
  ret i64 %r
}

define i1 @and_i1_0(i1 %b) {
; LA32-LABEL: and_i1_0:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    move $a0, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i1_0:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    move $a0, $zero
; LA64-NEXT:    ret
entry:
  %r = and i1 4, %b
  ret i1 %r
}

define i1 @and_i1_5(i1 %b) {
; LA32-LABEL: and_i1_5:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i1_5:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ret
entry:
  %r = and i1 5, %b
  ret i1 %r
}

define i8 @and_i8_5(i8 %b) {
; LA32-LABEL: and_i8_5:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 5
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i8_5:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 5
; LA64-NEXT:    ret
entry:
  %r = and i8 5, %b
  ret i8 %r
}

define i8 @and_i8_257(i8 %b) {
; LA32-LABEL: and_i8_257:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i8_257:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 1
; LA64-NEXT:    ret
entry:
  %r = and i8 257, %b
  ret i8 %r
}

define i16 @and_i16_5(i16 %b) {
; LA32-LABEL: and_i16_5:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 5
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i16_5:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 5
; LA64-NEXT:    ret
entry:
  %r = and i16 5, %b
  ret i16 %r
}

define i16 @and_i16_0x1000(i16 %b) {
; LA32-LABEL: and_i16_0x1000:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    lu12i.w $a1, 1
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i16_0x1000:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    lu12i.w $a1, 1
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i16 4096, %b
  ret i16 %r
}

define i16 @and_i16_0x10001(i16 %b) {
; LA32-LABEL: and_i16_0x10001:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i16_0x10001:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 1
; LA64-NEXT:    ret
entry:
  %r = and i16 65537, %b
  ret i16 %r
}

define i32 @and_i32_5(i32 %b) {
; LA32-LABEL: and_i32_5:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 5
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i32_5:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 5
; LA64-NEXT:    ret
entry:
  %r = and i32 5, %b
  ret i32 %r
}

define i32 @and_i32_0x1000(i32 %b) {
; LA32-LABEL: and_i32_0x1000:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    lu12i.w $a1, 1
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i32_0x1000:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    lu12i.w $a1, 1
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i32 4096, %b
  ret i32 %r
}

define i32 @and_i32_0x100000001(i32 %b) {
; LA32-LABEL: and_i32_0x100000001:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i32_0x100000001:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 1
; LA64-NEXT:    ret
entry:
  %r = and i32 4294967297, %b
  ret i32 %r
}

define i64 @and_i64_5(i64 %b) {
; LA32-LABEL: and_i64_5:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    andi $a0, $a0, 5
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_5:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    andi $a0, $a0, 5
; LA64-NEXT:    ret
entry:
  %r = and i64 5, %b
  ret i64 %r
}

define i64 @and_i64_0x1000(i64 %b) {
; LA32-LABEL: and_i64_0x1000:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    lu12i.w $a1, 1
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_0x1000:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    lu12i.w $a1, 1
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
entry:
  %r = and i64 4096, %b
  ret i64 %r
}

define signext i32 @and_i32_0xfff0(i32 %a) {
; LA32-LABEL: and_i32_0xfff0:
; LA32:       # %bb.0:
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 4
; LA32-NEXT:    slli.w $a0, $a0, 4
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i32_0xfff0:
; LA64:       # %bb.0:
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 4
; LA64-NEXT:    slli.d $a0, $a0, 4
; LA64-NEXT:    ret
  %b = and i32 %a, 65520
  ret i32 %b
}

define signext i32 @and_i32_0xfff0_twice(i32 %a, i32 %b) {
; LA32-LABEL: and_i32_0xfff0_twice:
; LA32:       # %bb.0:
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 4
; LA32-NEXT:    slli.w $a0, $a0, 4
; LA32-NEXT:    bstrpick.w $a1, $a1, 15, 4
; LA32-NEXT:    slli.w $a1, $a1, 4
; LA32-NEXT:    sub.w $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i32_0xfff0_twice:
; LA64:       # %bb.0:
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 4
; LA64-NEXT:    slli.d $a0, $a0, 4
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 4
; LA64-NEXT:    slli.d $a1, $a1, 4
; LA64-NEXT:    sub.d $a0, $a0, $a1
; LA64-NEXT:    ret
  %c = and i32 %a, 65520
  %d = and i32 %b, 65520
  %e = sub i32 %c, %d
  ret i32 %e
}

define i64 @and_i64_0xfff0(i64 %a) {
; LA32-LABEL: and_i64_0xfff0:
; LA32:       # %bb.0:
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 4
; LA32-NEXT:    slli.w $a0, $a0, 4
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_0xfff0:
; LA64:       # %bb.0:
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 4
; LA64-NEXT:    slli.d $a0, $a0, 4
; LA64-NEXT:    ret
  %b = and i64 %a, 65520
  ret i64 %b
}

define i64 @and_i64_0xfff0_twice(i64 %a, i64 %b) {
; LA32-LABEL: and_i64_0xfff0_twice:
; LA32:       # %bb.0:
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 4
; LA32-NEXT:    slli.w $a1, $a0, 4
; LA32-NEXT:    bstrpick.w $a0, $a2, 15, 4
; LA32-NEXT:    slli.w $a2, $a0, 4
; LA32-NEXT:    sub.w $a0, $a1, $a2
; LA32-NEXT:    sltu $a1, $a1, $a2
; LA32-NEXT:    sub.w $a1, $zero, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_0xfff0_twice:
; LA64:       # %bb.0:
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 4
; LA64-NEXT:    slli.d $a0, $a0, 4
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 4
; LA64-NEXT:    slli.d $a1, $a1, 4
; LA64-NEXT:    sub.d $a0, $a0, $a1
; LA64-NEXT:    ret
  %c = and i64 %a, 65520
  %d = and i64 %b, 65520
  %e = sub i64 %c, %d
  ret i64 %e
}

;; This case is not optimized to `bstrpick + slli`,
;; since the immediate 1044480 can be composed via
;; a single `lu12i.w $rx, 255`.
define i64 @and_i64_0xff000(i64 %a) {
; LA32-LABEL: and_i64_0xff000:
; LA32:       # %bb.0:
; LA32-NEXT:    lu12i.w $a1, 255
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_0xff000:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a1, 255
; LA64-NEXT:    and $a0, $a0, $a1
; LA64-NEXT:    ret
  %b = and i64 %a, 1044480
  ret i64 %b
}

define i64 @and_i64_minus_2048(i64 %a) {
; LA32-LABEL: and_i64_minus_2048:
; LA32:       # %bb.0:
; LA32-NEXT:    bstrins.w $a0, $zero, 10, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_minus_2048:
; LA64:       # %bb.0:
; LA64-NEXT:    bstrins.d $a0, $zero, 10, 0
; LA64-NEXT:    ret
  %b = and i64 %a, -2048
  ret i64 %b
}

;; This case is not optimized to `bstrpick + slli`,
;; since the immediate 0xfff0 has more than 2 uses.
define i64 @and_i64_0xfff0_multiple_times(i64 %a, i64 %b, i64 %c) {
; LA32-LABEL: and_i64_0xfff0_multiple_times:
; LA32:       # %bb.0:
; LA32-NEXT:    lu12i.w $a1, 15
; LA32-NEXT:    ori $a1, $a1, 4080
; LA32-NEXT:    and $a0, $a0, $a1
; LA32-NEXT:    and $a2, $a2, $a1
; LA32-NEXT:    and $a3, $a4, $a1
; LA32-NEXT:    sltu $a1, $a0, $a2
; LA32-NEXT:    sub.w $a1, $zero, $a1
; LA32-NEXT:    sub.w $a0, $a0, $a2
; LA32-NEXT:    mul.w $a2, $a2, $a3
; LA32-NEXT:    xor $a0, $a0, $a2
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_0xfff0_multiple_times:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a3, 15
; LA64-NEXT:    ori $a3, $a3, 4080
; LA64-NEXT:    and $a0, $a0, $a3
; LA64-NEXT:    and $a1, $a1, $a3
; LA64-NEXT:    and $a2, $a2, $a3
; LA64-NEXT:    sub.d $a0, $a0, $a1
; LA64-NEXT:    mul.d $a1, $a1, $a2
; LA64-NEXT:    xor $a0, $a0, $a1
; LA64-NEXT:    ret
  %d = and i64 %a, 65520
  %e = and i64 %b, 65520
  %f = and i64 %c, 65520
  %g = sub i64 %d, %e
  %h = mul i64 %e, %f
  %i = xor i64 %g, %h
  ret i64 %i
}

define i64 @and_i64_0xffffffffff00ffff(i64 %a) {
; LA32-LABEL: and_i64_0xffffffffff00ffff:
; LA32:       # %bb.0:
; LA32-NEXT:    bstrins.w $a0, $zero, 23, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: and_i64_0xffffffffff00ffff:
; LA64:       # %bb.0:
; LA64-NEXT:    bstrins.d $a0, $zero, 23, 16
; LA64-NEXT:    ret
  %b = and i64 %a, 18446744073692839935
  ret i64 %b
}

define i32 @and_add_lsr(i32 %x, i32 %y) {
; LA32-LABEL: and_add_lsr:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -1
; LA32-NEXT:    srli.w $a1, $a1, 20
; LA32-NEXT:    and $a0, $a1, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: and_add_lsr:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.w $a0, $a0, -1
; LA64-NEXT:    bstrpick.d $a1, $a1, 31, 20
; LA64-NEXT:    and $a0, $a1, $a0
; LA64-NEXT:    ret
  %1 = add i32 %x, 4095
  %2 = lshr i32 %y, 20
  %r = and i32 %2, %1
  ret i32 %r
}