; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=hexagon < %s | FileCheck %s

; Check that sub-word (i8) atomic operations are expanded into word-aligned
; load-locked/store-conditional (memw_locked) loops with mask/shift fixup.

%s.0 = type { i8 }
@g0 = internal global i8 0, align 1

; atomicrmw add of an i8 stack object, monotonic ordering.
define void @f0() #0 {
; CHECK-LABEL: f0:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
; CHECK-NEXT: r29 = add(r29,#-8)
; CHECK-NEXT: r1 = #255
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r0 = add(r29,#7)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = and(r0,#3)
; CHECK-NEXT: r0 = and(r0,#-4)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = asl(r2,#3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r1 = asl(r1,r2)
; CHECK-NEXT: r2 = lsl(#2,r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = sub(#-1,r1)
; CHECK-NEXT: }
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r4 = memw_locked(r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = and(r4,r3)
; CHECK-NEXT: r4 = add(r4,r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 |= and(r4,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r0,p0) = r5
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB0_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r29 = add(r29,#8)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %v0 = alloca %s.0
  atomicrmw add ptr %v0, i8 2 monotonic
  ret void
}

; volatile cmpxchg of an i8 global, seq_cst ordering.
define void @f1() #0 {
; CHECK-LABEL: f1:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0: // %entry
; CHECK-NEXT: {
; CHECK-NEXT: r3 = ##g0
; CHECK-NEXT: r1:0 = combine(#1,##255)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = and(r3,#3)
; CHECK-NEXT: r3 = and(r3,#-4)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = asl(r2,#3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r4 = asl(r0,r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r4 = sub(#-1,r4)
; CHECK-NEXT: }
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB1_1: // %cmpxchg.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r5 = memw_locked(r3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r6 = lsr(r5,r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = !bitsclr(r6,r0)
; CHECK-NEXT: if (p0.new) jumpr:nt r31
; CHECK-NEXT: }
; CHECK-NEXT: .LBB1_2: // %cmpxchg.trystore
; CHECK-NEXT: // in Loop: Header=BB1_1 Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r5 = and(r5,r4)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 |= asl(r1,r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r3,p0) = r5
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB1_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.3: // %cmpxchg.end
; CHECK-NEXT: {
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
entry:
  %v0 = cmpxchg volatile ptr @g0, i8 0, i8 1 seq_cst seq_cst
  ret void
}

attributes #0 = { "target-cpu"="hexagonv66" }