; Regression test: raw-pointer buffer atomic intrinsics on SI (verde) and VI
; (tonga). Checks instruction selection, immediate/register offset folding
; (offset:N, SOFS), glc/slc bits, and s_waitcnt placement between dependent
; atomics. Atomic optimizer is disabled so each intrinsic lowers 1:1.
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs | FileCheck %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs | FileCheck %s

;CHECK-LABEL: {{^}}test1:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0 glc
;CHECK: s_movk_i32 [[SOFS:s[0-9]+]], 0x1ffc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v1, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, v1, s[0:3], 0 offen offset:42 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, off, s[0:3], [[SOFS]] offset:4 glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0{{$}}
;CHECK: buffer_atomic_swap v0, off, s[0:3], 0 glc
define amdgpu_ps float @test1(ptr addrspace(8) inreg %rsrc, i32 %data, i32 %voffset) {
main_body:
  %o1 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.swap.i32(i32 %data, ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  %o3 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.swap.i32(i32 %o1, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %off5 = add i32 %voffset, 42
  %o5 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.swap.i32(i32 %o3, ptr addrspace(8) %rsrc, i32 %off5, i32 0, i32 0)
  %o6 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.swap.i32(i32 %o5, ptr addrspace(8) %rsrc, i32 4, i32 8188, i32 0)
  %unused = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.swap.i32(i32 %o6, ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  %o7 = bitcast i32 %o6 to float
  %out = call float @llvm.amdgcn.raw.ptr.buffer.atomic.swap.f32(float %o7, ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  ret float %out
}

;CHECK-LABEL: {{^}}test2:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_add v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_sub v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_smin v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_umin v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_smax v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_umax v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_and v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_or v0, v1, s[0:3], 0 offen glc slc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_xor v0, v1, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_inc v0, v1, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_dec v0, v1, s[0:3], 0 offen glc
define amdgpu_ps float @test2(ptr addrspace(8) inreg %rsrc, i32 %data, i32 %voffset) {
main_body:
  %t1 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add.i32(i32 %data, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %t2 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.sub.i32(i32 %t1, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 2)
  %t3 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.smin.i32(i32 %t2, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %t4 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.umin.i32(i32 %t3, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 2)
  %t5 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.smax.i32(i32 %t4, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %t6 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.umax.i32(i32 %t5, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 2)
  %t7 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.and.i32(i32 %t6, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %t8 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.or.i32(i32 %t7, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 2)
  %t9 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.xor.i32(i32 %t8, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %t10 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.inc.i32(i32 %t9, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %t11 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.dec.i32(i32 %t10, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %out = bitcast i32 %t11 to float
  ret float %out
}

; Ideally, we would teach tablegen & friends that cmpswap only modifies the
; first vgpr. Since we don't do that yet, the register allocator will have to
; create copies which we don't bother to track here.
;
;CHECK-LABEL: {{^}}test3:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], 0 glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: s_movk_i32 [[SOFS:s[0-9]+]], 0x1ffc
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v2, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, v2, s[0:3], 0 offen offset:44 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], [[SOFS]] offset:4 glc
define amdgpu_ps float @test3(ptr addrspace(8) inreg %rsrc, i32 %data, i32 %cmp, i32 %vindex, i32 %voffset) {
main_body:
  %o1 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(i32 %data, i32 %cmp, ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  %o3 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(i32 %o1, i32 %cmp, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %ofs.5 = add i32 %voffset, 44
  %o5 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(i32 %o3, i32 %cmp, ptr addrspace(8) %rsrc, i32 %ofs.5, i32 0, i32 0)
  %o6 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(i32 %o5, i32 %cmp, ptr addrspace(8) %rsrc, i32 4, i32 8188, i32 0)

; Detecting the no-return variant doesn't work right now because of how the
; intrinsic is replaced by an instruction that feeds into an EXTRACT_SUBREG.
; Since there probably isn't a reasonable use-case of cmpswap that discards
; the return value, that seems okay.
;
; %unused = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(i32 %o6, i32 %cmp, ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  %out = bitcast i32 %o6 to float
  ret float %out
}

;CHECK-LABEL: {{^}}test4:
;CHECK: buffer_atomic_add v0,
define amdgpu_ps float @test4() {
main_body:
  %v = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add.i32(i32 1, ptr addrspace(8) undef, i32 4, i32 0, i32 0)
  %v.float = bitcast i32 %v to float
  ret float %v.float
}

;CHECK-LABEL: {{^}}test5:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_cmpswap_x2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], 0 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;CHECK-DAG: s_movk_i32 [[SOFS:s[0-9]+]], 0x1ffc
;CHECK: buffer_atomic_cmpswap_x2 {{v\[[0-9]+:[0-9]+\]}}, v4, s[0:3], 0 offen glc
;CHECK: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap_x2 {{v\[[0-9]+:[0-9]+\]}}, v4, s[0:3], 0 offen offset:44 glc
;CHECK-DAG: s_waitcnt vmcnt(0)
;CHECK: buffer_atomic_cmpswap_x2 {{v\[[0-9]+:[0-9]+\]}}, off, s[0:3], [[SOFS]] offset:4 glc
define amdgpu_ps float @test5(ptr addrspace(8) inreg %rsrc, i64 %data, i64 %cmp, i32 %vindex, i32 %voffset) {
main_body:
  %o1 = call i64 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i64(i64 %data, i64 %cmp, ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
  %o3 = call i64 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i64(i64 %o1, i64 %cmp, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 0)
  %ofs.5 = add i32 %voffset, 44
  %o5 = call i64 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i64(i64 %o3, i64 %cmp, ptr addrspace(8) %rsrc, i32 %ofs.5, i32 0, i32 0)
  %o6 = call i64 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i64(i64 %o5, i64 %cmp, ptr addrspace(8) %rsrc, i32 4, i32 8188, i32 0)
  %out = sitofp i64 %o6 to float
  ret float %out
}

; Cache-policy bit 31 (volatile) set: result is used, so glc must still be set.
;CHECK-LABEL: {{^}}test_volatile:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_add v0, v1, s[0:3], 0 offen glc{{$}}
;CHECK-DAG: s_waitcnt vmcnt(0)
define amdgpu_ps float @test_volatile(ptr addrspace(8) inreg %rsrc, i32 %data, i32 %voffset) {
main_body:
  %t1 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add.i32(i32 %data, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 -2147483648)
  %out = bitcast i32 %t1 to float
  ret float %out
}

; Same volatile bit but result unused: the no-return form (no glc) is selected.
;CHECK-LABEL: {{^}}test_volatile_noret:
;CHECK-NOT: s_waitcnt
;CHECK: buffer_atomic_add v0, v1, s[0:3], 0 offen{{$}}
define amdgpu_ps void @test_volatile_noret(ptr addrspace(8) inreg %rsrc, i32 %data, i32 %voffset) {
main_body:
  %t1 = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add.i32(i32 %data, ptr addrspace(8) %rsrc, i32 %voffset, i32 0, i32 -2147483648)
  ret void
}

declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.swap.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare float @llvm.amdgcn.raw.ptr.buffer.atomic.swap.f32(float, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.sub.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.smin.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.umin.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.smax.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.umax.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.and.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.or.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.xor.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.inc.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.dec.i32(i32, ptr addrspace(8), i32, i32, i32) #0
declare i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(i32, i32, ptr addrspace(8), i32, i32, i32) #0
declare i64 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i64(i64, i64, ptr addrspace(8), i32, i32, i32) #0

attributes #0 = { nounwind }