; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP64,GFX90A
; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP64,DPPMOV64,GFX940
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP32,GFX10PLUS
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=GCN,DPP32,GFX10PLUS

; These tests exercise folding of a 64-bit DPP move produced by
; llvm.amdgcn.update.dpp.i64 into its 64-bit VALU user. On targets checked
; with the DPP64 prefix the move is expected to be absorbed into the f64
; instruction itself (e.g. v_ceil_f64_dpp); on DPP32 targets it stays as a
; pair of v_mov_b32_dpp instructions feeding a plain f64 op.

; dpp_ctrl 337 (0x151) appears as row_newbcast:1 on gfx90a/gfx940 and as
; row_share:1 on gfx10+, per the CHECK patterns below.
; GCN-LABEL: {{^}}dpp64_ceil:
; GCN: global_load_{{dwordx2|b64}} [[V:v\[[0-9:]+\]]],
; DPP64: v_ceil_f64_dpp [[V]], [[V]] row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; DPP32-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_share:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
define amdgpu_kernel void @dpp64_ceil(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 337, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %round = tail call double @llvm.ceil.f64(double %tmp1)
  %tmp2 = bitcast double %round to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}

; Same combine as above, but with v_rcp_f64 as the DPP consumer.
; GCN-LABEL: {{^}}dpp64_rcp:
; GCN: global_load_{{dwordx2|b64}} [[V:v\[[0-9:]+\]]],
; DPP64: v_rcp_f64_dpp [[V]], [[V]] row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; DPP32-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_share:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
define amdgpu_kernel void @dpp64_rcp(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 337, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %rcp = call double @llvm.amdgcn.rcp.f64(double %tmp1)
  %tmp2 = bitcast double %rcp to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}

; Negative test: dpp_ctrl 1 is quad_perm:[1,0,0,0], which the CHECK lines
; show is NOT folded into the f64 op on any target — the move stays as two
; v_mov_b32_dpp instructions followed by a separate v_rcp_f64_e32.
; GCN-LABEL: {{^}}dpp64_rcp_unsupported_ctl:
; GCN-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GCN: v_rcp_f64_e32
define amdgpu_kernel void @dpp64_rcp_unsupported_ctl(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 1, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %rcp = fdiv fast double 1.0, %tmp1
  %tmp2 = bitcast double %rcp to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}

; Negative test: a non-fast fdiv expands to a multi-instruction sequence
; (v_div_scale_f64 / v_rcp_f64_e32 below), so the DPP move is not combined
; into a single consumer. gfx940 keeps it as one v_mov_b64_dpp; gfx90a and
; gfx10+ keep a pair of v_mov_b32_dpp.
; GCN-LABEL: {{^}}dpp64_div:
; GCN: global_load_{{dwordx2|b64}} [[V:v\[[0-9:]+\]]],
; DPPMOV64: v_mov_b64_dpp v[{{[0-9:]+}}], [[V]] row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GFX90A-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_newbcast:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GFX10PLUS-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} row_share:1 row_mask:0xf bank_mask:0xf bound_ctrl:1{{$}}
; GCN: v_div_scale_f64
; GCN: v_rcp_f64_e32
define amdgpu_kernel void @dpp64_div(ptr addrspace(1) %arg, i64 %in1) {
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %id
  %load = load i64, ptr addrspace(1) %gep
  %tmp0 = call i64 @llvm.amdgcn.update.dpp.i64(i64 %in1, i64 %load, i32 337, i32 15, i32 15, i1 1) #0
  %tmp1 = bitcast i64 %tmp0 to double
  %rcp = fdiv double 15.0, %tmp1
  %tmp2 = bitcast double %rcp to i64
  store i64 %tmp2, ptr addrspace(1) %gep
  ret void
}

; 64-bit DPP feeding a 64-bit integer add inside an infinite loop; checks
; the move/add expansion each target produces (NOTE(review): presumably a
; regression guard for codegen inside loops — the CHECKs only pin the
; instruction sequence).
; GCN-LABEL: {{^}}dpp64_loop:
; GCN: v_mov_b32_dpp
; DPP64: v_mov_b32_dpp
; GFX90A: v_add_co_u32_e32
; GFX90A: v_addc_co_u32_e32
; GFX940: v_lshl_add_u64
; GFX10PLUS: v_mov_b32_dpp
; GFX10PLUS: v_add_co_u32
; GFX10PLUS: v_add_co_ci_u32_e32
define amdgpu_cs void @dpp64_loop(i64 %arg, i64 %val) {
bb:
  br label %bb1
bb1:
  %i = call i64 @llvm.amdgcn.update.dpp.i64(i64 poison, i64 %val, i32 0, i32 0, i32 0, i1 false)
  %i2 = add i64 %i, %arg
  %i3 = atomicrmw add ptr addrspace(1) null, i64 %i2 monotonic, align 8
  br label %bb1
}

declare i32 @llvm.amdgcn.workitem.id.x()
declare i64 @llvm.amdgcn.update.dpp.i64(i64, i64, i32, i32, i32, i1) #0
declare double @llvm.ceil.f64(double)
declare double @llvm.amdgcn.rcp.f64(double)

attributes #0 = { nounwind readnone convergent }