/*
 * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andres Rodriguez
 */

#include "amdgpu.h"
#include "amdgpu_ring.h"

static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
				    int hw_ip)
{
	if (!mapper)
		return -EINVAL;

	if (hw_ip >= AMDGPU_MAX_IP_NUM)
		return -EINVAL;

	mapper->hw_ip = hw_ip;
	mutex_init(&mapper->lock);

	memset(mapper->queue_map, 0, sizeof(mapper->queue_map));

	return 0;
}

static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
						 int ring)
{
	return mapper->queue_map[ring];
}

static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
				    int ring, struct amdgpu_ring *pring)
{
	if (WARN_ON(mapper->queue_map[ring])) {
		DRM_ERROR("Unexpected ring re-map\n");
		return -EINVAL;
	}

	mapper->queue_map[ring] = pring;

	return 0;
}

static int amdgpu_identity_map(struct amdgpu_device *adev,
			       struct amdgpu_queue_mapper *mapper,
			       u32 ring,
			       struct amdgpu_ring **out_ring)
{
	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
		*out_ring = &adev->gfx.gfx_ring[ring];
		break;
	case AMDGPU_HW_IP_COMPUTE:
		*out_ring = &adev->gfx.compute_ring[ring];
		break;
	case AMDGPU_HW_IP_DMA:
		*out_ring = &adev->sdma.instance[ring].ring;
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.inst[0].ring;
		break;
	case AMDGPU_HW_IP_VCE:
		*out_ring = &adev->vce.ring[ring];
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		*out_ring = &adev->uvd.inst[0].ring_enc[ring];
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		*out_ring = &adev->vcn.ring_dec;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		*out_ring = &adev->vcn.ring_enc[ring];
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		*out_ring = &adev->vcn.ring_jpeg;
		break;
	default:
		*out_ring = NULL;
		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
		return -EINVAL;
	}

	return amdgpu_update_cached_map(mapper, ring, *out_ring);
}

static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
{
	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		return AMDGPU_RING_TYPE_GFX;
	case AMDGPU_HW_IP_COMPUTE:
		return AMDGPU_RING_TYPE_COMPUTE;
	case AMDGPU_HW_IP_DMA:
		return AMDGPU_RING_TYPE_SDMA;
	case AMDGPU_HW_IP_UVD:
		return AMDGPU_RING_TYPE_UVD;
	case AMDGPU_HW_IP_VCE:
		return AMDGPU_RING_TYPE_VCE;
	default:
		DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
		return -1;
	}
}

static int amdgpu_lru_map(struct amdgpu_device *adev,
			  struct amdgpu_queue_mapper *mapper,
			  u32 user_ring, bool lru_pipe_order,
			  struct amdgpu_ring **out_ring)
{
	int r, i, j;
	int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
	int ring_blacklist[AMDGPU_MAX_RINGS];
	struct amdgpu_ring *ring;

	/* 0 is a valid ring index, so initialize to -1 */
	memset(ring_blacklist, 0xff, sizeof(ring_blacklist));

	for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = mapper->queue_map[i];
		if (ring)
			ring_blacklist[j++] = ring->idx;
	}

	r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
				j, lru_pipe_order, out_ring);
	if (r)
		return r;

	return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
}

/**
 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * Initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	int i, r;

	if (!adev || !mgr)
		return -EINVAL;

	memset(mgr, 0, sizeof(*mgr));

	for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
		r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
		if (r)
			return r;
	}

	return 0;
}
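
/*
 * Illustrative sketch only (kept as a comment, not compiled): roughly how
 * an owner of an amdgpu_device would be expected to pair
 * amdgpu_queue_mgr_init() with amdgpu_queue_mgr_fini() around some
 * longer-lived per-context state.  The example_ctx structure and function
 * names below are hypothetical and exist only to show the call pattern.
 *
 *	struct example_ctx {
 *		struct amdgpu_device *adev;
 *		struct amdgpu_queue_mgr queue_mgr;
 *	};
 *
 *	static int example_ctx_init(struct example_ctx *ctx)
 *	{
 *		// Zeroes the manager and sets up one mapper (own lock,
 *		// empty queue_map) per HW IP type.
 *		return amdgpu_queue_mgr_init(ctx->adev, &ctx->queue_mgr);
 *	}
 *
 *	static void example_ctx_fini(struct example_ctx *ctx)
 *	{
 *		amdgpu_queue_mgr_fini(ctx->adev, &ctx->queue_mgr);
 *	}
 */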

/**
 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * De-initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	return 0;
}

/**
 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 * @hw_ip: HW IP enum
 * @instance: HW instance
 * @ring: user ring id
 * @out_ring: pointer to mapped amdgpu_ring
 *
 * Map a userspace ring id to an appropriate kernel ring. Different
 * policies are configurable at a HW IP level.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 u32 hw_ip, u32 instance, u32 ring,
			 struct amdgpu_ring **out_ring)
{
	int i, r, ip_num_rings = 0;
	struct amdgpu_queue_mapper *mapper;

	if (!adev || !mgr || !out_ring)
		return -EINVAL;

	if (hw_ip >= AMDGPU_MAX_IP_NUM)
		return -EINVAL;

	if (ring >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	mapper = &mgr->mapper[hw_ip];

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		ip_num_rings = adev->gfx.num_gfx_rings;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		ip_num_rings = adev->gfx.num_compute_rings;
		break;
	case AMDGPU_HW_IP_DMA:
		ip_num_rings = adev->sdma.num_instances;
		break;
	case AMDGPU_HW_IP_UVD:
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (!(adev->uvd.harvest_config & (1 << i)))
				ip_num_rings++;
		}
		break;
	case AMDGPU_HW_IP_VCE:
		ip_num_rings = adev->vce.num_rings;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (!(adev->uvd.harvest_config & (1 << i)))
				ip_num_rings++;
		}
		ip_num_rings =
			adev->uvd.num_enc_rings * ip_num_rings;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		ip_num_rings = 1;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		ip_num_rings = adev->vcn.num_enc_rings;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		ip_num_rings = 1;
		break;
	default:
		DRM_DEBUG("unknown ip type: %d\n", hw_ip);
		return -EINVAL;
	}

	if (ring >= ip_num_rings) {
		DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
			  ring, ip_num_rings, hw_ip);
		return -EINVAL;
	}

	mutex_lock(&mapper->lock);

	*out_ring = amdgpu_get_cached_map(mapper, ring);
	if (*out_ring) {
		/* cache hit */
		r = 0;
		goto out_unlock;
	}

	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_UVD:
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_UVD_ENC:
	case AMDGPU_HW_IP_VCN_DEC:
	case AMDGPU_HW_IP_VCN_ENC:
	case AMDGPU_HW_IP_VCN_JPEG:
		r = amdgpu_identity_map(adev, mapper, ring, out_ring);
		break;
	case AMDGPU_HW_IP_DMA:
		r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
		break;
	case AMDGPU_HW_IP_COMPUTE:
		r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
		break;
	default:
		*out_ring = NULL;
		r = -EINVAL;
		DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
	}

out_unlock:
	mutex_unlock(&mapper->lock);
	return r;
}
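
/*
 * Illustrative sketch only (kept as a comment, not compiled): roughly how
 * a command submission path could translate the ip_type/ip_instance/ring
 * triple supplied by userspace (e.g. from a struct drm_amdgpu_cs_chunk_ib)
 * into a kernel ring via amdgpu_queue_mgr_map().  The surrounding
 * variables and error handling are assumptions made for the example.
 *
 *	struct amdgpu_ring *ring;
 *	int r;
 *
 *	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, chunk_ib->ip_type,
 *				 chunk_ib->ip_instance, chunk_ib->ring,
 *				 &ring);
 *	if (r) {
 *		DRM_DEBUG("could not map user ring %u\n", chunk_ib->ring);
 *		return r;
 *	}
 *
 *	// Repeat calls with the same arguments hit the per-mapper cache;
 *	// on a miss, GFX/UVD/VCE/VCN rings map 1:1 to the user index,
 *	// while COMPUTE and DMA pick the least recently used ring of the
 *	// matching type.
 */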