/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
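
/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Unregisters the MMU notifier (if any) and drops the reference on the
 * underlying buffer object.
 */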
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
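
/**
 * amdgpu_gem_object_create - create an amdgpu BO wrapped in a GEM object
 *
 * @adev: amdgpu_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: preferred placement domain for the allocation
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: optional reservation object to share with the BO
 * @obj: resulting GEM object, NULL on failure
 *
 * On allocation failure this first retries without the CPU-access-required
 * flag and then allows GTT as a fallback for VRAM before giving up.
 */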
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}
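
/**
 * amdgpu_gem_force_release - force-free the GEM handles of all clients
 *
 * @adev: amdgpu_device pointer
 *
 * Walks the device's file list, warns about still-active user space
 * clients and their allocations, and releases every GEM handle they hold.
 */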
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);
	return 0;
}
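
/**
 * amdgpu_gem_object_close - clean up a BO's VM state when a handle closes
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the bo_va reference for this client's VM and, once the last
 * reference is gone, removes the bo_va and clears the freed page table
 * entries.
 */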
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
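
/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: drm device pointer
 * @data: ioctl args (union drm_amdgpu_gem_create)
 * @filp: drm file pointer
 *
 * Validates the requested flags and domains, scales GDS/GWS/OA sizes to
 * bytes, and returns a handle to the newly created buffer object.
 */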
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}
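
/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: drm device pointer
 * @data: ioctl args (struct drm_amdgpu_gem_userptr)
 * @filp: drm file pointer
 *
 * Wraps a page-aligned range of user memory in a GTT-only BO, optionally
 * registering an MMU notifier and pre-validating the pages.
 */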
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}
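
/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a BO handle
 *
 * @filp: drm file pointer
 * @dev: drm device pointer
 * @handle: GEM handle to map
 * @offset_p: resulting mmap offset
 *
 * Userptr BOs and BOs created without CPU access cannot be mapped and
 * return -EPERM.
 */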
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned->signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
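
/**
 * amdgpu_gem_wait_idle_ioctl - wait for a buffer object to become idle
 *
 * @dev: drm device pointer
 * @data: ioctl args (union drm_amdgpu_gem_wait_idle)
 * @filp: drm file pointer
 *
 * Waits on all fences in the BO's reservation object, up to the timeout
 * computed by amdgpu_gem_timeout(), and reports the signaled state back.
 */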
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}
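
/**
 * amdgpu_gem_metadata_ioctl - get or set buffer object metadata
 *
 * @dev: drm device pointer
 * @data: ioctl args (struct drm_amdgpu_gem_metadata)
 * @filp: drm file pointer
 *
 * Reads or writes the tiling flags and the opaque metadata blob attached
 * to a BO.
 */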
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_directories(adev, vm);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
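
/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the GPU virtual address space
 *
 * @dev: drm device pointer
 * @data: ioctl args (struct drm_amdgpu_gem_va)
 * @filp: drm file pointer
 *
 * Handles the MAP, UNMAP, CLEAR and REPLACE operations after checking the
 * requested address range against the reserved areas and the VA hole.
 */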
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_VA_HOLE_START &&
	    args->va_address < AMDGPU_VA_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_VA_HOLE_START,
			AMDGPU_VA_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_VA_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
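
/**
 * amdgpu_gem_op_ioctl - query creation info or change BO placement
 *
 * @dev: drm device pointer
 * @data: ioctl args (struct drm_amdgpu_gem_op)
 * @filp: drm file pointer
 *
 * GET_GEM_CREATE_INFO copies size, alignment, domains and flags back to
 * user space; SET_PLACEMENT updates the preferred and allowed domains.
 */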
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
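
/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: drm file pointer
 * @dev: drm device pointer
 * @args: pitch, size and handle of the resulting buffer
 *
 * Aligns the pitch and size, picks a display-capable domain and creates
 * a CPU-accessible BO for use as a dumb framebuffer.
 */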
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u32 domain;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}
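
/* Print placement, pin count, dma-buf state and flags of one BO. */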
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
	attachment = READ_ONCE(bo->gem_base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}
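
/* Dump all GEM objects of every open DRM file to the debugfs seq_file. */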
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}