/*	$OpenBSD: radeon_prime.c,v 1.4 2015/02/10 06:19:36 jsg Exp $	*/
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <dev/pci/drm/drmP.h>

#include "radeon.h"
#include <dev/pci/drm/radeon_drm.h>

/*
 * Build a scatter/gather table from the BO's backing pages and map it
 * for the importing device.
 */
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

/* Drop the GEM reference that was taken when the buffer was exported. */
static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

/* Per-page CPU mappings of the shared buffer are not supported. */
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

/* Map the whole buffer into kernel address space, refcounting the mapping. */
static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (bo->vmapping_count) {
		bo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	bo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return bo->dma_buf_vmap.virtual;
}

static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	mutex_lock(&dev->struct_mutex);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0) {
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}

static const struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
	.mmap = radeon_gem_prime_mmap,
	.vmap = radeon_gem_prime_vmap,
	.vunmap = radeon_gem_prime_vunmap,
};

/* Wrap an imported sg table in a new GTT-domain buffer object. */
static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret) {
		radeon_bo_unreserve(bo);
		return ERR_PTR(ret);
	}
	radeon_bo_unreserve(bo);
	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}

struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	/*
	 * Fast path: a radeon buffer re-imported into the device that
	 * exported it just needs another GEM reference.
	 */
	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			dma_buf_put(dma_buf);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}