/*	$NetBSD: virtgpu_object.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtgpu_object.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include "virtgpu_drv.h"

/*
 * TTM destroy callback: tell the host to drop the resource, free the
 * scatter/gather table, and release the underlying GEM object.
 */
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->hw_res_handle)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

/*
 * Set up a single placement in system (TT) memory; TTM_PL_FLAG_NO_EVICT
 * pins the buffer when requested.
 */
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
					  bool pinned)
{
	u32 c = 1;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;

	vgbo->placement.placement = &vgbo->placement_code;
	vgbo->placement.busy_placement = &vgbo->placement_code;
	vgbo->placement_code.fpfn = 0;
	vgbo->placement_code.lpfn = 0;
	vgbo->placement_code.flags =
		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
	vgbo->placement.num_placement = c;
	vgbo->placement.num_busy_placement = c;
}

int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     unsigned long size, bool kernel, bool pinned,
			     struct virtio_gpu_object **bo_ptr)
{
	struct virtio_gpu_object *bo;
	enum ttm_bo_type type;
	size_t acc_size;
	int ret;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
	if (ret != 0) {
		kfree(bo);
		return ret;
	}
	bo->dumb = false;
	virtio_gpu_init_ttm_placement(bo, pinned);

	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
			  &bo->placement, 0, !kernel, NULL, acc_size,
			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);
	/* ttm_bo_init failure will call the destroy */
	if (ret != 0)
		return ret;

	*bo_ptr = bo;
	return 0;
}

/*
 * Map the whole buffer object into kernel virtual address space,
 * reusing the cached mapping if one already exists.
 */
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->vmap) {
		if (ptr)
			*ptr = bo->vmap;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->vmap;
	return 0;
}

/*
 * Build a scatter/gather table describing the buffer's backing pages,
 * populating the TTM first if necessary.
 */
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo)
{
	int ret;
	struct page **pages = bo->tbo.ttm->pages;
	int nr_pages = bo->tbo.num_pages;

	/* wtf swapping */
	if (bo->pages)
		return 0;

	if (bo->tbo.ttm->state == tt_unpopulated)
		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!bo->pages)
		goto out;

	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;
	return 0;
out:
	kfree(bo->pages);
	bo->pages = NULL;
	return -ENOMEM;
}

void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
{
	sg_free_table(bo->pages);
	kfree(bo->pages);
	bo->pages = NULL;
}

/*
 * Reserve the buffer and wait for any pending GPU work on it to
 * complete; with no_wait set, fail with -EBUSY instead of blocking.
 */
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}