/*	$NetBSD: drm_gem_vm.c,v 1.15 2022/07/06 01:12:45 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_vm.c,v 1.15 2022/07/06 01:12:45 riastradh Exp $");

#include <sys/types.h>
#include <sys/file.h>
#include <sys/mman.h>

#include <uvm/uvm_extern.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_legacy.h>
#include <drm/drm_vma_manager.h>

static int drm_gem_mmap_object_locked(struct drm_device *, off_t, size_t,
    int, struct uvm_object **, voff_t *, struct file *);

/*
 * drm_gem_pager_reference(uobj)
 *
 *	UVM pager reference routine: acquire a reference to the GEM
 *	object containing uobj.
 */
void
drm_gem_pager_reference(struct uvm_object *uobj)
{
	struct drm_gem_object *const obj = container_of(uobj,
	    struct drm_gem_object, gemo_uvmobj);

	drm_gem_object_get(obj);
}

/*
 * drm_gem_pager_detach(uobj)
 *
 *	UVM pager detach routine: release a reference to the GEM object
 *	containing uobj.
 */
void
drm_gem_pager_detach(struct uvm_object *uobj)
{
	struct drm_gem_object *const obj = container_of(uobj,
	    struct drm_gem_object, gemo_uvmobj);

	drm_gem_object_put_unlocked(obj);
}

/*
 * drm_gem_or_legacy_mmap_object(dev, byte_offset, nbytes, prot, uobjp,
 *     uoffsetp, file)
 *
 *	Find the uvm_object to map at byte_offset, trying GEM first and
 *	falling back to legacy DRM mappings if no GEM object is
 *	published there.  Returns 0 on success, negative error on
 *	failure.
 */
int
drm_gem_or_legacy_mmap_object(struct drm_device *dev, off_t byte_offset,
    size_t nbytes, int prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	int ret;

	KASSERT(nbytes > 0);

	ret = drm_gem_mmap_object(dev, byte_offset, nbytes, prot, uobjp,
	    uoffsetp, file);
	if (ret)
		return ret;
	if (*uobjp != NULL)
		return 0;

	return drm_legacy_mmap_object(dev, byte_offset, nbytes, prot, uobjp,
	    uoffsetp, file);
}

/*
 * drm_gem_mmap_object(dev, byte_offset, nbytes, prot, uobjp, uoffsetp,
 *     file)
 *
 *	Look up, under dev->struct_mutex, the GEM object published for
 *	mmap at byte_offset.  On success, return 0 and set *uobjp either
 *	to a referenced uvm_object, or to NULL if no GEM object is
 *	published at that offset.
 */
int
drm_gem_mmap_object(struct drm_device *dev, off_t byte_offset, size_t nbytes,
    int prot, struct uvm_object **uobjp, voff_t *uoffsetp, struct file *file)
{
	int ret;

	KASSERT(nbytes > 0);

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_object_locked(dev, byte_offset, nbytes, prot,
	    uobjp, uoffsetp, file);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * drm_gem_mmap_object_locked(dev, byte_offset, nbytes, prot, uobjp,
 *     uoffsetp, fp)
 *
 *	Same as drm_gem_mmap_object, but the caller must hold
 *	dev->struct_mutex.
 */
static int
drm_gem_mmap_object_locked(struct drm_device *dev, off_t byte_offset,
    size_t nbytes, int prot __unused, struct uvm_object **uobjp,
    voff_t *uoffsetp, struct file *fp)
{
	struct drm_file *file = fp->f_data;
	const unsigned long startpage = (byte_offset >> PAGE_SHIFT);
	const unsigned long npages = (nbytes >> PAGE_SHIFT);

	KASSERT(mutex_is_locked(&dev->struct_mutex));
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	KASSERT(prot == (prot & (PROT_READ | PROT_WRITE)));
	KASSERT(0 <= byte_offset);
	KASSERT(byte_offset == (byte_offset & ~(PAGE_SIZE-1)));
	KASSERT(nbytes == (npages << PAGE_SHIFT));

	struct drm_vma_offset_node *const node =
	    drm_vma_offset_exact_lookup(dev->vma_offset_manager, startpage,
		npages);
	if (node == NULL) {
		/* Fall back to vanilla device mappings.  */
		*uobjp = NULL;
		*uoffsetp = (voff_t)-1;
		return 0;
	}

	if (!drm_vma_node_is_allowed(node, file))
		return -EACCES;

	struct drm_gem_object *const obj = container_of(node,
	    struct drm_gem_object, vma_node);
	KASSERT(obj->dev == dev);

	/* Success!  */
	drm_gem_object_get(obj);
	*uobjp = &obj->gemo_uvmobj;
	*uoffsetp = 0;
	return 0;
}
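
/*
 * Usage sketch (illustrative only, not part of the original file):
 * the helper name example_gem_mmap and its -ENODEV fallback policy
 * below are hypothetical; only the drm_gem_mmap_object() calling
 * convention is taken from the code above.  Compiled out with #if 0
 * so it cannot affect the build.
 */
#if 0
static int
example_gem_mmap(struct drm_device *dev, off_t off, size_t len, int prot,
    struct uvm_object **uobjp, voff_t *uoffsetp, struct file *fp)
{
	int ret;

	ret = drm_gem_mmap_object(dev, off, len, prot, uobjp, uoffsetp, fp);
	if (ret)		/* negative errno, e.g. -EACCES */
		return ret;
	if (*uobjp == NULL)	/* no GEM object published at off */
		return -ENODEV;	/* hypothetical policy: no fallback here */

	/* *uobjp holds a reference; *uoffsetp is 0 within the object.  */
	return 0;
}
#endif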