/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */

/** @file drm_vm.c
 * Support code for mmapping of DRM maps.
 */

#include <sys/conf.h>
#include <sys/mutex2.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"

int drm_mmap(struct dev_mmap_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv = NULL;
	drm_local_map_t *map;
	enum drm_map_type type;
	vm_paddr_t phys;

	/* d_mmap gets called twice; we can only reference file_priv during
	 * the first call.  We need to assume that if error is EBADF the
	 * call was successful and the client is authenticated.
	 */
	DRM_LOCK(dev);
	file_priv = drm_find_file_by_proc(dev, curthread);
	DRM_UNLOCK(dev);

	if (!file_priv) {
		DRM_ERROR("Could not find authenticator!\n");
		return EINVAL;
	}

	if (!file_priv->authenticated)
		return EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		drm_device_dma_t *dma = dev->dma;

		spin_lock(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			spin_unlock(&dev->dma_lock);
			/* XXX *paddr = phys; */
			ap->a_result = phys;
			return 0;
		} else {
			spin_unlock(&dev->dma_lock);
			return -1;
		}
	}

	/* A sequential search of a linked list is fine here because:
	 * 1) there will only be about 5-10 entries in the list and,
	 * 2) a DRI client only has to do this mapping once, so it
	 * doesn't have to be optimized for performance, even if the
	 * list was a bit longer.
	 */
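	/*
	 * Identify the map by comparing the handle bits (those above
	 * DRM_MAP_HANDLE_SHIFT) of the requested offset against each
	 * map's handle; the bits below DRM_MAP_HANDLE_SHIFT are the
	 * offset within the map and are masked off further down.
	 */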
	DRM_LOCK(dev);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (offset >> DRM_MAP_HANDLE_SHIFT ==
		    (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT)
			break;
	}

	if (map == NULL) {
		DRM_DEBUG("Can't find map, request offset = %016jx\n",
		    (uintmax_t)offset);
		TAILQ_FOREACH(map, &dev->maplist, link) {
			DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
			    map->offset, (unsigned long)map->handle);
		}
		DRM_UNLOCK(dev);
		return -1;
	}
	if (((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
		DRM_UNLOCK(dev);
		DRM_DEBUG("restricted map\n");
		return -1;
	}
	type = map->type;
	DRM_UNLOCK(dev);

	offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);

	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
#if 0 /* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		phys = map->offset + offset;
		break;
	case _DRM_SCATTER_GATHER:
#if 0 /* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		phys = vtophys((char *)map->virtual + offset);
		break;
	default:
		DRM_ERROR("bad map type %d\n", type);
		return -1;	/* This should never happen. */
	}

	ap->a_result = atop(phys);
	return 0;
}

/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD.
 */
#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER;

MALLOC_DEFINE(M_FICT_PAGES, "", "");

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;

	page_count = (end - start) / PAGE_SIZE;

	fp = kmalloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].flags &= ~(PG_BUSY | PG_UNMANAGED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	kfree(fp, M_FICT_PAGES);
	return (EBUSY);
}
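/*
 * Release a range previously registered with vm_phys_fictitious_reg_range()
 * and free its array of fake pages.
 */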
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			kfree(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}
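/*
 * Illustrative sketch (not compiled; not part of the driver) of how the
 * temporary vm_phys_fictitious_* helpers above fit together.  The names
 * "base", "size" and "off" are hypothetical; only the three helpers and
 * VM_MEMATTR_WRITE_COMBINING come from this file.
 *
 *	error = vm_phys_fictitious_reg_range(base, base + size,
 *	    VM_MEMATTR_WRITE_COMBINING);
 *		register fake pages for an aperture; returns EBUSY once all
 *		VM_PHYS_FICTITIOUS_NSEGS slots are taken
 *
 *	m = vm_phys_fictitious_to_vm_page(base + off);
 *		translate a faulting physical address within the range to
 *		its fake vm_page
 *
 *	vm_phys_fictitious_unreg_range(base, base + size);
 *		tear down the range and free the fake pages
 */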