/*	$OpenBSD: uvm_device.c,v 1.68 2024/12/15 11:02:59 mpi Exp $	*/
/*	$NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#include "drm.h"

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */
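
/*
 * udv_lock protects udv_list and the UVM_DEVICE_HOLD/UVM_DEVICE_WANTED
 * bits in u_flags.  udv_attach() sets UVM_DEVICE_HOLD before dropping
 * udv_lock to take the object's vmobjlock, so it never has to acquire
 * the two locks in the reverse order; anyone who finds HOLD set sleeps
 * with UVM_DEVICE_WANTED and is woken when the hold is cleared.
 */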

LIST_HEAD(, uvm_device) udv_list = LIST_HEAD_INITIALIZER(udv_list);
struct mutex udv_lock = MUTEX_INITIALIZER(IPL_NONE);

/*
 * functions
 */
static void		udv_reference(struct uvm_object *);
static void		udv_detach(struct uvm_object *);
static int		udv_fault(struct uvm_faultinfo *, vaddr_t,
				vm_page_t *, int, int, vm_fault_t,
				vm_prot_t, int);
static boolean_t	udv_flush(struct uvm_object *, voff_t, voff_t,
				int);

/*
 * master pager structure
 */
const struct uvm_pagerops uvm_deviceops = {
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
	.pgo_flush = udv_flush,
};

/*
 * the ops!
 */

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.  allocate a new
 * one if needed.
 *
 * => nothing should be locked so that we can sleep here.
 *
 * The last two arguments (off and size) are only used for access checking.
 */
struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct uvm_device *udv, *lcv;
	paddr_t (*mapfn)(dev_t, off_t, int);
#if NDRM > 0
	struct uvm_object *obj;
#endif

	/*
	 * before we do anything, ensure this device supports mmap
	 */
	mapfn = cdevsw[major(device)].d_mmap;
	if (mapfn == NULL ||
	    mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
	    mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
		return(NULL);

	/*
	 * Negative offsets on the object are not allowed.
	 */
	if (off < 0)
		return(NULL);

#if NDRM > 0
	obj = udv_attach_drm(device, accessprot, off, size);
	if (obj)
		return(obj);
#endif

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX clobbers off and size, but nothing else here needs them.
	 */
	while (size != 0) {
		if ((*mapfn)(device, off, accessprot) == -1)
			return (NULL);
		off += PAGE_SIZE; size -= PAGE_SIZE;
	}

	/*
	 * keep looping until we get it
	 */
	for (;;) {
		/*
		 * first, attempt to find it on the main list
		 */
		mtx_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock
		 * udv_lock.
		 */
		if (lcv) {
			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.  Else, we need to take the HOLD flag
			 * so we don't have to re-order locking here.
			 */
			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				msleep_nsec(lcv, &udv_lock, PVM | PNORELOCK,
				    "udv_attach", INFSLP);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mtx_leave(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */
			rw_enter(lcv->u_obj.vmobjlock, RW_WRITE);
			lcv->u_obj.uo_refs++;
			rw_exit(lcv->u_obj.vmobjlock);

			mtx_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mtx_leave(&udv_lock);
			return(&lcv->u_obj);
		}

		/*
		 * Did not find it on main list.  Need to allocate a new one.
		 */
		mtx_leave(&udv_lock);
		/* NOTE: we could sleep in the following malloc() */
		udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
		uvm_obj_init(&udv->u_obj, &uvm_deviceops, 1);
		mtx_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */
		if (lcv) {
			mtx_leave(&udv_lock);
			uvm_obj_destroy(&udv->u_obj);
			free(udv, M_TEMP, sizeof(*udv));
			continue;
		}

		/*
		 * we have it!  init the data structures, add to list
		 * and return.
		 */
		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mtx_leave(&udv_lock);
		return(&udv->u_obj);
	}
	/*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object.  Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 */
static void
udv_reference(struct uvm_object *uobj)
{
	rw_enter(uobj->vmobjlock, RW_WRITE);
	uobj->uo_refs++;
	rw_exit(uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 */
static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;

	/*
	 * loop until done
	 */
again:
	rw_enter(uobj->vmobjlock, RW_WRITE);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		rw_exit(uobj->vmobjlock);
		return;
	}
	KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt));

	/*
	 * is it being held?  if so, wait until others are done.
	 */
	mtx_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		rw_exit(uobj->vmobjlock);
		msleep_nsec(udv, &udv_lock, PVM | PNORELOCK, "udv_detach",
		    INFSLP);
		goto again;
	}

	/*
	 * got it!  nuke it now.
	 */
	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mtx_leave(&udv_lock);
	rw_exit(uobj->vmobjlock);

	uvm_obj_destroy(uobj);
	free(udv, M_TEMP, sizeof(*udv));
}

/*
 * udv_flush
 *
 * flush pages out of a uvm object.  a no-op for devices.
 */
static boolean_t
udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{

	return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine
 *	since we don't return vm_pages we need full control over the
 *	pmap_enter map in
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */
static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
    int centeridx, vm_fault_t fault_type, vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr;
	int lcv, retval;
	dev_t device;
	paddr_t (*mapfn)(dev_t, off_t, int);
	vm_prot_t mapprot;

	KERNEL_ASSERT_LOCKED();

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EACCES;
	}

	/*
	 * get device map function.
	 */
	device = udv->u_device;
	mapfn = cdevsw[major(device)].d_mmap;

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter.  note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap).  since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */
	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;

	/*
	 * loop over the page range entering in as needed
	 */
	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		paddr = (*mapfn)(device, curr_offset, access_type);
		if (paddr == -1) {
			retval = EACCES; /* XXX */
			break;
		}
		mapprot = ufi->entry->protection;
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != 0) {
			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping.  Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);

			/* sync what we have so far */
			pmap_update(ufi->orig_map->pmap);
			uvm_wait("udv_fault");
			return ERESTART;
		}
	}

	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	pmap_update(ufi->orig_map->pmap);
	return retval;
}
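
/*
 * Illustrative sketch (not part of this file, kept disabled): the device
 * pager only requires that a character device export a d_mmap entry in
 * its cdevsw.  udv_attach() probes it for access checking and udv_fault()
 * calls it again for each faulting page, handing the returned physical
 * address to pmap_enter(); returning -1 marks the offset as unmappable.
 * The function name, the 64KB window size and EXAMPLE_REG_BASE below are
 * hypothetical and for illustration only.
 */
#if 0
paddr_t
examplemmap(dev_t dev, off_t off, int prot)
{
	/* hypothetical device exposing a 64KB register window */
	if (off < 0 || off >= 0x10000)
		return (-1);
	return (EXAMPLE_REG_BASE + off);
}
#endif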