/*	$NetBSD: uvm_device.c,v 1.80 2022/07/07 13:27:02 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.80 2022/07/07 13:27:02 riastradh Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>
#include <uvm/uvm_pmap.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static kmutex_t udv_lock __cacheline_aligned;
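/* udv_lock serializes access to udv_list and to the HOLD/WANTED flags. */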

/*
 * functions
 */

static void	udv_init(void);
static void	udv_reference(struct uvm_object *);
static void	udv_detach(struct uvm_object *);
static int	udv_fault(struct uvm_faultinfo *, vaddr_t,
			  struct vm_page **, int, int, vm_prot_t,
			  int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_deviceops = {
	.pgo_init = udv_init,
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
};
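
/*
 * note that there is no pgo_get: device mappings never carry vm_pages,
 * so faults are handled directly by the pgo_fault hook (udv_fault below).
 */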

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

static void
udv_init(void)
{
	LIST_INIT(&udv_list);
	mutex_init(&udv_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.   allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */
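/*
 * (udv_attach is normally reached when a character device is being
 * memory-mapped; the off/size arguments are only used to pre-validate
 * the requested range against the driver's d_mmap entry point.)
 */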

struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot,
    voff_t off,		/* used only for access check */
    vsize_t size	/* used only for access check */)
{
	struct uvm_device *udv, *lcv;
	const struct cdevsw *cdev;
	dev_mmap_t *mapfn;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(device=%#jx)", device,0,0,0);

	KASSERT(size > 0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	cdev = cdevsw_lookup(device);
	if (cdev == NULL) {
		return NULL;
	}
	mapfn = cdev->d_mmap;
	if (mapfn == NULL || mapfn == nommap) {
		return NULL;
	}

	/*
	 * Negative offsets on the object are not allowed, unless the
	 * device has affirmatively set D_NEGOFFSAFE.
	 */
	if ((cdev->d_flag & D_NEGOFFSAFE) == 0 && off != UVM_UNKNOWN_OFFSET) {
		if (off < 0)
			return NULL;
#if SIZE_MAX > UINT32_MAX	/* XXX -Wtype-limits */
		if (size > __type_max(voff_t))
			return NULL;
#endif
		if (off > __type_max(voff_t) - size)
			return NULL;
	}

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */
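	/*
	 * A minimal sketch of the driver side (hypothetical "mydev" driver,
	 * names and sizes illustrative only): d_mmap returns (paddr_t)-1
	 * for offsets that may not be mapped, and otherwise a
	 * machine-dependent cookie (e.g. from bus_space_mmap()) that
	 * udv_fault later turns into a physical address and pmap flags:
	 *
	 *	paddr_t
	 *	mydev_mmap(dev_t dev, off_t off, int prot)
	 *	{
	 *		struct mydev_softc *sc =
	 *		    device_lookup_private(&mydev_cd, minor(dev));
	 *
	 *		if (sc == NULL || off < 0 || off >= sc->sc_regsize)
	 *			return (paddr_t)-1;
	 *		return bus_space_mmap(sc->sc_bst, sc->sc_regbase,
	 *		    off, prot, BUS_SPACE_MAP_LINEAR);
	 *	}
	 */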
	do {
		KASSERTMSG((off % PAGE_SIZE) == 0, "off=%jd", (intmax_t)off);
		KASSERTMSG(size >= PAGE_SIZE, "size=%"PRIuVSIZE, size);
		if (cdev_mmap(device, off, accessprot) == -1)
			return NULL;
		KASSERT(off <= __type_max(voff_t) - PAGE_SIZE ||
		    (cdev->d_flag & D_NEGOFFSAFE) != 0);
		if (__predict_false(off > __type_max(voff_t) - PAGE_SIZE)) {
			/*
			 * off += PAGE_SIZE, with two's-complement
			 * wraparound, or
			 *
			 *	off += PAGE_SIZE - 2*(VOFF_MAX + 1).
			 */
			CTASSERT(MIN_PAGE_SIZE >= 2);
			off -= __type_max(voff_t);
			off += PAGE_SIZE - 2;
			off -= __type_max(voff_t);
		} else {
			off += PAGE_SIZE;
		}
		size -= PAGE_SIZE;
	} while (size != 0);

	/*
	 * keep looping until we get it
	 */

	for (;;) {

		/*
		 * first, attempt to find it on the main list
		 */

		mutex_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mutex_exit(&udv_lock);
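
			/*
			 * note: with UVM_DEVICE_HOLD set the udv cannot be
			 * freed out from under us, so it is safe to drop
			 * udv_lock before taking the object's vmobjlock.
			 */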

			/*
			 * bump reference count, unhold, return.
			 */

			rw_enter(lcv->u_obj.vmobjlock, RW_WRITER);
			lcv->u_obj.uo_refs++;
			rw_exit(lcv->u_obj.vmobjlock);

			mutex_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mutex_exit(&udv_lock);
			return &lcv->u_obj;
		}

		/*
		 * Did not find it on main list.  Need to allocate a new one.
		 */

		mutex_exit(&udv_lock);

		/* Note: both calls may allocate memory and sleep. */
		udv = kmem_alloc(sizeof(*udv), KM_SLEEP);
		uvm_obj_init(&udv->u_obj, &uvm_deviceops, true, 1);

		mutex_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			mutex_exit(&udv_lock);
			uvm_obj_destroy(&udv->u_obj, true);
			kmem_free(udv, sizeof(*udv));
			continue;
		}

		/*
		 * we have it!   init the data structures, add to list
		 * and return.
		 */

		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mutex_exit(&udv_lock);
		return &udv->u_obj;
	}
	/*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object.   Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(struct uvm_object *uobj)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=%#jx, ref = %jd)",
	    (uintptr_t)uobj, uobj->uo_refs,0,0);
	rw_exit(uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */
again:
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		rw_exit(uobj->vmobjlock);
		UVMHIST_LOG(maphist," <- done, uobj=%#jx, ref=%jd",
		    (uintptr_t)uobj,uobj->uo_refs,0,0);
		return;
	}

	/*
	 * is it being held?   if so, wait until others are done.
	 */

	mutex_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		rw_exit(uobj->vmobjlock);
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
		goto again;
	}

	/*
	 * got it!   nuke it now.
	 */

	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mutex_exit(&udv_lock);
	rw_exit(uobj->vmobjlock);

	uvm_obj_destroy(uobj, true);
	kmem_free(udv, sizeof(*udv));
	UVMHIST_LOG(maphist," <- done, freed uobj=%#jx", (uintptr_t)uobj,
	    0, 0, 0);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *	since we don't return vm_pages we need full control over how
 *	the mapping is entered with pmap_enter
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	dev_t device;
	vm_prot_t mapprot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%#jx", flags,0,0,0);

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=%#jx)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	if (cdevsw_lookup(device) == NULL) {
		/* XXX This should not happen */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter.  note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap).   since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;

	/*
	 * loop over the page range entering in as needed
	 */
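	/*
	 * (each page is looked up with cdev_mmap(); the returned cookie is
	 * translated via pmap_phys_address()/pmap_mmap_flags() and entered
	 * with PMAP_CANFAIL so a pmap resource shortage comes back as
	 * ENOMEM and the fault is retried.)
	 */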

	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = cdev_mmap(device, curr_offset, access_type);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    "  MAPPING: device: pm=%#jx, va=%#jx, pa=%#jx, at=%jd",
		    (uintptr_t)ufi->orig_map->pmap, curr_va, paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping.  Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);
			return ENOMEM;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	return retval;
}