/*	$NetBSD: uvm_device.c,v 1.64 2014/12/14 23:48:58 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.64 2014/12/14 23:48:58 chs Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>
#include <uvm/uvm_pmap.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static kmutex_t udv_lock;

/*
 * functions
 */

static void	udv_init(void);
static void	udv_reference(struct uvm_object *);
static void	udv_detach(struct uvm_object *);
static int	udv_fault(struct uvm_faultinfo *, vaddr_t,
			  struct vm_page **, int, int, vm_prot_t,
			  int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_deviceops = {
	.pgo_init = udv_init,
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
};

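/*
 * Illustrative sketch (not part of this file): the rest of UVM never
 * calls the udv_* functions directly; it dispatches through the pgops
 * pointer stored in each uvm_object.  Dropping a map entry's reference
 * to a device object, for example, looks roughly like:
 *
 *	if (uobj->pgops->pgo_detach)
 *		(*uobj->pgops->pgo_detach)(uobj);
 *
 * and uvm_fault() hands faults on such objects to the pgo_fault hook
 * (udv_fault below) instead of running its usual per-page "get" path.
 */
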
/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

static void
udv_init(void)
{
	LIST_INIT(&udv_list);
	mutex_init(&udv_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.   allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */

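/*
 * Illustrative sketch (not part of this file): a caller such as the
 * mmap(2) machinery in uvm_mmap.c attaches the object and then maps
 * it, roughly like:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = udv_attach(dev, prot, foff, size);
 *	if (uobj == NULL)
 *		return EINVAL;
 *	... uvm_map(map, &addr, size, uobj, foff, align, flags) ...
 *
 * udv_attach() takes the reference that the mapping will own; if a
 * later step fails, the caller drops it via the pgo_detach op.
 */
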
struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot,
    voff_t off,		/* used only for access check */
    vsize_t size	/* used only for access check */)
{
	struct uvm_device *udv, *lcv;
	const struct cdevsw *cdev;
	dev_type_mmap((*mapfn));

	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(device=0x%x)", device,0,0,0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	cdev = cdevsw_lookup(device);
	if (cdev == NULL) {
		return (NULL);
	}
	mapfn = cdev->d_mmap;
	if (mapfn == NULL || mapfn == nommap || mapfn == nullmmap) {
		return(NULL);
	}

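	/*
	 * Illustrative sketch (hypothetical driver, not part of this
	 * file): a character device opts in to mmap by supplying a
	 * d_mmap entry in its cdevsw that translates a byte offset into
	 * a machine-dependent page cookie, or (paddr_t)-1 to reject it,
	 * e.g. for a memory-mapped card:
	 *
	 *	paddr_t
	 *	mydev_mmap(dev_t dev, off_t off, int prot)
	 *	{
	 *		struct mydev_softc *sc =
	 *		    device_lookup_private(&mydev_cd, minor(dev));
	 *
	 *		if (sc == NULL || off < 0 || off >= sc->sc_memsize)
	 *			return (paddr_t)-1;
	 *		return bus_space_mmap(sc->sc_memt, sc->sc_memaddr,
	 *		    off, prot, BUS_SPACE_MAP_LINEAR);
	 *	}
	 */
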
	/*
	 * Negative offsets on the object are not allowed.
	 */

	if ((cdev->d_flag & D_NEGOFFSAFE) == 0 &&
	    off != UVM_UNKNOWN_OFFSET && off < 0)
		return(NULL);

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */

	while (size != 0) {
		if (cdev_mmap(device, off, accessprot) == -1) {
			return (NULL);
		}
		off += PAGE_SIZE; size -= PAGE_SIZE;
	}

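	/*
	 * Worked example (values hypothetical): for off = 0x2000 and
	 * size = 3 * PAGE_SIZE, the loop above probes the driver's
	 * d_mmap at offsets 0x2000, 0x3000 and 0x4000 (with 4KB pages);
	 * if any probe returns -1 the attach fails before an object is
	 * ever created.
	 */
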
	/*
	 * keep looping until we get it
	 */

	for (;;) {

		/*
		 * first, attempt to find it on the main list
		 */

		mutex_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mutex_exit(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */

			mutex_enter(lcv->u_obj.vmobjlock);
			lcv->u_obj.uo_refs++;
			mutex_exit(lcv->u_obj.vmobjlock);

			mutex_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mutex_exit(&udv_lock);
			return(&lcv->u_obj);
		}

		/*
		 * Did not find it on main list.  Need to allocate a new one.
		 */

		mutex_exit(&udv_lock);

		/* Note: both calls may allocate memory and sleep. */
		udv = kmem_alloc(sizeof(*udv), KM_SLEEP);
		uvm_obj_init(&udv->u_obj, &uvm_deviceops, true, 1);

		mutex_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			mutex_exit(&udv_lock);
			uvm_obj_destroy(&udv->u_obj, true);
			kmem_free(udv, sizeof(*udv));
			continue;
		}

		/*
		 * we have it!   init the data structures, add to list
		 * and return.
		 */

		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mutex_exit(&udv_lock);
		return(&udv->u_obj);
	}
	/*NOTREACHED*/
}

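/*
 * Sketch of the hold protocol used above (an illustration of the code,
 * not extra locking rules): UVM_DEVICE_HOLD pins a list entry across
 * the window where udv_lock must be dropped to take vmobjlock, and
 * UVM_DEVICE_WANTED records that a sleeper needs a wakeup.  A lost
 * race therefore plays out roughly as:
 *
 *	thread A: finds udv, sets HOLD, drops udv_lock
 *	thread B: finds udv, sees HOLD, sets WANTED, sleeps on udv
 *	thread A: bumps uo_refs, retakes udv_lock, clears both flags,
 *	          wakeup(udv)
 *	thread B: wakes, retries the lookup from the top of the loop
 */
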
/*
 * udv_reference
 *
 * add a reference to a VM object.   Note that the reference count must
 * already be one (the passed-in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(struct uvm_object *uobj)
{
	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);

	mutex_enter(uobj->vmobjlock);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
	mutex_exit(uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;
	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */
again:
	mutex_enter(uobj->vmobjlock);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
			  uobj,uobj->uo_refs,0,0);
		return;
	}

	/*
	 * is it being held?   if so, wait until others are done.
	 */

	mutex_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		mutex_exit(uobj->vmobjlock);
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
		goto again;
	}

	/*
	 * got it!   nuke it now.
	 */

	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mutex_exit(&udv_lock);
	mutex_exit(uobj->vmobjlock);

	uvm_obj_destroy(uobj, true);
	kmem_free(udv, sizeof(*udv));
	UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *	since we don't return vm_pages, we need full control over the
 *	pmap_enter() call that maps the device page in
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	dev_t device;
	vm_prot_t mapprot;
	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%d", flags,0,0,0);

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return(EIO);
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	if (cdevsw_lookup(device) == NULL) {
		/* XXX This should not happen */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return (EIO);
	}

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter.  note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap).   since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;
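
	/*
	 * Worked example (values hypothetical): with entry->start =
	 * 0x20000000, entry->offset = 0x1000 and a fault at vaddr =
	 * 0x20002000, curr_offset starts at 0x3000 and curr_va at
	 * 0x20002000; both then advance a page at a time in the loop
	 * below, so page lcv is looked up at device offset
	 * 0x3000 + lcv * PAGE_SIZE.
	 */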

	/*
	 * loop over the page range entering in as needed
	 */

	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = cdev_mmap(device, curr_offset, access_type);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%lx, at=%d",
		    ufi->orig_map->pmap, curr_va, paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			/*
			 * pmap_enter() didn't have the resources to
			 * enter this mapping.  Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);
			uvm_wait("udv_fault");
			return (ERESTART);
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	return (retval);
}