/*	$NetBSD: uvm_bio.c,v 1.80 2013/10/25 20:23:33 martin Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
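
/*
 * Overview: the UBC maps file data into kernel virtual memory through a
 * fixed pool of ubc_nwins windows, each ubc_winsize bytes, carved out of
 * a single kernel mapping of ubc_object.  ubc_alloc() binds a window to a
 * (uvm_object, offset) pair and returns a kernel address inside it,
 * ubc_release() drops that reference, and idle windows are recycled from
 * the inactive queues.  Page faults on a window are resolved by
 * ubc_fault(), which fetches the object's pages via its pager and enters
 * them into the faulting pmap.
 */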

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.80 2013/10/25 20:23:33 martin Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

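/*
 * UBC_HASH: index into ubc_object.hash for the window mapping the given
 * (uvm_object, window-aligned offset) pair.
 */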
#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

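/*
 * UBC_QUEUE: the inactive queue for a window covering the given offset.
 * With PMAP_PREFER there is one queue per cache-alias colour, so that a
 * window recycled for a given offset tends to already have an
 * alias-compatible virtual address; otherwise there is a single queue.
 */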
#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

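/*
 * UBC_UMAP_ADDR: kernel virtual address of the window described by the
 * given ubc_map; windows are laid out contiguously from ubc_object.kva.
 */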
#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};
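
/*
 * A ubc_map is on one of the inactive queues whenever its refcount is
 * zero: ubc_alloc() removes it from the queue when handing the window
 * out, and ubc_release() puts it back once the last reference is
 * dropped.  While it maps an object, it is also on that object's uo_ubc
 * list and in the ubc_object hash.
 */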

static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	struct uvm_object *uobj;
	vm_prot_t mask;
	int error;
	bool rdonly;

	uobj = pg->uobject;
	KASSERT(mutex_owned(uobj->vmobjlock));

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(pg);
		mutex_exit(&uvm_pageqlock);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
	    (pg->flags & PG_RDONLY) != 0) ||
	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	mutex_enter(&uvm_pageqlock);
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file-systems and e.g. tmpfs may return pages
	 * which belong to an underlying UVM object.  In that case, the lock
	 * is shared amongst the objects.
	 */
	mutex_enter(uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			mutex_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			mutex_enter(uobj->vmobjlock);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	mutex_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

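/*
 * A typical consumer (compare ubc_uiomove() below) drives the interface
 * roughly as follows:
 *
 *	vsize_t len = todo;
 *	void *win = ubc_alloc(uobj, off, &len, advice, UBC_WRITE);
 *	error = uiomove(win, len, uio);
 *	ubc_release(win, flags);
 *
 * ubc_alloc() may shorten the request to fit the window, so callers loop
 * until the whole transfer has been done.
 */
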
/*
 * ubc_alloc:  allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	mutex_enter(ubc_object.uobj.vmobjlock);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			kpause("ubc_alloc", false, hz >> 2,
			    ubc_object.uobj.vmobjlock);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				mutex_enter(oobj->vmobjlock);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				mutex_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

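	/*
	 * UBC_FAULTBUSY: the caller will overwrite the window contents, so
	 * fetch the pages now with PGO_OVERWRITE, enter them into the kernel
	 * pmap and keep them busy until ubc_release(); ubc_fault() is then
	 * never involved for this window.
	 */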
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		mutex_enter(uobj->vmobjlock);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					mutex_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				mutex_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

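	/*
	 * The window was filled via UBC_FAULTBUSY: zero the unwritten tail
	 * of the last page, mark the pages dirty, tear down the kernel
	 * mappings and unbusy the pages.
	 */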
	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t slot_offset = umap->writeoff;
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;
		const u_int npages = (round_page(endoff) -
		    trunc_page(slot_offset)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(uobj->vmobjlock);
		mutex_enter(&uvm_pageqlock);
		for (u_int i = 0; i < npages; i++) {
			paddr_t pa;
			bool rv __diagused;

			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			mutex_enter(uobj->vmobjlock);
			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			mutex_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	mutex_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	void *win;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	mutex_enter(ubc_object.uobj.vmobjlock);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	mutex_exit(ubc_object.uobj.vmobjlock);
}
808