/*	$NetBSD: uvm_bio.c,v 1.66 2008/11/27 08:46:09 pooka Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.66 2008/11/27 08:46:09 pooka Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

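/*
 * Each ubc_map corresponds to one window of kernel virtual address space
 * within ubc_object.kva; UBC_UMAP_ADDR() turns a ubc_map pointer back into
 * the base address of its window by scaling the array index by the window
 * size.  As an illustrative example only (window size is a tunable and
 * varies by port): with 4 KB pages and ubc_winshift of 13, each window is
 * 8 KB, so &ubc_object.umap[3] corresponds to ubc_object.kva + 3 * 8192.
 */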

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
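
/*
 * While a ubc_map caches a mapping it can be found via the hash table by
 * (uobj, offset); whenever its refcount drops to zero it also sits on one
 * of the inactive queues, from which ubc_alloc() may recycle it for a
 * different window.
 */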

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)
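/*
 * When the kernel is built with the UBC_STATS option, these are attached
 * as ordinary event counters and can be inspected with the usual
 * event-counter tools (e.g. vmstat -e).
 */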

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		bool rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		mutex_enter(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			mutex_exit(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					mutex_exit(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		mutex_enter(&uvm_pageqlock);
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		mutex_exit(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
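
/*
 * The expected call pattern is the one ubc_uiomove() below uses: get a
 * window with ubc_alloc(), copy through it (typically with uiomove()),
 * then drop it with ubc_release().  Roughly (a sketch only, error handling
 * omitted):
 *
 *	while (todo > 0) {
 *		vsize_t bytelen = todo;
 *		void *win;
 *
 *		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, flags);
 *		off += bytelen;
 *		todo -= bytelen;
 *	}
 */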

/*
 * ubc_alloc:  allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	mutex_enter(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			mutex_exit(&ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz, NULL);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
again_faultbusy:
		memset(pgs, 0, sizeof(pgs));
		mutex_enter(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(&uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				mutex_exit(&uobj->vmobjlock);
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					mutex_enter(&uobj->vmobjlock);
					uvm_page_unbusy(pgs, npages);
					mutex_exit(&uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg), VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		bool rv;

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		mutex_enter(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	mutex_exit(&ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */
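/*
 * Filesystem read/write paths typically use this to copy between a uio and
 * the pages backing a vnode.  As an illustrative example (not a quote from
 * any particular filesystem), a vnode read path might do:
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytes, advice, UBC_READ);
 */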

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	voff_t off;
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}