/*	$NetBSD: uvm_bio.c,v 1.52 2006/10/12 10:14:20 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.52 2006/10/12 10:14:20 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

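/* hash a (uobj, offset) pair to a bucket in ubc_object.hash */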
#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

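/* pick the inactive queue used for windows mapping the given offset */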
#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

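/* kernel virtual address of the mapping window owned by a ubc_map */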
#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


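/*
 * ubc_map flags:
 * UMAP_PAGES_LOCKED means the window's pages were entered with
 * pmap_kenter_pa() by the UBC_FAULTBUSY path and are still busy;
 * UMAP_MAPPING_CACHED means pmap mappings from a previous use of
 * the window may still be present.
 */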
#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

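/*
 * tunables: ubc_nwins is the number of mapping windows and ubc_winshift
 * is the log2 size of each window; the defaults come from UBC_NWINS and
 * UBC_WINSHIFT, and ubc_winsize is derived from ubc_winshift in ubc_init().
 */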
int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

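/* window cache hit/miss counters, bumped in ubc_alloc() (UBC_STATS only) */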
UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	if (ubc_object.hash == NULL)
		panic("ubc_init: failed to allocate hash table");
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1 __unused,
    struct vm_page **ign2 __unused,
    int ign3 __unused, int ign4 __unused, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */
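
/*
 * The usual caller pattern looks roughly like the sketch below (an
 * illustrative example only, not a verbatim copy of any filesystem's
 * read/write path; names such as "vp", "uio", "advice" and "rw" are
 * assumed to come from the caller):
 *
 *	while (uio->uio_resid > 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *
 *		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
 *		    advice, rw == UIO_WRITE ? UBC_WRITE : UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may shorten "bytelen" to what fits in one window, so the
 * caller must loop until the whole request has been transferred.
 */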

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */