/*	$NetBSD: uvm_bio.c,v 1.32 2004/05/05 11:35:40 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.32 2004/05/05 11:35:40 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **, int,
    int, vm_fault_t, vm_prot_t, int);
struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
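
/*
 * Illustration (made-up numbers, not a requirement): with PAGE_SHIFT 12
 * and ubc_winshift 13 (8KB windows), UBC_UMAP_ADDR() puts the window for
 * ubc_object.umap[3] at ubc_object.kva + 3 * 8192, and UBC_QUEUE() picks
 * an inactive queue by the window's alignment ("color") so that recycled
 * windows keep the alignment that PMAP_PREFER() wants.
 */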


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
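
/*
 * A ubc_map describes one mapping window: a ubc_winsize-sized,
 * window-aligned chunk of one uvm_object.  A window whose refcount has
 * dropped to zero stays in the hash table and also sits on an inactive
 * queue, so it can either be revived cheaply by ubc_find_mapping() or
 * be recycled for another object from the front of the queue.
 */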

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP(uobj) \
	(ubc_release_unmap && (((struct vnode *)uobj)->v_flag & VTEXT))
#elif defined(PMAP_CACHE_VIVT)
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP(uobj) TRUE
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP(uobj) FALSE
#endif
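
/*
 * UBC_RELEASE_UNMAP(uobj) says whether a window's kernel mapping should
 * be torn down as soon as its last reference is dropped: always on
 * virtually-indexed, virtually-tagged caches, only for executable
 * (VTEXT) vnodes when PMAP_PREFER() is available, and never otherwise.
 */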

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
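	/*
	 * at this point va holds the PMAP_PREFER() alignment stride (or 1
	 * if there is no preference); it is reused below as the alignment
	 * hint when mapping the window array, and one inactive queue is
	 * kept per window "color" within the stride.
	 */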
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	if (ubc_object.hash == NULL)
		panic("ubc_init: failed to allocate hash table");
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
	    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(vp != NULL);

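	/*
	 * decide how many pages to fault in: at most the rest of the
	 * window, and no further than the end of the object, where the
	 * object is taken to extend to the later of v_size and the end
	 * of any pending overwrite (so writes past EOF can fault before
	 * v_size has been updated).
	 */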
	npages = MIN(ubc_winsize - slot_offset,
		     (round_page(MAX(vp->v_size, umap->offset +
				     umap->writeoff + umap->writelen)) -
		      umap->offset)) >> PAGE_SHIFT;

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
	    "v_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
	    vp->v_size);
	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	flags |= PGO_PASTEOF;
	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
	    access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

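	/*
	 * relock the object: VOP_GETPAGES() returned with vmobjlock
	 * released, and we need it (plus the page queue lock) to enter
	 * and activate the pages below.
	 */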
	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		if (pg->loan_count != 0) {
			/*
			 * avoid unneeded loan break if possible.
			 */
			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				uvm_unlock_pageq();
				pg = uvm_loanbreak(pg);
				uvm_lock_pageq();
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}
		KASSERT(access_type == VM_PROT_READ ||
		    (pg->flags & PG_RDONLY) == 0);
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    (pg->flags & PG_RDONLY) ? prot & ~VM_PROT_WRITE : prot,
		    access_type);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
	    uobj, offset, *lenp, vp->v_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
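
	/*
	 * the request is clamped to one window: on return *lenp may be
	 * smaller than asked for, and the caller is expected to loop,
	 * calling ubc_alloc() again for the remainder.
	 */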

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

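	/*
	 * with UBC_FAULTBUSY, fault the pages covering the write in right
	 * away and enter them into the kernel pmap, so that later stores
	 * through the window will not go through uvm_fault(); the pages
	 * stay busy until ubc_release().
	 */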
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = VOP_GETPAGES(vp, trunc_page(offset), pgs, &npages, 0,
		    VM_PROT_READ|VM_PROT_WRITE, 0, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

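		/*
		 * the pages were entered with PGO_OVERWRITE and never read
		 * from disk, so zero the part of the last page that lies
		 * beyond the bytes actually written.
		 */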
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

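	/*
	 * drop our reference.  on the last release, requeue the window:
	 * windows forcibly unmapped via UBC_RELEASE_UNMAP() go to the head
	 * of their inactive queue for immediate reuse, everything else to
	 * the tail so a still-cached kernel mapping survives as long as
	 * possible.
	 */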
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP(uobj)) {

			/*
			 * if the cache is virtually indexed and virtually
			 * tagged, we cannot create a compatible cache alias.
			 *
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file.
			 *
			 * so invalidate mappings of such files as soon as
			 * possible.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
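
#if 0 /* example, not compiled */
/*
 * Sketch of how a file system's write path typically uses the window
 * cache (cf. the ufs read/write code): map a window over the current
 * offset, copy through it with uiomove(), release, and loop, since
 * ubc_alloc() may shorten the request to fit one window.  The function
 * name is made up for illustration; the error handling, block allocation
 * and v_size updates that a real write path needs are omitted.
 */
int
ubc_example_write(struct vnode *vp, struct uio *uio)
{
	void *win;
	vsize_t bytelen;
	int error = 0;

	while (uio->uio_resid > 0) {
		/* map a window; ubc_alloc() may shorten bytelen */
		bytelen = uio->uio_resid;
		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
		    UBC_WRITE);

		/* copy user data through the window */
		error = uiomove(win, bytelen, uio);

		/* always release the window, even on error */
		ubc_release(win, 0);
		if (error) {
			break;
		}
	}
	return error;
}
#endif /* example */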


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */
641