/*	$NetBSD: uvm_bio.c,v 1.36 2005/01/17 04:37:20 atatat Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
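
/*
 * Overview: UBC maps file data into a fixed region of kernel virtual
 * address space, divided into "windows" of ubc_winsize bytes.  Each
 * window is described by a struct ubc_map binding it to a (uvm_object,
 * offset) pair.  ubc_alloc() hands out a window for a given range of an
 * object; the first access through the window triggers ubc_fault(),
 * which fetches the pages via the object's pgo_get and enters them into
 * the kernel pmap.  ubc_release() drops the reference and puts the
 * window on an inactive queue, from which it may later be recycled for
 * a different object or offset.
 */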

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.36 2005/01/17 04:37:20 atatat Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **, int,
    int, vm_fault_t, vm_prot_t, int);
struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
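
/*
 * For illustration (values are hypothetical): with PAGE_SHIFT == 12 and
 * ubc_winshift == 13 (8KB windows), UBC_UMAP_ADDR() places window i at
 * ubc_object.kva + i * 0x2000, so umap[3] maps at kva + 0x6000.
 * UBC_HASH() discards the low bits of the object pointer (which carry
 * little variation) and adds the page number of the offset, and
 * UBC_QUEUE() selects an inactive queue by the window-aligned offset so
 * that recycled windows keep a cache-friendly alignment when
 * UBC_NQUEUES > 1.
 */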


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
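
/*
 * A ubc_map with refcount > 0 is in active use by ubc_alloc() callers;
 * at refcount == 0 it sits on an inactive queue, still hashed under its
 * last (uobj, offset) so it can be revived cheaply.  UMAP_MAPPING_CACHED
 * records that pmap entries from a previous use may still be present;
 * UMAP_PAGES_LOCKED marks windows whose pages were pre-faulted and held
 * busy by the UBC_FAULTBUSY path.
 */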

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};
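
/*
 * Only the fault hook is needed: the pages seen through a ubc window
 * belong to the underlying object, so ubc_fault() simply forwards to
 * that object's pgo_get and maps the result; the other pager operations
 * never apply to ubc_object itself.
 */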

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
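
/*
 * On platforms with PMAP_PREFER (virtually-indexed caches), one inactive
 * queue exists per window alignment the pmap layer distinguishes, so a
 * window recycled for a new offset tends to keep the preferred alignment;
 * elsewhere a single queue suffices.
 */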

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}
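
	/*
	 * For a read fault we ask for everything from the faulting page
	 * to the end of the window; for a write fault, only up to the end
	 * of the pending write (rounded to a page boundary), with
	 * PGO_PASTEOF allowing pages beyond EOF to be allocated for a
	 * file-extending write.
	 */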

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
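
	/*
	 * EAGAIN from pgo_get means a transient resource shortage; lbolt
	 * is woken once per second, so the retry loop above polls at most
	 * once a second.  Note that pgo_get drops uobj->vmobjlock, which
	 * is why it is retaken below before touching the returned pages.
	 */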
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				uvm_unlock_pageq();
				pg = uvm_loanbreak(pg);
				uvm_lock_pageq();
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}
		KASSERT(access_type == VM_PROT_READ ||
		    (pg->flags & PG_RDONLY) == 0);
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    (pg->flags & PG_RDONLY) ? prot & ~VM_PROT_WRITE : prot,
		    access_type);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */
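
/*
 * A sketch of a typical caller (hypothetical, not from this file): a
 * vnode read loop maps each window in turn and copies through it with
 * uiomove(), releasing the window before moving on.
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *
 *		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
 *		    UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *	}
 *
 * ubc_alloc() may shorten bytelen to what fits in one window, so the
 * loop simply continues until the request is exhausted.
 */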

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
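
	/*
	 * Worked example (hypothetical values): with ubc_winsize == 0x2000,
	 * offset 0x12345 splits into umap_offset 0x12000 (which window)
	 * and slot_offset 0x345 (where in the window); a request of 0x4000
	 * bytes is clamped to 0x2000 - 0x345 = 0x1cbb, the rest of this
	 * window.
	 */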

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

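	/*
	 * With UBC_FAULTBUSY the pages are brought in and entered into
	 * the kernel pmap right away rather than on first touch.
	 * PGO_OVERWRITE tells pgo_get not to bother reading page contents
	 * from backing store, since the caller has promised to overwrite
	 * the whole range; ubc_release() later zeroes any tail the write
	 * did not cover.
	 */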
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, 0, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
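
		/*
		 * These pages came in via PGO_OVERWRITE and were never
		 * read, so the memset above zeroes the tail of the last
		 * page that the write did not reach before the pages are
		 * unbusied and exposed below.
		 */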
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */