/*	$NetBSD: uvm_bio.c,v 1.46 2006/05/03 15:57:35 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.46 2006/05/03 15:57:35 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

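/*
 * Illustrative note (not part of the original file): each ubc_map owns one
 * window of ubc_winsize bytes of kernel VA, and the translation between the
 * two is pure arithmetic.  With hypothetical numbers, say ubc_object.kva at
 * 0xc8000000 and 8kB windows (ubc_winshift == 13):
 *
 *	UBC_UMAP_ADDR(&ubc_object.umap[3]) == 0xc8000000 + (3 << 13)
 *					   == 0xc8006000
 *
 * and ubc_fault() inverts the shift to get from a faulting address back to
 * its window: umap = &ubc_object.umap[(va - kva) >> ubc_winshift].
 */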

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

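/*
 * Illustrative note (not part of the original file): on machines with a
 * virtually-indexed cache, PMAP_PREFER() in ubc_init() above leaves the
 * preferred alignment (the cache alias distance) in va; that value becomes
 * both the alignment passed to uvm_map() and the basis for the number of
 * inactive queues.  With hypothetical numbers, say 8kB windows
 * (ubc_winshift == 13) and a 64kB alias distance, ubc_nqueues == 8 and a
 * request for file offset 0x28000 recycles a window from
 *
 *	UBC_QUEUE(0x28000) == &ubc_object.inactive[(0x28000 >> 13) & 7]
 *			   == &ubc_object.inactive[4]
 *
 * so a window's virtual address tends to have the same cache color as the
 * file offset it maps.
 */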

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		boolean_t rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				pg = uvm_loanbreak(pg);
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = (access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0;
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

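/*
 * Illustrative note (not part of the original file), using hypothetical
 * numbers (4kB pages, 8kB windows): suppose ubc_alloc() was called with
 * offset 0x10800 and length 0x1000 under UBC_WRITE, so the window has
 * umap->offset == 0x10000, writeoff == 0x800 and writelen == 0x1000.
 * The caller's first store faults in the window's first page
 * (slot_offset == 0), and the write branch above asks pgo_get for
 *
 *	npages = (round_page(0x10000 + 0x800 + 0x1000) - 0x10000)
 *	    >> PAGE_SHIFT  ==  2
 *
 * pages starting at offset 0x10000, i.e. every page the caller is about
 * to dirty, with PGO_PASTEOF allowing allocation past the current EOF.
 */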

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

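/*
 * Illustrative sketch (not part of the original file): the typical consumer
 * of ubc_alloc()/ubc_release() is a file system read/write path, which
 * copies user data through a temporary window instead of mapping whole
 * files.  Roughly, with declarations and error handling abridged, and
 * UBC_WRITE in place of UBC_READ on the write side:
 *
 *	while (uio->uio_resid > 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *		void *win;
 *
 *		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
 *		    UVM_ADV_NORMAL, UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may shorten bytelen to what fits in one window, so the caller
 * simply loops; the copy itself faults through ubc_fault() above, which
 * pulls the pages in via the object's pgo_get routine.
 */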

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */