/*	$NetBSD: uvm_bio.c,v 1.63 2007/12/01 10:40:27 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.63 2007/12/01 10:40:27 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;
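/*
 * the macros above map a (uobj, file offset) pair to a hash chain and to
 * an inactive queue.  an illustrative example (the values are hypothetical,
 * not the defaults of any particular port): with ubc_winshift == 13,
 * i.e. 8KB windows, a request for offset 0x12345 of an object is served
 * by the window covering offsets [0x12000, 0x14000), so
 *
 *	umap_offset = offset & ~(ubc_winsize - 1) == 0x12000
 *	slot_offset = offset &  (ubc_winsize - 1) == 0x00345
 *
 * UBC_HASH(uobj, umap_offset) then picks the chain that ubc_find_mapping()
 * searches for an existing window over that range, and UBC_QUEUE(offset)
 * picks the inactive queue that a replacement window is taken from.
 */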
const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define UBC_EVCNT_DEFINE(name)	/* nothing */
#define UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}
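/*
 * to make the sizing concrete (hypothetical numbers only; UBC_NWINS and
 * UBC_WINSHIFT are machine-dependent): with ubc_nwins == 1024 and
 * ubc_winshift == 13, ubc_init() reserves
 *
 *	ubc_nwins << ubc_winshift == 1024 * 8KB == 8MB
 *
 * of kernel virtual space for the window cache, backed by 1024
 * struct ubc_map's.
 */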
/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
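
	/*
	 * for each page returned by pgo_get(): wake up any waiters,
	 * free any pages that were released, break loans if a writable
	 * mapping is wanted, then activate the page and enter it into
	 * the faulting pmap.  if pmap_enter() fails (PMAP_CANFAIL),
	 * just wait for memory; the access will fault again and retry.
	 */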
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		bool rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
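
/*
 * the usual calling pattern for these functions is sketched below.  this
 * is illustrative only ("uobj", "off", "todo" and "uio" are placeholders,
 * and most callers use ubc_uiomove() rather than open-coding the loop):
 *
 *	while (todo > 0) {
 *		vsize_t bytelen = todo;
 *		void *win;
 *
 *		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL,
 *		    UBC_WRITE);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, UBC_WRITE);
 *		off += bytelen;
 *		todo -= bytelen;
 *	}
 *
 * ubc_alloc() may shorten "bytelen" to the end of the mapping window,
 * which is why the caller must loop.
 */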

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz, NULL);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
again_faultbusy:
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				simple_lock(&uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				simple_unlock(&uobj->vmobjlock);
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					simple_lock(&uobj->vmobjlock);
					uvm_page_unbusy(pgs, npages);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg), VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}
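
/*
 * for UBC_FAULTBUSY windows, ubc_alloc() above has already entered the
 * pages with pmap_kenter_pa() and left them busy, recording this in
 * UMAP_PAGES_LOCKED.  ubc_release() undoes that: it zeroes the part of
 * the last page beyond the write, marks the pages dirty and active,
 * unmaps the window and unbusies the pages.
 */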

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		bool rv;

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
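
/*
 * ubc_uiomove() below is the interface that i/o paths normally use.
 * a typical write path looks roughly like this (an illustrative sketch
 * only; "vp", "bytelen" and the overwrite test are hypothetical and
 * belong to the individual callers):
 *
 *	int flags = UBC_WRITE;
 *	if (the write overwrites whole pages)
 *		flags |= UBC_FAULTBUSY;
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, UVM_ADV_NORMAL,
 *	    flags);
 *
 * with UBC_FAULTBUSY, ubc_alloc() requests the pages with PGO_OVERWRITE,
 * so their previous contents need not be read in from disk first.
 */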

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	voff_t off;
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

#if 0 /* notused */
/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */