/*	$NetBSD: uvm_bio.c,v 1.128 2023/04/09 09:00:56 riastradh Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.128 2023/04/09 09:00:56 riastradh Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/bitops.h>		/* for ilog2() */

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef PMAP_DIRECT
#  define UBC_USE_PMAP_DIRECT
#endif

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
static int	ubchash_stats(struct hashstat_sysctl *hs, bool fill);
#ifdef UBC_USE_PMAP_DIRECT
static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *,
			  vsize_t, int, int);
static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t,
			  size_t, int);

/* XXX disabled by default until the kinks are worked out. */
bool ubc_direct = false;
#endif
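/*
 * Overview (a summary of the machinery defined below):
 *
 * The UBC ("unified buffer cache") code manages a pool of kernel
 * virtual windows, each ubc_winsize bytes long, which map ranges of
 * uvm_objects for buffered i/o.  Each window is described by a
 * struct ubc_map, looked up by (object, offset) in a hash table and
 * recycled through per-queue inactive lists once its refcount drops
 * to zero.  ubc_alloc() returns a window mapped over the requested
 * range, ubc_release() gives it back, and ubc_uiomove() /
 * ubc_zerorange() wrap the two for the common copy and zero cases.
 */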
/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

/* Use value at least as big as maximum page size supported by architecture */
#define UBC_MAX_WINSHIFT	\
	((1 << UBC_WINSHIFT) > MAX_PAGE_SIZE ? UBC_WINSHIFT : ilog2(MAX_PAGE_SIZE))

int ubc_nwins = UBC_NWINS;
const int ubc_winshift = UBC_MAX_WINSHIFT;
const int ubc_winsize = 1 << UBC_MAX_WINSHIFT;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)
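/*
 * The counters above are compiled in only when UBC_STATS is defined;
 * when enabled they record window cache hits, misses and UBC_FAULTBUSY
 * allocations, and show up as "ubc" entries in the kernel's event
 * counter list (e.g. vmstat -e).
 */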
/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	/*
	 * Make sure ubc_winshift is sane.
	 */
	KASSERT(ubc_winshift >= PAGE_SHIFT);

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	vaddr_t va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (int i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (int i = 0; i < ubc_nwins; i++) {
		struct ubc_map *umap;
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (int i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}

	hashstat_register("ubchash", ubchash_stats);
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}
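/*
 * Fault handling: the windows handed out by ubc_alloc() live in the
 * kernel map and are backed by ubc_object, whose only pager method is
 * pgo_fault (ubc_fault below).  Touching a window therefore faults in
 * the underlying object's pages on demand; ubc_fault_page() enters a
 * single page into the faulting pmap once ubc_fault() has fetched it
 * with pgo_get.
 */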
/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	vm_prot_t mask;
	int error;
	bool rdonly;

	KASSERT(rw_write_held(pg->uobject->vmobjlock));

	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		uvm_pagefree(pg);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 *
	 * It is the responsibility of ubc_alloc()'s caller to allocate
	 * backing blocks before writing to the window.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = uvm_pagereadonly_p(pg);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	uvm_pagelock(pg);
	uvm_pageactivate(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);

	return error;
}
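/*
 * Note on the pmap_enter() in ubc_fault_page(): PMAP_CANFAIL lets the
 * pmap return an error instead of sleeping when it cannot allocate
 * mapping resources.  ubc_fault() handles that by flushing, dropping
 * the object lock, waiting for memory with uvm_wait(), and letting the
 * access re-fault.
 */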
/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va %#jx ubc_offset %#jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif			/* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
		    "out of range write: slot=%#"PRIxVADDR
		    " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	rw_enter(uobj->vmobjlock, RW_WRITER);

	UVMHIST_LOG(ubchist, "slot_offset %#jx writeoff %#jx writelen %#jx ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset %#jx npages %jd",
	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va %#jx eva %#jx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file systems (e.g. tmpfs) may return pages which
	 * belong to the underlying UVM object; in that case the lock is
	 * shared amongst the objects.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
		    0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			rw_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			rw_enter(uobj->vmobjlock, RW_WRITER);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	rw_exit(uobj->vmobjlock);

	return 0;
}
/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
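/*
 * A window returned by ubc_alloc() stays valid until the matching
 * ubc_release().  Writers must either have allocated the backing
 * blocks for the range beforehand (see the PG_RDONLY note in
 * ubc_fault_page()) or pass UBC_FAULTBUSY to have the pages busied
 * and mapped up front.  A rough sketch of the pattern, with
 * placeholder names:
 *
 *	npages = __arraycount(pgs);
 *	win = ubc_alloc(uobj, off, &bytelen, advice, UBC_WRITE, pgs,
 *	    &npages);
 *	... copy or zero bytelen bytes starting at win ...
 *	ubc_release(win, flags, pgs, npages);
 */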
/*
 * ubc_alloc: allocate a file mapping window
 */

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags, struct vm_page **pgs, int *npagesp)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset %#jx len %#jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
	KASSERT(*lenp > 0);

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			rw_exit(ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz >> 2, NULL);
			rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				rw_enter(oobj->vmobjlock, RW_WRITER);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				rw_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		KASSERTMSG(umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	rw_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags %#jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
		    PAGE_SIZE - 1) >> PAGE_SHIFT;
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(npages <= *npagesp);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, *npagesp * sizeof(pgs[0]));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					rw_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				rw_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(
			    va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
		*npagesp = npages;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}
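/*
 * With UBC_FAULTBUSY the pages backing the window are fetched with
 * pgo_get, left busy, and entered into the kernel pmap with
 * pmap_kenter_pa() above, so the caller never takes a fault on the
 * window.  The pages stay busy (UMAP_PAGES_LOCKED) until the matching
 * ubc_release(), which zeroes any tail of the final page, unbusies the
 * pages and removes the kernel mappings.
 */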
/*
 * ubc_release: free a file mapping window.
 */

static void __noinline
ubc_release(void *va, int flags, struct vm_page **pgs, int npages)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;

		KASSERT(npages == (round_page(endoff) -
		    trunc_page(umap->writeoff)) >> PAGE_SHIFT);
		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (u_int i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];
#ifdef DIAGNOSTIC
			paddr_t pa;
			bool rv;
			rv = pmap_extract(pmap_kernel(), umapva +
			    umap->writeoff + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
#endif
			pg->flags &= ~PG_FAKE;
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			KASSERT(pg->loan_count == 0);
			if (uvmpdpol_pageactivate_p(pg)) {
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
			}
		}
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		rw_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			rw_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	rw_exit(ubc_object.uobj.vmobjlock);
}
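/*
 * Typical consumers of this interface are file system read/write
 * paths, which loop over a uio calling ubc_uiomove() for each chunk.
 * A minimal sketch of one iteration (illustrative only; the exact
 * flags and bounds are the caller's business):
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
 *	    UBC_READ | UBC_PARTIALOK);
 */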
/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	voff_t off;
	int error, npages;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

#ifdef UBC_USE_PMAP_DIRECT
	/*
	 * during direct access pages need to be held busy to prevent them
	 * changing identity, and therefore if we read or write an object
	 * into a mapped view of same we could deadlock while faulting.
	 *
	 * avoid the problem by disallowing direct access if the object
	 * might be visible somewhere via mmap().
	 *
	 * XXX concurrent reads cause thundering herd issues with PG_BUSY.
	 * In the future enable by default for writes or if ncpu<=2, and
	 * make the toggle override that.
	 */
	if ((ubc_direct && (flags & UBC_ISMAPPED) == 0) ||
	    (flags & UBC_FAULTBUSY) != 0) {
		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
	}
#endif

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, advice, flags, pgs,
		    &npages);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags, pgs, npages);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	int npages;

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct || (flags & UBC_FAULTBUSY) != 0) {
		ubc_zerorange_direct(uobj, off, len, flags);
		return;
	}
#endif

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		void *win;
		vsize_t bytelen = len;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
		    pgs, &npages);
		memset(win, 0, bytelen);
		ubc_release(win, flags, pgs, npages);

		off += bytelen;
		len -= bytelen;
	}
}
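/*
 * Direct-map variants: on pmaps that provide PMAP_DIRECT, the data can
 * be copied or zeroed through the pmap's direct mapping of physical
 * pages (via uvm_direct_process()) instead of going through a window
 * mapping, avoiding the fault and kernel-map traffic above.  The pages
 * are held busy for the duration, hence the caveats in ubc_uiomove().
 */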
#ifdef UBC_USE_PMAP_DIRECT
/* Copy data using direct map */

/*
 * ubc_alloc_direct: allocate a file mapping window using direct map
 */
static int __noinline
ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    int advice, int flags, struct vm_page **pgs, int *npages)
{
	voff_t pgoff;
	int error;
	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
	int access_type = VM_PROT_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	if (flags & UBC_WRITE) {
		if (flags & UBC_FAULTBUSY)
			gpflags |= PGO_OVERWRITE | PGO_NOBLOCKALLOC;
#if 0
		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
#endif

		/*
		 * Tell genfs_getpages() we already have the journal lock,
		 * allow allocation past current EOF.
		 */
		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
		access_type |= VM_PROT_WRITE;
	} else {
		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
		gpflags |= PGO_NOBLOCKALLOC;
	}

	pgoff = (offset & PAGE_MASK);
	*lenp = MIN(*lenp, ubc_winsize - pgoff);

again:
	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
	KASSERT(*lenp + pgoff <= ubc_winsize);
	memset(pgs, 0, *npages * sizeof(pgs[0]));

	rw_enter(uobj->vmobjlock, RW_WRITER);
	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
	    npages, 0, access_type, advice, gpflags);
	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
	if (error) {
		if (error == EAGAIN) {
			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
			goto again;
		}
		return error;
	}

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < *npages; i++) {
		struct vm_page *pg = pgs[i];

		KASSERT(pg != NULL);
		KASSERT(pg != PGO_DONTCARE);
		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);

		/* Avoid breaking loan if possible, only do it on write */
		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
			pg = uvm_loanbreak(pg);
			if (pg == NULL) {
				uvm_page_unbusy(pgs, *npages);
				rw_exit(uobj->vmobjlock);
				uvm_wait("ubc_alloc_directl");
				goto again;
			}
			pgs[i] = pg;
		}

		/* Page must be writable by now */
		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);

		/*
		 * XXX For aobj pages.  No managed mapping - mark the page
		 * dirty.
		 */
		if ((flags & UBC_WRITE) != 0) {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
	}
	rw_exit(uobj->vmobjlock);

	return 0;
}
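/*
 * ubc_direct_release() undoes ubc_alloc_direct(): it unbusies the
 * pages, frees any that were released while busy, reactivates them for
 * the page daemon, and for writes clears PG_FAKE now that the contents
 * are valid.
 */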
static void __noinline
ubc_direct_release(struct uvm_object *uobj,
	int flags, struct vm_page **pgs, int npages)
{
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
		if (pg->flags & PG_RELEASED) {
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
			continue;
		}

		if (uvm_pagewanted_p(pg) || uvmpdpol_pageactivate_p(pg)) {
			uvm_pagelock(pg);
			uvm_pageactivate(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
		}

		/* Page was changed, no longer fake nor clean. */
		if (flags & UBC_WRITE) {
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			pg->flags &= ~PG_FAKE;
		}
	}
	rw_exit(uobj->vmobjlock);
}

static int
ubc_uiomove_process(void *win, size_t len, void *arg)
{
	struct uio *uio = (struct uio *)arg;

	return uiomove(win, len, uio);
}

static int
ubc_zerorange_process(void *win, size_t len, void *arg)
{
	memset(win, 0, len);
	return 0;
}

static int __noinline
ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error, npages;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
		    pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		if (error == 0) {
			error = uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_uiomove_process, uio);
		}

		if (overwrite) {
			voff_t endoff;

			/*
			 * if we haven't initialized the pages yet due to an
			 * error above, do it now.
			 */
			if (error != 0) {
				(void) uvm_direct_process(pgs, npages, off,
				    bytelen, ubc_zerorange_process, NULL);
			}

			off += bytelen;
			todo -= bytelen;
			endoff = off & (PAGE_SIZE - 1);

			/*
			 * zero out the remaining portion of the final page
			 * (if any).
			 */
			if (todo == 0 && endoff != 0) {
				vsize_t zlen = PAGE_SIZE - endoff;
				(void) uvm_direct_process(pgs + npages - 1, 1,
				    off, zlen, ubc_zerorange_process, NULL);
			}
		} else {
			off += bytelen;
			todo -= bytelen;
		}

		ubc_direct_release(uobj, flags, pgs, npages);

		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
			break;
		}
	}

	return error;
}

static void __noinline
ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
{
	int error, npages;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];

	flags |= UBC_WRITE;

	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
		    flags, pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_zerorange_process, NULL);

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;
	}
}

#endif /* UBC_USE_PMAP_DIRECT */
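/*
 * ubc_purge() below is called when an object that may still have
 * cached windows is being torn down (for example, when a vnode is
 * reclaimed); any ubc_map still pointing at the object is detached so
 * that the window can be recycled safely.
 */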
/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	rw_exit(ubc_object.uobj.vmobjlock);
}

static int
ubchash_stats(struct hashstat_sysctl *hs, bool fill)
{
	struct ubc_map *umap;
	uint64_t chain;

	strlcpy(hs->hash_name, "ubchash", sizeof(hs->hash_name));
	strlcpy(hs->hash_desc, "ubc object hash", sizeof(hs->hash_desc));
	if (!fill)
		return 0;

	hs->hash_size = ubc_object.hashmask + 1;

	for (size_t i = 0; i < hs->hash_size; i++) {
		chain = 0;
		rw_enter(ubc_object.uobj.vmobjlock, RW_READER);
		LIST_FOREACH(umap, &ubc_object.hash[i], hash) {
			chain++;
		}
		rw_exit(ubc_object.uobj.vmobjlock);
		if (chain > 0) {
			hs->hash_used++;
			hs->hash_items += chain;
			if (chain > hs->hash_maxchain)
				hs->hash_maxchain = chain;
		}
		preempt_point();
	}

	return 0;
}