/*	$NetBSD: uvm_bio.c,v 1.121 2020/07/09 09:24:32 rin Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.121 2020/07/09 09:24:32 rin Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/bitops.h>		/* for ilog2() */

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

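/*
 * If the pmap provides direct access to physical pages (PMAP_DIRECT),
 * ubc_uiomove() and ubc_zerorange() can optionally bypass the mapping
 * windows below and touch the pages through the direct map instead.
 */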
#ifdef PMAP_DIRECT
#  define UBC_USE_PMAP_DIRECT
#endif

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
#ifdef UBC_USE_PMAP_DIRECT
static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
			  int, int);
static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);

/* XXX disabled by default until the kinks are worked out. */
bool ubc_direct = false;
#endif

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


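/*
 * ubc_map flags: UMAP_PAGES_LOCKED is set while a UBC_FAULTBUSY window
 * has its pages busied and entered with pmap_kenter_pa(); it is cleared
 * by ubc_release().  UMAP_MAPPING_CACHED means the kernel mapping for
 * the window may still be valid and can be reused for the same object
 * and offset.
 */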
#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

/*
 * Use a value at least as large as the maximum page size supported
 * by the architecture.
 */
#define UBC_MAX_WINSHIFT	\
    ((1 << UBC_WINSHIFT) > MAX_PAGE_SIZE ? UBC_WINSHIFT : ilog2(MAX_PAGE_SIZE))

int ubc_nwins = UBC_NWINS;
const int ubc_winshift = UBC_MAX_WINSHIFT;
const int ubc_winsize = 1 << UBC_MAX_WINSHIFT;
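
/*
 * The cache consists of ubc_nwins windows of ubc_winsize bytes each,
 * mapped contiguously at ubc_object.kva; ubc_map "u" corresponds to
 * the window at UBC_UMAP_ADDR(u).
 */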
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	/*
	 * Make sure ubc_winshift is sane.
	 */
	KASSERT(ubc_winshift >= PAGE_SHIFT);

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

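	/*
	 * Note: "va" serves double duty below.  On PMAP_PREFER platforms
	 * it is rounded up to the cache alias stride, which determines
	 * the number of inactive queues; it is also passed to uvm_map()
	 * as the alignment for the whole window array.
	 */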
	vaddr_t va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (int i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (int i = 0; i < ubc_nwins; i++) {
		struct ubc_map *umap;
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (int i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	vm_prot_t mask;
	int error;
	bool rdonly;

	KASSERT(rw_write_held(pg->uobject->vmobjlock));

	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		uvm_pagefree(pg);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 *
	 * It is the responsibility of ubc_alloc()'s caller to allocate
	 * the backing blocks before writing to the window.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

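	/*
	 * Never enter a read-only page with write permission; a later
	 * write access will fault again and be resolved then (e.g. by
	 * breaking a loan or allocating backing store).
	 */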
	rdonly = uvm_pagereadonly_p(pg);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	uvm_pagelock(pg);
	uvm_pageactivate(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif			/* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
		    "out of range write: slot=%#"PRIxVADDR
		        " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

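	/*
	 * For a read fault, map from the faulting page to the end of the
	 * window.  For a write fault, map only as far as the end of the
	 * range recorded by ubc_alloc(); PGO_PASTEOF lets the write
	 * extend past the current end of file.
	 */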
	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	rw_enter(uobj->vmobjlock, RW_WRITER);

	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file systems and e.g. tmpfs may return pages
	 * that belong to an underlying UVM object.  In that case the lock
	 * is shared amongst the objects.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
		    0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			rw_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			rw_enter(uobj->vmobjlock, RW_WRITER);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	rw_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags, struct vm_page **pgs, int *npagesp)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
	KASSERT(*lenp > 0);

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
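
		/*
		 * No cached window for this (object, offset) pair:
		 * recycle the first window on the inactive queue, or
		 * sleep until one becomes available.
		 */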
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			rw_exit(ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz >> 2, NULL);
			rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				rw_enter(oobj->vmobjlock, RW_WRITER);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				rw_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	rw_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

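	/*
	 * UBC_FAULTBUSY: fault in and busy all pages of the window up
	 * front and enter them into the kernel pmap directly, so that
	 * the subsequent copy through the window cannot page-fault.
	 */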
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
		    PAGE_SIZE - 1) >> PAGE_SHIFT;
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(npages <= *npagesp);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, *npagesp * sizeof(pgs[0]));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					rw_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				rw_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(
			    va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
		*npagesp = npages;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

static void __noinline
ubc_release(void *va, int flags, struct vm_page **pgs, int npages)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

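	/*
	 * For a UBC_FAULTBUSY window, zero any tail of the final page
	 * beyond the write, then unmap and unbusy the pages that
	 * ubc_alloc() entered.
	 */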
	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;

		KASSERT(npages == (round_page(endoff) -
		    trunc_page(umap->writeoff)) >> PAGE_SHIFT);
		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (u_int i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];
#ifdef DIAGNOSTIC
			paddr_t pa;
			bool rv;
			rv = pmap_extract(pmap_kernel(), umapva +
			    umap->writeoff + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
#endif
			pg->flags &= ~PG_FAKE;
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			KASSERT(pg->loan_count == 0);
			if (uvmpdpol_pageactivate_p(pg)) {
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
			}
		}
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		rw_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			rw_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	rw_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */
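/*
 * Typical caller sketch (illustrative, not part of this file): a vnode
 * read or write loop passes the vnode's UVM object and the uio
 * directly, e.g.
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
 *	    IO_ADV_DECODE(ioflag), UBC_READ | UBC_PARTIALOK);
 */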

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	voff_t off;
	int error, npages;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

#ifdef UBC_USE_PMAP_DIRECT
	/*
	 * during direct access, pages need to be held busy to prevent
	 * them changing identity, so if we read or write an object into
	 * a mapped view of the same object we could deadlock while
	 * faulting.
	 *
	 * avoid the problem by disallowing direct access if the object
	 * might be visible somewhere via mmap().
	 *
	 * XXX concurrent reads cause thundering herd issues with PG_BUSY.
	 * In the future enable by default for writes or if ncpu<=2, and
	 * make the toggle override that.
	 */
	if ((ubc_direct && (flags & UBC_ISMAPPED) == 0) ||
	    (flags & UBC_FAULTBUSY) != 0) {
		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
	}
#endif

	off = uio->uio_offset;
	error = 0;
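
	/*
	 * Copy one window at a time; ubc_alloc() clips bytelen to the
	 * portion of the request that fits in the current window.
	 */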
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, advice, flags, pgs,
		    &npages);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags, pgs, npages);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	int npages;

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct || (flags & UBC_FAULTBUSY) != 0) {
		ubc_zerorange_direct(uobj, off, len, flags);
		return;
	}
#endif

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		void *win;
		vsize_t bytelen = len;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
		    pgs, &npages);
		memset(win, 0, bytelen);
		ubc_release(win, flags, pgs, npages);

		off += bytelen;
		len -= bytelen;
	}
}

#ifdef UBC_USE_PMAP_DIRECT
/* Copy data using direct map */

/*
 * ubc_alloc_direct:  allocate a file mapping window using direct map
 */
static int __noinline
ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    int advice, int flags, struct vm_page **pgs, int *npages)
{
	voff_t pgoff;
	int error;
	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
	int access_type = VM_PROT_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	if (flags & UBC_WRITE) {
		if (flags & UBC_FAULTBUSY)
			gpflags |= PGO_OVERWRITE | PGO_NOBLOCKALLOC;
#if 0
		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
#endif

		/*
		 * Tell genfs_getpages() we already have the journal lock,
		 * allow allocation past current EOF.
		 */
		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
		access_type |= VM_PROT_WRITE;
	} else {
		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
		gpflags |= PGO_NOBLOCKALLOC;
	}

	pgoff = (offset & PAGE_MASK);
	*lenp = MIN(*lenp, ubc_winsize - pgoff);

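	/*
	 * Get and busy the pages.  We retry from here if the pager
	 * returns EAGAIN or if a loaned page has to be broken below.
	 */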
again:
	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
	KASSERT(*lenp + pgoff <= ubc_winsize);
	memset(pgs, 0, *npages * sizeof(pgs[0]));

	rw_enter(uobj->vmobjlock, RW_WRITER);
	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
	    npages, 0, access_type, advice, gpflags);
	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
	if (error) {
		if (error == EAGAIN) {
			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
			goto again;
		}
		return error;
	}

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < *npages; i++) {
		struct vm_page *pg = pgs[i];

		KASSERT(pg != NULL);
		KASSERT(pg != PGO_DONTCARE);
		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);

		/* Avoid breaking loan if possible, only do it on write */
		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
			pg = uvm_loanbreak(pg);
			if (pg == NULL) {
				uvm_page_unbusy(pgs, *npages);
				rw_exit(uobj->vmobjlock);
				uvm_wait("ubc_alloc_directl");
				goto again;
			}
			pgs[i] = pg;
		}

		/* Page must be writable by now */
		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);

		/*
		 * XXX For aobj pages.  No managed mapping - mark the page
		 * dirty.
		 */
		if ((flags & UBC_WRITE) != 0) {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
	}
	rw_exit(uobj->vmobjlock);

	return 0;
}

static void __noinline
ubc_direct_release(struct uvm_object *uobj,
	int flags, struct vm_page **pgs, int npages)
{
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
		if (pg->flags & PG_RELEASED) {
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
			continue;
		}

		if (uvm_pagewanted_p(pg) || uvmpdpol_pageactivate_p(pg)) {
			uvm_pagelock(pg);
			uvm_pageactivate(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
		}

		/* Page was changed: no longer fake, and no longer clean. */
		if (flags & UBC_WRITE) {
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			pg->flags &= ~PG_FAKE;
		}
	}
	rw_exit(uobj->vmobjlock);
}

static int
ubc_uiomove_process(void *win, size_t len, void *arg)
{
	struct uio *uio = (struct uio *)arg;

	return uiomove(win, len, uio);
}

static int
ubc_zerorange_process(void *win, size_t len, void *arg)
{
	memset(win, 0, len);
	return 0;
}

static int __noinline
ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error, npages;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
		    pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		if (error == 0) {
			error = uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_uiomove_process, uio);
		}

		if (overwrite) {
			voff_t endoff;

			/*
			 * if we haven't initialized the pages yet due to an
			 * error above, do it now.
			 */
			if (error != 0) {
				printf("%s: error=%d\n", __func__, error);
				(void) uvm_direct_process(pgs, npages, off,
				    bytelen, ubc_zerorange_process, NULL);
			}

			off += bytelen;
			todo -= bytelen;
			endoff = off & (PAGE_SIZE - 1);

			/*
			 * zero out the remaining portion of the final page
			 * (if any).
			 */
			if (todo == 0 && endoff != 0) {
				vsize_t zlen = PAGE_SIZE - endoff;
				(void) uvm_direct_process(pgs + npages - 1, 1,
				    off, zlen, ubc_zerorange_process, NULL);
			}
		} else {
			off += bytelen;
			todo -= bytelen;
		}

		ubc_direct_release(uobj, flags, pgs, npages);

		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
			break;
		}
	}

	return error;
}

static void __noinline
ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
{
	int error, npages;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];

	flags |= UBC_WRITE;

	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
		    flags, pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_zerorange_process, NULL);

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;
	}
}

#endif /* UBC_USE_PMAP_DIRECT */

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	rw_exit(ubc_object.uobj.vmobjlock);
}
1109