/*	$NetBSD: uvm_vnode.c,v 1.111 2020/03/22 18:32:42 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.111 2020/03/22 18:32:42 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static void	uvn_markdirty(struct uvm_object *);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
	.pgo_markdirty = uvn_markdirty,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
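
	/*
	 * hand the request to VOP_PUTPAGES(); it is responsible for
	 * unlocking the object.
	 */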
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);
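	/*
	 * for read access to a regular file (and when not called with
	 * PGO_LOCKED), make sure a read-ahead context exists and hand
	 * this request to the read-ahead code.
	 */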
	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}

/*
 * uvn_markdirty: called when the object gains its first dirty page
 *
 * => uobj must be write locked.
 */

static void
uvn_markdirty(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	KASSERT(rw_write_held(uobj->vmobjlock));

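	/*
	 * put the vnode on the syncer worklist so that its dirty pages
	 * will be written back in the background.
	 */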
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	mutex_exit(vp->v_interlock);
}

/*
 * uvn_findpages:
 * return the pages for the uobj and range requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY (unless UFP_NOBUSY was specified).
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
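
	/*
	 * scan the requested range forwards, or backwards if UFP_BACKWARD
	 * is set.  with UFP_DIRTYONLY, stop as soon as uvn_findpage()
	 * fails to return a dirty page.
	 */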
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	/*
	 * NOBUSY must come with NOWAIT and NOALLOC.  if NOBUSY is
	 * specified, this may be called with a reader lock.
	 */

	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0);
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0);
	KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
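				/*
				 * out of memory: unlock the object and
				 * wait for a page to be freed, then retry.
				 * the pages cached in the array are only
				 * valid while the object stays locked, so
				 * clear it before looping.
				 */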
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		if ((flags & UFP_NOBUSY) == 0) {
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "uvn_findpage");
		}
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
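		/*
		 * uvn_put() -> VOP_PUTPAGES() frees the pages past the new
		 * end of file and unlocks the object, so take the lock
		 * again before updating the size.
		 */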
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}

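/*
 * uvm_vnp_setwritesize: set the vnode's write size.
 *
 * => used while a write that extends the file is in progress; newsize
 *	must not be smaller than v_size.  uvm_vnp_setsize() later updates
 *	both v_size and v_writesize.
 */
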
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

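/*
 * uvn_text_p: check whether the vnode has executable mappings
 * (VI_EXECMAP is set).
 */
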
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	int iflag;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	return (iflag & VI_EXECMAP) != 0;
}

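/*
 * uvn_clean_p: check whether the object has no pages tagged dirty.
 */
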
bool
uvn_clean_p(struct uvm_object *uobj)
{

	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG);
}

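/*
 * uvn_alloc_ractx: allocate a read-ahead context for a regular file
 * if it does not already have one.
 *
 * => the object lock is dropped and re-taken around the allocation,
 *	so v_ractx is re-checked before the new context is installed.
 */
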
static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}

	rw_exit(uobj->vmobjlock);
	ra = uvm_ra_allocctx();
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (ra != NULL && vp->v_ractx == NULL) {
		vp->v_ractx = ra;
		ra = NULL;
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}
541