/*	$NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
	NULL,			/* pgo_init */
	uvn_reference,		/* pgo_reference */
	uvn_detach,		/* pgo_detach */
	NULL,			/* pgo_fault */
	uvn_get,		/* pgo_get */
	uvn_put,		/* pgo_put */
};
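
/*
 * Editorial note (sketch): these entry points are reached indirectly
 * through the object's pager-ops pointer; for example,
 *
 *	(*uobj->pgops->pgo_get)(uobj, ...)
 *
 * resolves to uvn_get() when uobj is a vnode-backed uvm_object.
 */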

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.   if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants for
 * our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that the uvm_object is the first thing in the vnode structure,
 *    so their pointers are equivalent.
 */
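
/*
 * Usage sketch (illustrative only; in-tree the mmap path is the real
 * caller):
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_WRITE);
 *	if (uobj == NULL)
 *		... not a mappable VREG/VBLK vnode, fail ...
 *	... map uobj, and later drop the reference via pgo_detach ...
 */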

struct uvm_object *
uvn_attach(void *arg, vm_prot_t accessprot)
{
	struct vnode *vp = arg;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vattr vattr;
	const struct bdevsw *bdev;
	int result;
	struct partinfo pi;
	voff_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
	used_vnode_size = (voff_t)0;

	/*
	 * first get a lock on the uobj.
	 */

	simple_lock(&uobj->vmobjlock);
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
		    "uvn_attach", 0);
		simple_lock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */
	if (vp->v_type == VBLK) {
		bdev = bdevsw_lookup(vp->v_rdev);
		if (bdev == NULL || bdev->d_type != D_DISK) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)",
				    0,0,0,0);
			return(NULL);
		}
	}
	KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

	/*
	 * set up our idea of the size
	 * if this hasn't been done already.
	 */
	if (vp->v_size == VSIZENOTSET) {
		vp->v_flag |= VXLOCK;
		simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */

		/* XXX: curproc? */
		if (vp->v_type == VBLK) {
			/*
			 * We could implement this as a specfs getattr call,
			 * but:
			 *
			 *	(1) VOP_GETATTR() would get the file system
			 *	    vnode operation, not the specfs operation.
			 *
			 *	(2) All we want is the size, anyhow.
			 */
			bdev = bdevsw_lookup(vp->v_rdev);
			if (bdev != NULL) {
				result = (*bdev->d_ioctl)(vp->v_rdev,
				    DIOCGPART, (caddr_t)&pi, FREAD, curlwp);
			} else {
				result = ENXIO;
			}
			if (result == 0) {
				/* XXX should remember blocksize */
				used_vnode_size =
				    (voff_t)pi.disklab->d_secsize *
				    (voff_t)pi.part->p_size;
			}
		} else {
			result = VOP_GETATTR(vp, &vattr, curproc->p_ucred,
			    curlwp);
			if (result == 0)
				used_vnode_size = vattr.va_size;
		}

		/* relock object */
		simple_lock(&uobj->vmobjlock);

		if (vp->v_flag & VXWANT) {
			wakeup(vp);
		}
		vp->v_flag &= ~(VXLOCK|VXWANT);

		if (result != 0) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)",
			    0,0,0,0);
			return(NULL);
		}
		vp->v_size = used_vnode_size;
	}

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
	    0, 0, 0);
	return uobj;
}


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
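
/*
 * Sketch (illustrative only): a reference taken through the pager ops is
 * just a vnode reference, and must be balanced:
 *
 *	(*uobj->pgops->pgo_reference)(uobj);	-- VREF() under the hood
 *	...
 *	(*uobj->pgops->pgo_detach)(uobj);	-- vrele() drops it again
 */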

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */
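
/*
 * Usage sketch (illustrative; PGO_* flag values are those of uvm_pager.h):
 * flush and free every page of a vnode, waiting for the I/O:
 *
 *	simple_lock(&vp->v_interlock);
 *	error = uvn_put(&vp->v_uobj, 0, 0, PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO);
 *
 * (uvn_put returns with the object lock released, per the contract above.)
 */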

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);
	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
	return error;
}


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
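
/*
 * Usage sketch (illustrative only; the real consumers are the fault
 * handler and UBC):
 *
 *	struct vm_page *pgs[1] = { NULL };
 *	int npages = 1;
 *	int error;
 *
 *	simple_lock(&uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *
 * (the object comes back unlocked; on success pgs[0] is BUSY and must
 *  eventually be unbusied by the caller)
 */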

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;
#if defined(READAHEAD_STATS)
	int orignpages = *npagesp;
#endif /* defined(READAHEAD_STATS) */

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		simple_unlock(&vp->v_interlock);
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
		simple_lock(&vp->v_interlock);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

#if defined(READAHEAD_STATS)
	if (((flags & PGO_LOCKED) != 0 && *npagesp > 0) ||
	    ((flags & (PGO_LOCKED|PGO_SYNCIO)) == PGO_SYNCIO && error == 0)) {
		int i;

		if ((flags & PGO_LOCKED) == 0) {
			simple_lock(&uobj->vmobjlock);
		}
		for (i = 0; i < orignpages; i++) {
			struct vm_page *pg = pps[i];

			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			if ((pg->flags & PG_SPECULATIVE) != 0) {
				pg->flags &= ~PG_SPECULATIVE;
				uvm_ra_hit.ev_count++;
			}
		}
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
	}
#endif /* defined(READAHEAD_STATS) */

	return error;
}


/*
 * uvn_findpages:
 * return the pages for the uobj and range requested, allocating them if
 * needed.  the number of pages actually found is returned.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */
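
/*
 * Usage sketch (illustrative): gather up to 16 already-resident pages
 * starting at "off" without allocating new ones:
 *
 *	struct vm_page *pgs[16];
 *	int npages = 16, found;
 *
 *	memset(pgs, 0, sizeof(pgs));	(entries must start out NULL)
 *	simple_lock(&uobj->vmobjlock);
 *	found = uvn_findpages(uobj, off, &npages, pgs, UFP_NOALLOC);
 *
 * (npages is updated to the count examined; found pages come back BUSY)
 */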

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	boolean_t dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				simple_unlock(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				simple_lock(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			simple_lock(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */
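
/*
 * Usage sketch (illustrative): a filesystem that has just shrunk a file
 * on disk tells UVM, so that pages past the new EOF get freed:
 *
 *	uvm_vnp_setsize(vp, (voff_t)new_length);
 *
 * ("new_length" is a hypothetical byte size here, not a page count.)
 */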

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	simple_lock(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
	} else {
		simple_unlock(&uobj->vmobjlock);
	}
	vp->v_size = newsize;
}

/*
 * uvm_vnp_zerorange:  set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
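
/*
 * Usage sketch (illustrative): zero the partial block left behind by a
 * truncate, assuming hypothetical byte offsets "newsize" and "blkend":
 *
 *	uvm_vnp_zerorange(vp, (off_t)newsize, (size_t)(blkend - newsize));
 */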