/*	$NetBSD: uvm_vnode.c,v 1.52 2001/09/15 20:36:47 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>

/*
 * functions
 */

void	uvn_detach __P((struct uvm_object *));
int	uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int,
	    vm_prot_t, int, int));
int	uvn_put __P((struct uvm_object *, voff_t, voff_t, int));
void	uvn_reference __P((struct uvm_object *));

int	uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
	NULL,			/* pgo_init */
	uvn_reference,		/* pgo_reference */
	uvn_detach,		/* pgo_detach */
	NULL,			/* pgo_fault */
	uvn_get,		/* pgo_get */
	uvn_put,		/* pgo_put */
};
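
/*
 * Sketch of how these ops get dispatched (not code from this file):
 * callers go through the uvm_object's pgops pointer rather than calling
 * the uvn_* functions directly, e.g.
 *
 *	error = (*uobj->pgops->pgo_get)(uobj, off, pps, &npages,
 *	    centeridx, access_type, advice, flags);
 *
 * field names are per struct uvm_pagerops in uvm_pager.h.
 */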

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * have on our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is the first thing in the vnode structure, so
 *    their pointers are equivalent.
 */
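
/*
 * Usage sketch (hypothetical caller; names below are illustrative, not
 * from this file).  A mapping path would attach first and bail out on
 * failure:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_WRITE);
 *	if (uobj == NULL)
 *		return (EINVAL);
 *
 * per the comments above and on uvn_detach() below, the reference
 * obtained here is dropped later via uvn_detach().
 */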

struct uvm_object *
uvn_attach(arg, accessprot)
	void *arg;
	vm_prot_t accessprot;
{
	struct vnode *vp = arg;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vattr vattr;
	int result;
	struct partinfo pi;
	voff_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
	used_vnode_size = (voff_t)0;

	/*
	 * first get a lock on the uobj.
	 */

	simple_lock(&uobj->vmobjlock);
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
		    "uvn_attach", 0);
		simple_lock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */
	if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
		return(NULL);
	}
	KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

	/*
	 * set up our idea of the size
	 * if this hasn't been done already.
	 */
	if (vp->v_size == VSIZENOTSET) {
		vp->v_flag |= VXLOCK;
		simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */

		/* XXX: curproc? */
		if (vp->v_type == VBLK) {
			/*
			 * We could implement this as a specfs getattr call, but:
			 *
			 *	(1) VOP_GETATTR() would get the file system
			 *	    vnode operation, not the specfs operation.
			 *
			 *	(2) All we want is the size, anyhow.
			 */
			result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
			    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
			if (result == 0) {
				/* XXX should remember blocksize */
				used_vnode_size = (voff_t)pi.disklab->d_secsize *
				    (voff_t)pi.part->p_size;
			}
		} else {
			result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
			if (result == 0)
				used_vnode_size = vattr.va_size;
		}

		/* relock object */
		simple_lock(&uobj->vmobjlock);

		if (vp->v_flag & VXWANT) {
			wakeup(vp);
		}
		vp->v_flag &= ~(VXLOCK|VXWANT);

		if (result != 0) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
			return(NULL);
		}
		vp->v_size = used_vnode_size;
	}

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
	    0, 0, 0);
	return uobj;
}


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed-in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

void
uvn_reference(uobj)
	struct uvm_object *uobj;
{
	VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

void
uvn_detach(uobj)
	struct uvm_object *uobj;
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked!   we will _unlock_ it before starting I/O.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

int
uvn_put(uobj, off, len, flags)
	struct uvm_object *uobj;
	voff_t off;
	voff_t len;
	int flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	error = VOP_PUTPAGES(vp, off, len, flags);
	return error;
}
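
/*
 * Example (sketch): uvm_vnp_setsize() below is an in-file caller; it
 * passes PGO_FREE to toss the pages past a new, smaller file size.
 * A synchronous flush of a whole object would instead look something
 * like
 *
 *	error = uvn_put(uobj, 0, 0, PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO);
 *
 * (flag combination illustrative; see uvm_pager.h for the PGO_* flags).
 */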


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;		/* IN/OUT */
	int *npagesp;			/* IN (OUT if PGO_LOCKED) */
	int centeridx;
	vm_prot_t access_type;
	int advice, flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;
	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);
	return error;
}
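
/*
 * Usage sketch (hypothetical fault-path caller): with the object
 * locked, ask for the single page around the faulting offset:
 *
 *	npages = 1;
 *	pps[0] = NULL;
 *	error = uvn_get(uobj, off, pps, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
 *
 * with PGO_LOCKED set, no I/O is started and the object stays locked;
 * without it, the object is unlocked and the call may sleep for I/O.
 */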


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 * => on return, *npagesp is the number of array slots processed.
 */

void
uvn_findpages(uobj, offset, npagesp, pgs, flags)
	struct uvm_object *uobj;
	voff_t offset;
	int *npagesp;
	struct vm_page **pgs;
	int flags;
{
	int i, count, npages, rv;

	count = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if ((flags & UFP_DIRTYONLY) && rv == 0) {
				break;
			}
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if ((flags & UFP_DIRTYONLY) && rv == 0) {
				break;
			}
			count++;
		}
	}
	*npagesp = count;
}
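
/*
 * Usage sketch (hypothetical caller): pre-NULL the array, since slots
 * that are already non-NULL are skipped as "dontcare" by uvn_findpage():
 *
 *	struct vm_page *pgs[16];
 *	int npages = 16;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	simple_lock(&uobj->vmobjlock);
 *	uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
 *
 * on return the pages filled into pgs[] are PG_BUSY and npages holds
 * the number of slots processed (UFP_* flags are in uvm_pager.h).
 */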

/*
 * uvn_findpage:
 * return in *pgp the page at the requested offset, allocating it if
 * needed (subject to the UFP_* flags).
 * => uobj must be locked; it may be dropped and re-taken if we sleep.
 * => returns 1 with *pgp set to a BUSY page, or 0 if the page was
 *    skipped or is unavailable.
 */

int
uvn_findpage(uobj, offset, pgp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pgp;
	int flags;
{
	struct vm_page *pg;
	boolean_t dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				simple_unlock(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				simple_lock(&uobj->vmobjlock);
				continue;
			}
			if (UVM_OBJ_IS_VTEXT(uobj)) {
				uvmexp.vtextpages++;
			} else {
				uvmexp.vnodepages++;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			simple_lock(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(vp, newsize)
	struct vnode *vp;
	voff_t newsize;
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	simple_lock(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE);
	} else {
		simple_unlock(&uobj->vmobjlock);
	}
	vp->v_size = newsize;
}
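
/*
 * Usage sketch (hypothetical file system code): call this whenever the
 * size of the file changes, right after the in-core size is updated:
 *
 *	ip->i_size = newsize;		(ip and newsize are illustrative)
 *	uvm_vnp_setsize(vp, newsize);
 */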

/*
 * uvm_vnp_zerorange:  set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(vp, off, len)
	struct vnode *vp;
	off_t off;
	size_t len;
{
	void *win;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, 0);

		off += bytelen;
		len -= bytelen;
	}
}
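/*
 * Usage sketch (hypothetical caller): after extending a file, zero the
 * gap between the old and new end of file so stale data is never
 * exposed to readers:
 *
 *	uvm_vnp_zerorange(vp, oldeof, newsize - oldeof);
 */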