/*	$NetBSD: uvm_vnode.c,v 1.93 2010/01/08 11:35:12 pooka Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93 2010/01/08 11:35:12 pooka Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
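
/*
 * Each vnode embeds a uvm_object (vp->v_uobj) whose pgops pointer is
 * set to &uvm_vnodeops when the vnode is allocated, so generic UVM
 * code reaches the routines below through the pager interface rather
 * than by name.  An illustrative sketch of such a call:
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	(*uobj->pgops->pgo_reference)(uobj);
 */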

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed-in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
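
/*
 * uvn_put() is reached via uobj->pgops->pgo_put.  A sketch of a
 * typical flush of a page range from generic code (illustrative,
 * not a verbatim caller):
 *
 *	mutex_enter(&vp->v_interlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, lo, hi,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *
 * note that pgo_put has unlocked the object by the time it returns.
 */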


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
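
	/*
	 * on a read access that may sleep (i.e. PGO_LOCKED is not set),
	 * hint the read-ahead code before asking the file system for
	 * the pages; write accesses are left alone.
	 */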
	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}


/*
 * uvn_findpages:
 * find the pages for the uobj at the requested offsets, allocating them
 * if needed.  returns the number of pages found; *npagesp is updated to
 * the number of array slots processed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
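
/*
 * A sketch of typical use, in the style of genfs_getpages()
 * (illustrative; real callers add error handling):
 *
 *	npages = *npagesp;
 *	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
 */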
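
/*
 * uvn_findpage:
 * find or allocate the page at "offset" and mark it BUSY, sleeping and
 * retrying if it is currently busy or if memory is short.
 * => uobj must be locked; it may be dropped and reacquired for sleeps.
 * => returns 1 with *pgp set on success, 0 if the page was skipped
 *	(a page was already supplied in *pgp, or one of the UFP_*
 *	flags ruled the page out).
 */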
static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			mutex_enter(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(&uobj->vmobjlock);
}
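
/*
 * A sketch of a typical caller: a file system's truncate path updates
 * its own idea of the size and then resizes the page cache to match
 * (illustrative; "ip" is a hypothetical inode):
 *
 *	ip->i_size = length;
 *	uvm_vnp_setsize(vp, length);
 */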
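
/*
 * uvm_vnp_setwritesize: grow the vnode's pending write size
 *
 * => used when a write will extend the file, so that pages beyond the
 *	current v_size may be flushed before v_size itself is updated;
 *	a later uvm_vnp_setsize() brings v_size and v_writesize back
 *	together at the final size.
 * => the new size may not be smaller than the current v_size.
 */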
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(&vp->v_interlock);
}
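
/*
 * uvn_text_p: check if the vnode is mapped (or was recently mapped)
 * for execution (VI_EXECMAP).
 */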
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}
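
/*
 * uvn_clean_p: check if the vnode has no pages in need of cleaning;
 * a vnode with potentially dirty pages sits on the syncer's work
 * list (VI_ONWORKLST).
 */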
bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}
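
/*
 * uvn_needs_writefault_p: check if writes through mappings of this
 * vnode must take a fault so that dirtying can be noticed: true if
 * the vnode is clean, or if it is write-mapped (VI_WRMAP) but not
 * yet marked dirty via a mapping (VI_WRMAPDIRTY).
 */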
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
399