/*	$NetBSD: uvm_vnode.c,v 1.97 2011/09/06 16:41:55 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97 2011/09/06 16:41:55 matt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
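	/*
	 * the uvm_object is embedded at the start of the vnode
	 * (v_uobj is its first member), so the cast is safe and a
	 * pager reference is simply a vnode reference.
	 */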
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

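	/*
	 * for read requests outside the PGO_LOCKED (no-sleep) fault
	 * path, give the read-ahead code a chance to queue more I/O
	 * before calling into the file system.
	 */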
	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

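	/*
	 * with PGO_LOCKED the file system must return with the object
	 * still locked; otherwise VOP_GETPAGES unlocks it before doing
	 * any I/O, as noted above.
	 */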
	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}


/*
 * uvn_findpages:
 * find the pages for the uobj and offsets requested, allocating them
 * if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
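			/*
			 * UVM_FLAG_COLORMATCH asks the allocator for a
			 * page whose colour matches this object offset.
			 */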
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
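				/*
				 * no memory: drop the object lock, wait
				 * for the pagedaemon to free pages, then
				 * relock and retry the lookup.
				 */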
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
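			/*
			 * pmap_clear_modify() tests and clears the
			 * pmap-level modified bit; the page is dirty if
			 * either that bit or the absence of PG_CLEAN
			 * says so.  mark it clean here since the caller
			 * is expected to write it out.
			 */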
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p (color %u)",
		    pg, VM_PGCOLOR_BUCKET(pg), 0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

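	/*
	 * v_writesize may run ahead of v_size while delayed writes
	 * that extend the file are pending, so the new size must be
	 * consistent with both.
	 */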
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
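		/*
		 * free the pages beyond the new end of the object.  an
		 * offhi of 0 means "to the end of the object", and
		 * uvn_put (via VOP_PUTPAGES) returns with the object
		 * unlocked, so retake the lock before updating the size.
		 */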
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}

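/*
 * uvn_text_p: check whether the vnode has (or has had) executable
 * mappings (VI_EXECMAP).
 */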
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

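/*
 * uvn_clean_p: check whether the vnode is clean, i.e. it is not on
 * the syncer work list and so has no dirty pages awaiting a flush.
 */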
bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

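/*
 * uvn_needs_writefault_p: check whether a write access must take a
 * fault so that the vnode can be marked dirty: true if the vnode is
 * clean, or if it is write-mapped but not yet flagged dirty
 * (VI_WRMAP set but VI_WRMAPDIRTY clear).
 */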
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}