/*	$OpenBSD: uvm_object.c,v 1.6 2010/05/01 13:13:10 oga Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 */

#include <sys/param.h>
#include <sys/proc.h>		/* XXX for atomic */

#include <uvm/uvm.h>

/* Number of pages to fetch from the pager per iteration in uvm_objwire(). */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_objinit: initialise a uvm object.
 */
void
uvm_objinit(struct uvm_object *uobj, struct uvm_pagerops *pgops, int refs)
{
	uobj->pgops = pgops;
	RB_INIT(&uobj->memt);
	uobj->uo_npages = 0;
	uobj->uo_refs = refs;
}
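
/*
 * Illustrative usage (not part of this file): a pager that embeds a
 * uvm_object in its own descriptor would typically initialise it with
 * its pager operations and an initial reference count, for example:
 *
 *	uvm_objinit(&aobj->u_obj, &example_pgops, 1);
 *
 * "aobj" and "example_pgops" are hypothetical names used only for
 * illustration.
 */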

#ifndef SMALL_KERNEL
/*
 * uvm_objwire: wire the pages of a uobj in the range [start, end)
 *
 * => caller must pass page-aligned start and end values
 * => if the caller passes in a pageq pointer, we'll return a list of
 *    wired pages.
 */

int
uvm_objwire(struct uvm_object *uobj, off_t start, off_t end,
    struct pglist *pageq)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	simple_lock(&uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

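		/*
		 * pgo_get() was entered with the object locked and, for a
		 * synchronous request like this one, returns with the lock
		 * dropped and the pages busied; re-take the lock before
		 * touching the page flags.
		 */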
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

#if 0
			/*
			 * Loan break
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						simple_unlock(&uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						simple_lock(&uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}
#endif

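			/*
			 * Pages backed by the anonymous object pager are
			 * about to be wired in core: mark them dirty and
			 * drop their swap backing, since a swap copy would
			 * no longer be kept up to date.
			 */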
			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	simple_unlock(&uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}
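
/*
 * Illustrative usage (not part of this file): wiring a page-aligned range
 * of an object around an operation that must not fault, then releasing it:
 *
 *	error = uvm_objwire(uobj, trunc_page(off), round_page(off + len),
 *	    NULL);
 *	if (error == 0) {
 *		... access the wired range ...
 *		uvm_objunwire(uobj, trunc_page(off), round_page(off + len));
 *	}
 *
 * "off" and "len" are hypothetical values; trunc_page()/round_page() are
 * shown only to satisfy the page-alignment requirement.
 */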

/*
 * uvm_objunwire: unwire the pages of a uobj in the range [start, end)
 *
 * => caller must pass page-aligned start and end values
 */

void
uvm_objunwire(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

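		/*
		 * Drop one wiring; once the wire count reaches zero,
		 * uvm_pageunwire() puts the page back on the active queue
		 * so the pagedaemon may manage it again.
		 */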
		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
}
#endif /* !SMALL_KERNEL */