/*	$NetBSD: uvm_page_array.c,v 1.4 2020/02/23 15:46:43 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.4 2020/02/23 15:46:43 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar)
{

	ar->ar_idx = ar->ar_npages = 0;
}
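
/*
 * Usage sketch (illustrative only, not part of this interface):
 * a caller typically keeps the array on the stack and pairs
 * uvm_page_array_init with uvm_page_array_fini:
 *
 *	struct uvm_page_array a;
 *
 *	uvm_page_array_init(&a);
 *	... use the array ...
 *	uvm_page_array_fini(&a);
 */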

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison to trigger assertion in uvm_page_array_peek to
	 * detect usage errors.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}
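
/*
 * Illustration of the misuse the poison above is meant to catch
 * (hypothetical caller, not code from this file): once finalized,
 * ar_idx (1000) exceeds ar_npages (1), so a stray peek trips the
 * KASSERT in uvm_page_array_peek under DIAGNOSTIC instead of quietly
 * returning a stale page:
 *
 *	uvm_page_array_fini(&a);
 *	pg = uvm_page_array_peek(&a);	<- asserts ar_idx <= ar_npages
 */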

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	uvm_page_array_init(ar);
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}
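
/*
 * Usage sketch (illustrative only): after a successful
 * uvm_page_array_fill, the cached pages are drained by alternating
 * uvm_page_array_peek and uvm_page_array_advance:
 *
 *	while ((pg = uvm_page_array_peek(&a)) != NULL) {
 *		... process pg ...
 *		uvm_page_array_advance(&a);
 *	}
 */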

/*
 * uvm_page_array_fill: look up pages and keep them cached.
 *
 * return 0 on success.  in that case the results are cached in the array
 * so that later uvm_page_array_peek calls will pick them up.
 *
 * nwant is the number of pages to fetch.  a caller should consider it a
 * hint.  nwant == 0 means the caller has no preference.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_write_held(uobj->vmobjlock));
#endif
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		uvm_page_array_clear(ar);
		return ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KDASSERT(pg != NULL);
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return 0;
}
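
/*
 * Usage sketch (illustrative only): collect up to 16 pages tagged dirty
 * at or beyond "off", in ascending offset order, with the object lock
 * held:
 *
 *	error = uvm_page_array_fill(&a, uobj, off, 16,
 *	    UVM_PAGE_ARRAY_FILL_DIRTY);
 *	if (error == ENOENT) {
 *		... no dirty pages were found; nothing to drain ...
 *	}
 */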

/*
 * uvm_page_array_fill_and_peek:
 * same as uvm_page_array_peek except that, if the array is empty, try to fill
 * it first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	struct vm_page *pg;
	int error;

	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		return pg;
	}
	error = uvm_page_array_fill(a, uobj, off, nwant, flags);
	if (error != 0) {
		return NULL;
	}
	pg = uvm_page_array_peek(a);
	KASSERT(pg != NULL);
	return pg;
}
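
/*
 * Usage sketch (illustrative only; "startoff" and the processing step
 * are placeholders): the typical loop over an object's pages with this
 * interface, assuming uobj->vmobjlock is held across the whole loop:
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	voff_t off = startoff;
 *
 *	uvm_page_array_init(&a);
 *	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0))
 *	    != NULL) {
 *		... process pg ...
 *		off = pg->offset + PAGE_SIZE;
 *		uvm_page_array_advance(&a);
 *	}
 *	uvm_page_array_fini(&a);
 */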