/*	$NetBSD: uvm_page_array.c,v 1.9 2020/05/26 21:52:12 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.9 2020/05/26 21:52:12 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
    unsigned int flags)
{

	ar->ar_idx = 0;
	ar->ar_npages = 0;
	ar->ar_uobj = uobj;
	ar->ar_flags = flags;
}

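/*
 * note: "flags" is a combination of the UVM_PAGE_ARRAY_FILL_* values
 * consumed by uvm_page_array_fill below; it is recorded here and applied
 * to every subsequent fill of this array.
 */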
/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison the state to trigger the assertion in
	 * uvm_page_array_peek and detect use-after-fini errors.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx = 0;
	ar->ar_npages = 0;
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}

/*
 * uvm_page_array_fill: look up pages and keep them cached.
 *
 * return 0 on success.  in that case, the results are cached in the
 * array so that they will be picked up by later uvm_page_array_peek calls.
 *
 * nwant is the number of pages to fetch.  the caller should consider
 * it a hint; nwant == 0 means the caller has no specific idea.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, voff_t off, unsigned int nwant)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	struct uvm_object *uobj = ar->ar_uobj;
	const int flags = ar->ar_flags;
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;
	int error = 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_lock_held(uobj->vmobjlock));
#endif
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		if (flags != 0) {
			/*
			 * if dense or looking for tagged entries (or
			 * working backwards), fail right away.
			 */
			npages = 0;
		} else {
			/*
			 * there's nothing else to be found with the current
			 * set of arguments, in the current version of the
			 * tree.
			 *
			 * minimize repeated tree lookups by "finding" a
			 * null pointer, in case the caller keeps looping (a
			 * common use case).
			 */
			npages = 1;
			ar->ar_pages[0] = NULL;
		}
		error = ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; error == 0 && i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KASSERT(pg != NULL);
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return error;
}
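
/*
 * a usage sketch (not part of this file): fetching up to one array's
 * worth of dirty pages of an object.  "uobj" and "off" are assumed to be
 * supplied by the caller, with uobj->vmobjlock held; the flag is the one
 * tested above.
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *
 *	uvm_page_array_init(&a, uobj, UVM_PAGE_ARRAY_FILL_DIRTY);
 *	if (uvm_page_array_fill(&a, off, 0) == 0) {
 *		while ((pg = uvm_page_array_peek(&a)) != NULL) {
 *			... process the dirty page "pg" ...
 *			uvm_page_array_advance(&a);
 *		}
 *	}
 *	uvm_page_array_fini(&a);
 */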

/*
 * uvm_page_array_fill_and_peek:
 * same as uvm_page_array_peek except that, if the array is empty, try to fill
 * it first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *ar, voff_t off,
    unsigned int nwant)
{
	int error;

	if (ar->ar_idx != ar->ar_npages) {
		return ar->ar_pages[ar->ar_idx];
	}
	error = uvm_page_array_fill(ar, off, nwant);
	if (error != 0) {
		return NULL;
	}
	return uvm_page_array_peek(ar);
}
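
/*
 * a usage sketch (not part of this file): the looping consumer that
 * uvm_page_array_fill_and_peek is designed for.  it walks every page of
 * a locked object starting at "start"; the variable names are
 * illustrative only.
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	voff_t off = start;
 *
 *	uvm_page_array_init(&a, uobj, 0);
 *	while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
 *		... use "pg"; without UVM_PAGE_ARRAY_FILL_DENSE the
 *		... lookup can skip holes, so pg->offset may be > off
 *		off = pg->offset + PAGE_SIZE;
 *		uvm_page_array_advance(&a);
 *	}
 *	uvm_page_array_fini(&a);
 */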