/*	$NetBSD: uvm_page_array.h,v 1.2 2019/12/15 21:11:35 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(_UVM_UVM_PAGE_ARRAY_H_)
#define _UVM_UVM_PAGE_ARRAY_H_

/*
 * uvm_page_array: an array of pages.
 *
 * this structure and these functions simply manipulate struct vm_page
 * pointers.  it is the caller's responsibility to acquire and hold the
 * object lock so that the result stays valid.
 *
 * typical usage:
 *
 *	struct uvm_page_array a;
 *
 *	uvm_page_array_init(&a);
 *	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, ....))
 *	    != NULL) {
 *		off = pg->offset + PAGE_SIZE;
 *		do_something(pg);
 *		uvm_page_array_advance(&a);
 *	}
 *	uvm_page_array_fini(&a);
 */

struct vm_page;

struct uvm_page_array {
	struct vm_page *ar_pages[16];	/* XXX tune */
	unsigned int ar_npages;		/* valid elements in ar_pages */
	unsigned int ar_idx;		/* index in ar_pages */
};

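/*
 * in brief: uvm_page_array_init/fini set up and tear down an array,
 * uvm_page_array_clear forgets any cached pages, uvm_page_array_peek
 * returns the next cached page (or NULL if there is none) and
 * uvm_page_array_advance moves past it.  uvm_page_array_fill looks up
 * pages of the object starting at the given offset and caches them,
 * honouring the UVM_PAGE_ARRAY_FILL_* flags below;
 * uvm_page_array_fill_and_peek fills the array if necessary and then
 * peeks.  see uvm_page_array.c for the exact semantics and the meaning
 * of the remaining arguments.
 */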
void uvm_page_array_init(struct uvm_page_array *);
void uvm_page_array_fini(struct uvm_page_array *);
void uvm_page_array_clear(struct uvm_page_array *);
struct vm_page *uvm_page_array_peek(struct uvm_page_array *);
void uvm_page_array_advance(struct uvm_page_array *);
int uvm_page_array_fill(struct uvm_page_array *, struct uvm_object *,
    voff_t, unsigned int, unsigned int);
struct vm_page *uvm_page_array_fill_and_peek(struct uvm_page_array *,
    struct uvm_object *, voff_t, unsigned int, unsigned int);

/*
 * flags for uvm_page_array_fill and uvm_page_array_fill_and_peek
 */
#define	UVM_PAGE_ARRAY_FILL_DIRTY	1	/* dirty pages */
#define	UVM_PAGE_ARRAY_FILL_WRITEBACK	2	/* dirty or written-back */
#define	UVM_PAGE_ARRAY_FILL_DENSE	4	/* stop on a hole */
#define	UVM_PAGE_ARRAY_FILL_BACKWARD	8	/* descending order */

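/*
 * the flags are or'ed together and passed to uvm_page_array_fill or
 * uvm_page_array_fill_and_peek.  as a rough sketch (assuming the
 * fourth argument is the maximum number of pages wanted, with 0
 * meaning "fill the whole array"), a caller interested only in dirty
 * pages, stopping at the first hole, might do:
 *
 *	pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0,
 *	    UVM_PAGE_ARRAY_FILL_DIRTY | UVM_PAGE_ARRAY_FILL_DENSE);
 */
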
#endif /* defined(_UVM_UVM_PAGE_ARRAY_H_) */