/*	$OpenBSD: uvm_object.c,v 1.20 2021/09/05 11:44:46 mpi Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/* Dummy object used by some pmaps for sanity checks. */
const struct uvm_pagerops pmap_pager = {
	/* nothing */
};

/* Dummy object used by the buffer cache for sanity checks. */
const struct uvm_pagerops bufcache_pager = {
	/* nothing */
};

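/*
 * These pagers contain no methods; only their addresses matter.  A
 * uvm_object is recognised as belonging to a pmap or to the buffer
 * cache by comparing its pgops pointer against them.  As a sketch,
 * the UVM_OBJ_IS_BUFCACHE() check used in uvm_obj_free() below is
 * assumed to boil down to a pointer comparison along these lines:
 *
 *	#define UVM_OBJ_IS_BUFCACHE(uobj)				\
 *		((uobj)->pgops == &bufcache_pager)
 */
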
/* Number of pages to fetch per pgo_get call in uvm_obj_wire(). */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_obj_init: initialise a uvm object.
 */
void
uvm_obj_init(struct uvm_object *uobj, const struct uvm_pagerops *pgops, int refs)
{
	uobj->pgops = pgops;
	RBT_INIT(uvm_objtree, &uobj->memt);
	uobj->uo_npages = 0;
	uobj->uo_refs = refs;
}
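
/*
 * Illustrative sketch (not part of this file): a typical consumer
 * embeds a uvm_object in its own structure and initialises it with a
 * pager and an initial reference count.  For example, a pmap keeping
 * a dummy object for sanity checks might do (pm_obj is a hypothetical
 * field name):
 *
 *	uvm_obj_init(&pmap->pm_obj, &pmap_pager, 1);
 */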

#ifndef SMALL_KERNEL
/*
 * uvm_obj_wire: wire the pages in the given range of a uobj
 *
 * => caller must pass page-aligned start and end values
 * => if the caller passes in a pageq pointer, we'll return a list of
 *    wired pages.
 */

int
uvm_obj_wire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				/*
				 * Drop the swap slot using the page's
				 * index within the object, not its
				 * index within the current batch.
				 */
				uao_dropswap(uobj, (offset >> PAGE_SHIFT) + i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_obj_unwire(uobj, start, offset);

	return error;
}

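/*
 * Illustrative sketch (hypothetical caller): wiring the first len
 * bytes of an object and unwiring them again later.  The pageq list
 * receives the wired pages in order; a caller that does not need the
 * list may pass NULL instead:
 *
 *	struct pglist pageq;
 *	int error;
 *
 *	TAILQ_INIT(&pageq);
 *	error = uvm_obj_wire(uobj, 0, round_page(len), &pageq);
 *	if (error)
 *		return error;
 *	...
 *	uvm_obj_unwire(uobj, 0, round_page(len));
 */
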
/*
 * uvm_obj_unwire: unwire the pages in the given range of a uobj
 *
 * => caller must pass page-aligned start and end values
 */

void
uvm_obj_unwire(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct vm_page *pg;
	voff_t offset;

	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
}
#endif /* !SMALL_KERNEL */

/*
 * uvm_obj_free: free all pages in a uvm object.  Used by the buffer
 * cache to release every page attached to a buffer.
 */
void
uvm_obj_free(struct uvm_object *uobj)
{
	struct vm_page *pg;
	struct pglist pgl;

	KASSERT(UVM_OBJ_IS_BUFCACHE(uobj));
	KERNEL_ASSERT_LOCKED();

	TAILQ_INIT(&pgl);
	/*
	 * Extract from the rb tree in offset order.  The physical
	 * addresses usually increase in that order, which is better
	 * for uvm_pmr_freepageq.
	 */
	RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
		/*
		 * Clear PG_TABLED so we don't do work to remove
		 * this pg from the uobj we are throwing away.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
		uvm_lock_pageq();
		uvm_pageclean(pg);
		uvm_unlock_pageq();
		TAILQ_INSERT_TAIL(&pgl, pg, pageq);
	}
	uvm_pmr_freepageq(&pgl);
}
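
/*
 * Illustrative teardown sketch (bp->b_uobj is a hypothetical field
 * name): a buffer whose pages hang off a private object initialised
 * with bufcache_pager releases them all in one call.  The object's
 * page tree is not rebuilt, so the object must be re-initialised
 * before any reuse:
 *
 *	KERNEL_ASSERT_LOCKED();
 *	uvm_obj_free(&bp->b_uobj);
 */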