/*	$OpenBSD: uvm_object.c,v 1.21 2021/10/12 18:16:51 kettenis Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/* Dummy object used by some pmaps for sanity checks. */
const struct uvm_pagerops pmap_pager = {
	/* nothing */
};

/* Dummy object used by the buffer cache for sanity checks. */
const struct uvm_pagerops bufcache_pager = {
	/* nothing */
};
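
/*
 * Note (illustrative, not part of the original code): these dummy pagers
 * define no operations and are never called through; objects reference
 * them only so that other code can tell what kind of object it is looking
 * at by comparing the pgops pointer.  A bufcache sanity check such as the
 * UVM_OBJ_IS_BUFCACHE() assertion used further below is, in essence, a
 * pointer comparison along the lines of:
 *
 *	KASSERT(uobj->pgops == &bufcache_pager);
 */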

/* Number of pages to fetch per pgo_get step in uvm_obj_wire(). */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_obj_init: initialise a uvm object: set its pager operations and
 * initial reference count, and start with an empty page tree.
 */
void
uvm_obj_init(struct uvm_object *uobj, const struct uvm_pagerops *pgops,
    int refs)
{
	uobj->pgops = pgops;
	RBT_INIT(uvm_objtree, &uobj->memt);
	uobj->uo_npages = 0;
	uobj->uo_refs = refs;
}
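
/*
 * Example (illustrative sketch, not taken verbatim from a caller): a pager
 * that embeds a struct uvm_object in its own object typically initialises
 * it with its pager ops and an initial reference, roughly:
 *
 *	uvm_obj_init(&aobj->u_obj, &aobj_pager, 1);
 *
 * The names above are illustrative; see the individual pagers for the
 * actual calls.
 */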

/*
 * uvm_obj_destroy: tear down a uvm object; currently nothing to do.
 */
void
uvm_obj_destroy(struct uvm_object *uo)
{
}

#ifndef SMALL_KERNEL
/*
 * uvm_obj_wire: wire the pages of a uobj in the range [start, end)
 *
 * => caller must pass page-aligned start and end values
 * => if the caller passes in a pageq pointer, we'll return a list of
 *    wired pages.
 */

int
uvm_obj_wire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			/*
			 * A wired aobj page no longer needs its swap slot:
			 * mark the page dirty and release the slot.
			 */
			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}

	return 0;

error:
	/* Unwire the pages which have been wired so far */
	uvm_obj_unwire(uobj, start, offset);

	return error;
}
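
/*
 * Example (illustrative sketch, not taken from a real caller): wiring the
 * whole range of an object and collecting the wired pages might look like:
 *
 *	struct pglist pageq;
 *	int error;
 *
 *	TAILQ_INIT(&pageq);
 *	error = uvm_obj_wire(uobj, 0, round_page(size), &pageq);
 *	if (error)
 *		return error;
 *
 * On error uvm_obj_wire() has already unwired whatever it managed to wire,
 * so the caller only has to undo its own state.
 */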

/*
 * uvm_obj_unwire: unwire the pages of a uobj in the range [start, end)
 *
 * => caller must pass page-aligned start and end values
 */

void
uvm_obj_unwire(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct vm_page *pg;
	off_t offset;

	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
}
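
/*
 * Example (illustrative sketch): a caller that wired [0, size) with
 * uvm_obj_wire() above releases the wiring with the matching call:
 *
 *	uvm_obj_unwire(uobj, 0, round_page(size));
 */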
#endif /* !SMALL_KERNEL */

/*
 * uvm_obj_free: free all pages in a uvm object, used by the buffer
 * cache to free all pages attached to a buffer.
 */
void
uvm_obj_free(struct uvm_object *uobj)
{
	struct vm_page *pg;
	struct pglist pgl;

	KASSERT(UVM_OBJ_IS_BUFCACHE(uobj));
	KERNEL_ASSERT_LOCKED();

	TAILQ_INIT(&pgl);
	/*
	 * Extract from rb tree in offset order. The phys addresses
	 * usually increase in that order, which is better for
	 * uvm_pmr_freepageq.
	 */
	RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
		/*
		 * clear PG_TABLED so we don't do work to remove
		 * this pg from the uobj we are throwing away
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
		uvm_lock_pageq();
		uvm_pageclean(pg);
		uvm_unlock_pageq();
		TAILQ_INSERT_TAIL(&pgl, pg, pageq);
	}
	uvm_pmr_freepageq(&pgl);
}
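
/*
 * Example (illustrative sketch): a buffer-cache object that is being torn
 * down can drop every remaining page in one call, e.g.:
 *
 *	KERNEL_ASSERT_LOCKED();
 *	uvm_obj_free(&bp->b_uobj);
 *
 * The b_uobj name is only illustrative here; the point is that the object
 * must be a bufcache object (see the KASSERT above) and its pages must no
 * longer be referenced anywhere else.
 */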