1*34c50303Skn /* $OpenBSD: uvm_object.c,v 1.25 2022/02/21 16:08:36 kn Exp $ */
22fa8d88bSoga
32fa8d88bSoga /*
469c04514Smpi * Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
52fa8d88bSoga * All rights reserved.
62fa8d88bSoga *
72fa8d88bSoga * This code is derived from software contributed to The NetBSD Foundation
82fa8d88bSoga * by Mindaugas Rasiukevicius.
92fa8d88bSoga *
102fa8d88bSoga * Redistribution and use in source and binary forms, with or without
112fa8d88bSoga * modification, are permitted provided that the following conditions
122fa8d88bSoga * are met:
132fa8d88bSoga * 1. Redistributions of source code must retain the above copyright
142fa8d88bSoga * notice, this list of conditions and the following disclaimer.
152fa8d88bSoga * 2. Redistributions in binary form must reproduce the above copyright
162fa8d88bSoga * notice, this list of conditions and the following disclaimer in the
172fa8d88bSoga * documentation and/or other materials provided with the distribution.
182fa8d88bSoga *
192fa8d88bSoga * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
202fa8d88bSoga * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
212fa8d88bSoga * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
222fa8d88bSoga * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
232fa8d88bSoga * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
242fa8d88bSoga * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
252fa8d88bSoga * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
262fa8d88bSoga * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
272fa8d88bSoga * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
282fa8d88bSoga * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
292fa8d88bSoga * POSSIBILITY OF SUCH DAMAGE.
302fa8d88bSoga */
312fa8d88bSoga
322fa8d88bSoga /*
332fa8d88bSoga * uvm_object.c: operate with memory objects
342fa8d88bSoga *
352fa8d88bSoga */
362fa8d88bSoga
372fa8d88bSoga #include <sys/param.h>
38af074ab7Smpi #include <sys/systm.h>
391e8cdc2eSderaadt #include <sys/mman.h>
4003d1830dStedu #include <sys/atomic.h>
4169c04514Smpi #include <sys/rwlock.h>
422fa8d88bSoga
432fa8d88bSoga #include <uvm/uvm.h>
442fa8d88bSoga
4594151407Smpi /* Dummy pager used by some pmaps for sanity checks.  uvm_obj_init()
 * compares pgops against this address only, so it carries no methods. */
4694151407Smpi const struct uvm_pagerops pmap_pager = {
4794151407Smpi /* nothing: identity tag only */
4894151407Smpi };
4994151407Smpi
5094151407Smpi /* Dummy pager used by the buffer cache for sanity checks; presumably
 * what UVM_OBJ_IS_BUFCACHE() matches against — identity tag only. */
5194151407Smpi const struct uvm_pagerops bufcache_pager = {
5294151407Smpi /* nothing: identity tag only */
5394151407Smpi };
5494151407Smpi
5569c04514Smpi /* Page count to fetch per single step. */
562fa8d88bSoga #define FETCH_PAGECOUNT 16
572fa8d88bSoga
582fa8d88bSoga /*
5969c04514Smpi * uvm_obj_init: initialize UVM memory object.
60ca0f98feSoga */
61ca0f98feSoga void
uvm_obj_init(struct uvm_object * uobj,const struct uvm_pagerops * pgops,int refs)62da3d0110Smpi uvm_obj_init(struct uvm_object *uobj, const struct uvm_pagerops *pgops, int refs)
63ca0f98feSoga {
6469c04514Smpi int alock;
6569c04514Smpi
/*
 * Only objects with real pager operations (not the pmap/bufcache
 * identity tags) and with a regular reference count get a dynamically
 * allocated vmobjlock here; kernel objects (refs == UVM_OBJ_KERN) and
 * the dummy-pager objects are left lockless for now.
 */
6669c04514Smpi alock = ((pgops != NULL) && (pgops != &pmap_pager) &&
6769c04514Smpi (pgops != &bufcache_pager) && (refs != UVM_OBJ_KERN));
6869c04514Smpi
6969c04514Smpi if (alock) {
7069c04514Smpi /* Allocate and assign a lock. */
7169c04514Smpi rw_obj_alloc(&uobj->vmobjlock, "uobjlk");
7269c04514Smpi } else {
7369c04514Smpi /* The lock will need to be set via uvm_obj_setlock(). */
7469c04514Smpi uobj->vmobjlock = NULL;
7569c04514Smpi }
/* Remaining fields: pager ops, empty page tree, zero pages, initial refs. */
76ca0f98feSoga uobj->pgops = pgops;
77262a556aSdlg RBT_INIT(uvm_objtree, &uobj->memt);
78ca0f98feSoga uobj->uo_npages = 0;
79ca0f98feSoga uobj->uo_refs = refs;
80ca0f98feSoga }
81ca0f98feSoga
822c850ee8Smpi /*
832c850ee8Smpi * uvm_obj_destroy: destroy UVM memory object.
842c850ee8Smpi */
853c82a206Skettenis void
uvm_obj_destroy(struct uvm_object * uo)863c82a206Skettenis uvm_obj_destroy(struct uvm_object *uo)
873c82a206Skettenis {
/* Caller must have already removed every page from the object. */
882c850ee8Smpi KASSERT(RBT_EMPTY(uvm_objtree, &uo->memt));
8969c04514Smpi
/* Drop this object's reference on its vmobjlock. */
9069c04514Smpi rw_obj_free(uo->vmobjlock);
9169c04514Smpi }
9269c04514Smpi
9369c04514Smpi /*
9469c04514Smpi * uvm_obj_setlock: assign a vmobjlock to the UVM object.
9569c04514Smpi *
9669c04514Smpi * => Caller is responsible to ensure that the UVM object is not in use.
9769c04514Smpi * => Only dynamic lock may be previously set. We drop the reference then.
9869c04514Smpi */
9969c04514Smpi void
uvm_obj_setlock(struct uvm_object * uo,struct rwlock * lockptr)10069c04514Smpi uvm_obj_setlock(struct uvm_object *uo, struct rwlock *lockptr)
10169c04514Smpi {
10269c04514Smpi struct rwlock *olockptr = uo->vmobjlock;
10369c04514Smpi
10469c04514Smpi if (olockptr) {
10569c04514Smpi /* Drop the reference on the old lock. */
10669c04514Smpi rw_obj_free(olockptr);
10769c04514Smpi }
10869c04514Smpi if (lockptr == NULL) {
10969c04514Smpi /* If new lock is not passed - allocate default one. */
11069c04514Smpi rw_obj_alloc(&lockptr, "uobjlk");
11169c04514Smpi }
/* Install the new (possibly shared) lock; we now hold one reference. */
11269c04514Smpi uo->vmobjlock = lockptr;
1133c82a206Skettenis }
1143c82a206Skettenis
115e6250aa7Soga #ifndef SMALL_KERNEL
116ca0f98feSoga /*
11769c04514Smpi * uvm_obj_wire: wire the pages of entire UVM object.
1182fa8d88bSoga *
11969c04514Smpi * => NOTE: this function should only be used for types of objects
12069c04514Smpi * where PG_RELEASED flag is never set (aobj objects)
1212fa8d88bSoga * => caller must pass page-aligned start and end values
1222fa8d88bSoga * => if the caller passes in a pageq pointer, we'll return a list of
1232fa8d88bSoga * wired pages.
1242fa8d88bSoga */
1252fa8d88bSoga
1262fa8d88bSoga int
uvm_obj_wire(struct uvm_object * uobj,voff_t start,voff_t end,struct pglist * pageq)127da3d0110Smpi uvm_obj_wire(struct uvm_object *uobj, voff_t start, voff_t end,
1282fa8d88bSoga struct pglist *pageq)
1292fa8d88bSoga {
13036d5d901Skettenis int i, npages, left, error;
1312fa8d88bSoga struct vm_page *pgs[FETCH_PAGECOUNT];
13236d5d901Skettenis voff_t offset = start;
1332fa8d88bSoga
/* Number of pages still to fetch; caller guarantees page alignment. */
1342fa8d88bSoga left = (end - start) >> PAGE_SHIFT;
1352fa8d88bSoga
136*34c50303Skn rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK);
1372fa8d88bSoga while (left) {
1382fa8d88bSoga
1392fa8d88bSoga npages = MIN(FETCH_PAGECOUNT, left);
1402fa8d88bSoga
1412fa8d88bSoga /* Get the pages (synchronously, in batches of FETCH_PAGECOUNT) */
1422fa8d88bSoga memset(pgs, 0, sizeof(pgs));
1432fa8d88bSoga error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
14415cd8707Sguenther PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
1452fa8d88bSoga PGO_ALLPAGES | PGO_SYNCIO);
1462fa8d88bSoga
1472fa8d88bSoga if (error)
1482fa8d88bSoga goto error;
1492fa8d88bSoga
/*
 * NOTE(review): pgo_get with PGO_SYNCIO evidently returns with
 * vmobjlock released — it is re-acquired here, and the error path
 * above falls into uvm_obj_unwire() which locks it again.  Confirm
 * against the pager contract.
 */
150*34c50303Skn rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK);
1512fa8d88bSoga for (i = 0; i < npages; i++) {
1522fa8d88bSoga
1532fa8d88bSoga KASSERT(pgs[i] != NULL);
1542fa8d88bSoga KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));
1552fa8d88bSoga
/* For aobj pages: dirty the page and release its swap backing. */
1562fa8d88bSoga if (pgs[i]->pg_flags & PQ_AOBJ) {
1572fa8d88bSoga atomic_clearbits_int(&pgs[i]->pg_flags,
1582fa8d88bSoga PG_CLEAN);
/*
 * NOTE(review): 'i' is the in-batch index, not the page's offset
 * within the object — looks suspicious but matches the historic
 * code; verify against uao_dropswap()'s expected "page number".
 */
1592fa8d88bSoga uao_dropswap(uobj, i);
1602fa8d88bSoga }
1612fa8d88bSoga }
1622fa8d88bSoga
1632fa8d88bSoga /* Wire the pages (page queues must be locked for uvm_pagewire) */
1642fa8d88bSoga uvm_lock_pageq();
1652fa8d88bSoga for (i = 0; i < npages; i++) {
1662fa8d88bSoga uvm_pagewire(pgs[i]);
1672fa8d88bSoga if (pageq != NULL)
1682fa8d88bSoga TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
1692fa8d88bSoga }
1702fa8d88bSoga uvm_unlock_pageq();
1712fa8d88bSoga
1722fa8d88bSoga /* Unbusy the pages */
1732fa8d88bSoga uvm_page_unbusy(pgs, npages);
1742fa8d88bSoga
1752fa8d88bSoga left -= npages;
17636d5d901Skettenis offset += (voff_t)npages << PAGE_SHIFT;
1772fa8d88bSoga }
17869c04514Smpi rw_exit(uobj->vmobjlock);
1792fa8d88bSoga
1802fa8d88bSoga return 0;
1812fa8d88bSoga
1822fa8d88bSoga error:
/* Unwire the pages which have been wired; [start, offset) is the
 * range completed by earlier iterations. */
1832fa8d88bSoga /* Unwire the pages which have been wired */
184da3d0110Smpi uvm_obj_unwire(uobj, start, offset);
1852fa8d88bSoga
1862fa8d88bSoga return error;
1872fa8d88bSoga }
1882fa8d88bSoga
1892fa8d88bSoga /*
19069c04514Smpi * uvm_obj_unwire: unwire the pages of entire UVM object.
1912fa8d88bSoga *
1922fa8d88bSoga * => caller must pass page-aligned start and end values
1932fa8d88bSoga */
1942fa8d88bSoga void
uvm_obj_unwire(struct uvm_object * uobj,voff_t start,voff_t end)195da3d0110Smpi uvm_obj_unwire(struct uvm_object *uobj, voff_t start, voff_t end)
1962fa8d88bSoga {
1972fa8d88bSoga struct vm_page *pg;
1982fa8d88bSoga off_t offset;
1992fa8d88bSoga
/* Walk the range one page at a time under the object lock and the
 * page-queue lock, unwiring each resident page. */
200*34c50303Skn rw_enter(uobj->vmobjlock, RW_WRITE | RW_DUPOK);
2012fa8d88bSoga uvm_lock_pageq();
2022fa8d88bSoga for (offset = start; offset < end; offset += PAGE_SIZE) {
2032fa8d88bSoga pg = uvm_pagelookup(uobj, offset);
2042fa8d88bSoga
/* Every page in the range must be resident (it was wired). */
2052fa8d88bSoga KASSERT(pg != NULL);
2062fa8d88bSoga KASSERT(!(pg->pg_flags & PG_RELEASED));
2072fa8d88bSoga
2082fa8d88bSoga uvm_pageunwire(pg);
2092fa8d88bSoga }
2102fa8d88bSoga uvm_unlock_pageq();
21169c04514Smpi rw_exit(uobj->vmobjlock);
2122fa8d88bSoga }
213e6250aa7Soga #endif /* !SMALL_KERNEL */
214e91f82a5Sbeck
215e91f82a5Sbeck /*
216da3d0110Smpi * uvm_obj_free: free all pages in a uvm object, used by the buffer
217e91f82a5Sbeck * cache to free all pages attached to a buffer.
218e91f82a5Sbeck */
219e91f82a5Sbeck void
uvm_obj_free(struct uvm_object * uobj)220da3d0110Smpi uvm_obj_free(struct uvm_object *uobj)
221e91f82a5Sbeck {
222e91f82a5Sbeck struct vm_page *pg;
223e91f82a5Sbeck struct pglist pgl;
224e91f82a5Sbeck
/* Only buffer-cache objects may be freed this way, and only with the
 * kernel lock held. */
22594151407Smpi KASSERT(UVM_OBJ_IS_BUFCACHE(uobj));
22694151407Smpi KERNEL_ASSERT_LOCKED();
22794151407Smpi
228e91f82a5Sbeck TAILQ_INIT(&pgl);
229e91f82a5Sbeck /*
230e91f82a5Sbeck * Extract from rb tree in offset order. The phys addresses
231e91f82a5Sbeck * usually increase in that order, which is better for
2327f144f4cSmpi * uvm_pglistfree().
233e91f82a5Sbeck */
234e91f82a5Sbeck RBT_FOREACH(pg, uvm_objtree, &uobj->memt) {
235e91f82a5Sbeck /*
236e91f82a5Sbeck * clear PG_TABLED so we don't do work to remove
237e91f82a5Sbeck * this pg from the uobj we are throwing away
238e91f82a5Sbeck */
239e91f82a5Sbeck atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
/* uvm_pageclean() needs the page queues locked per page. */
240d21d8ab4Smpi uvm_lock_pageq();
241e91f82a5Sbeck uvm_pageclean(pg);
242d21d8ab4Smpi uvm_unlock_pageq();
/* Collect the page for a single batched free below. */
243e91f82a5Sbeck TAILQ_INSERT_TAIL(&pgl, pg, pageq);
244e91f82a5Sbeck }
2457f144f4cSmpi uvm_pglistfree(&pgl);
246e91f82a5Sbeck }
247e91f82a5Sbeck
248