/*	$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

/*
 * page dirtiness status tracking
 *
 * separated from uvm_page.c mainly for rump
 */

/*
 * these constants are chosen to match so that we can convert between
 * them quickly.
 */

__CTASSERT(UVM_PAGE_STATUS_UNKNOWN == 0);
__CTASSERT(UVM_PAGE_STATUS_DIRTY == PG_DIRTY);
__CTASSERT(UVM_PAGE_STATUS_CLEAN == PG_CLEAN);
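
/*
 * Illustrative sketch (not part of the original file): because the
 * UVM_PAGE_STATUS_* values are chosen to be numerically identical to the
 * corresponding PG_* flag bits, a status can be read straight out of the
 * flags word with a mask, and written back with a plain OR, with no
 * translation table needed.  The page pointer "pg" below is hypothetical.
 */
#if 0	/* example only, never compiled */
	unsigned int status;

	/* read: masking the flags yields a UVM_PAGE_STATUS_* value */
	status = pg->flags & (PG_CLEAN|PG_DIRTY);

	/* write: a status value can be OR'ed back into the flags directly */
	pg->flags = (pg->flags & ~(PG_CLEAN|PG_DIRTY)) | status;
#endif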

/*
 * uvm_pagegetdirty: return the dirtiness status (one of UVM_PAGE_STATUS_
 * values) of the page.
 *
 * called with the owner locked.
 */

unsigned int
uvm_pagegetdirty(struct vm_page *pg)
{
	struct uvm_object * const uobj __diagused = pg->uobject;

	KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT(uvm_page_owner_locked_p(pg, false));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));
	return pg->flags & (PG_CLEAN|PG_DIRTY);
}
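
/*
 * Usage sketch (added for illustration; not part of the original file):
 * a caller that wants to know whether a page may still need write-back
 * would take the owner's lock and treat anything other than CLEAN as
 * "possibly dirty".  The object "uobj" and page "pg" below are
 * hypothetical; the locking calls assume the krwlock_t-based vmobjlock
 * used by this era of the code.
 */
#if 0	/* example only, never compiled */
	rw_enter(uobj->vmobjlock, RW_READER);
	if (uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN) {
		/* DIRTY or UNKNOWN: the page may have modified contents */
	}
	rw_exit(uobj->vmobjlock);
#endif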

/*
 * uvm_pagemarkdirty: set the dirtiness status (one of UVM_PAGE_STATUS_ values)
 * of the page.
 *
 * called with the owner locked.
 *
 * update the radix tree tag for object-owned pages.
 *
 * if the new status is UVM_PAGE_STATUS_UNKNOWN, clear the pmap-level dirty
 * bit so that a later uvm_pagecheckdirty() can notice modifications of the
 * page.
 */

void
uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
{
	struct uvm_object * const uobj = pg->uobject;
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	enum cpu_count base;

	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
	KASSERT(uvm_page_owner_locked_p(pg, true));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));

	if (oldstatus == newstatus) {
		return;
	}

	/*
	 * set the UVM_PAGE_DIRTY_TAG tag unless the page is known to be
	 * CLEAN, so that putpages can find possibly-dirty pages quickly.
	 */

	if (uobj != NULL) {
		if (newstatus == UVM_PAGE_STATUS_CLEAN) {
			uvm_obj_page_clear_dirty(pg);
		} else if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * on first dirty page, mark the object dirty.
			 * for vnodes this inserts to the syncer worklist.
			 */
			if (uvm_obj_clean_p(uobj) &&
			    uobj->pgops->pgo_markdirty != NULL) {
				(*uobj->pgops->pgo_markdirty)(uobj);
			}
			uvm_obj_page_set_dirty(pg);
		}
	}
	if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
		/*
		 * start relying on pmap-level dirtiness tracking.
		 */
		pmap_clear_modify(pg);
	}
	pg->flags &= ~(PG_CLEAN|PG_DIRTY);
	pg->flags |= newstatus;
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));
	if ((pg->flags & PG_STAT) != 0) {
		if ((pg->flags & PG_SWAPBACKED) != 0) {
			base = CPU_COUNT_ANONUNKNOWN;
		} else {
			base = CPU_COUNT_FILEUNKNOWN;
		}
		kpreempt_disable();
		CPU_COUNT(base + oldstatus, -1);
		CPU_COUNT(base + newstatus, +1);
		kpreempt_enable();
	}
}
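
/*
 * Usage sketch (added for illustration; not part of the original file):
 * code that has just modified a page through a kernel mapping would mark
 * it dirty under the owner's write lock, so the change is reflected in
 * the radix tree tag and the per-CPU dirty/unknown counters.  "uobj" and
 * "pg" below are hypothetical.
 */
#if 0	/* example only, never compiled */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	rw_exit(uobj->vmobjlock);
#endif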

/*
 * uvm_pagecheckdirty: check if a page is dirty, and remove its dirty bit.
 *
 * called with the owner locked.
 *
 * returns true if the page was dirty.
 *
 * if pgprotected is true, mark the page CLEAN.  otherwise, mark the page
 * UNKNOWN.  ("mark" in the sense of uvm_pagemarkdirty().)
 */

bool
uvm_pagecheckdirty(struct vm_page *pg, bool pgprotected)
{
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	bool modified;

	KASSERT(uvm_page_owner_locked_p(pg, true));

	/*
	 * if pgprotected is true, mark the page CLEAN.
	 * otherwise mark the page UNKNOWN unless it's CLEAN.
	 *
	 * possible transitions:
	 *
	 *	CLEAN   -> CLEAN  , modified = false
	 *	UNKNOWN -> UNKNOWN, modified = true
	 *	UNKNOWN -> UNKNOWN, modified = false
	 *	UNKNOWN -> CLEAN  , modified = true
	 *	UNKNOWN -> CLEAN  , modified = false
	 *	DIRTY   -> UNKNOWN, modified = true
	 *	DIRTY   -> CLEAN  , modified = true
	 *
	 * pmap_clear_modify is necessary if either of
	 * oldstatus or newstatus is UVM_PAGE_STATUS_UNKNOWN.
	 */

	if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
		modified = false;
	} else {
		const unsigned int newstatus = pgprotected ?
		    UVM_PAGE_STATUS_CLEAN : UVM_PAGE_STATUS_UNKNOWN;

		if (oldstatus == UVM_PAGE_STATUS_DIRTY) {
			modified = true;
			if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
				pmap_clear_modify(pg);
			}
		} else {
			KASSERT(oldstatus == UVM_PAGE_STATUS_UNKNOWN);
			modified = pmap_clear_modify(pg);
		}
		uvm_pagemarkdirty(pg, newstatus);
	}
	return modified;
}
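
/*
 * Usage sketch (added for illustration; not part of the original file):
 * a page-cleaning path would typically revoke write access first and then
 * ask whether the page needs to be written out.  Passing true for
 * pgprotected is only safe once no further modification can go unnoticed,
 * e.g. after write mappings have been removed with pmap_page_protect().
 * "pg" below is hypothetical, and the owner is assumed write-locked.
 */
#if 0	/* example only, never compiled */
	pmap_page_protect(pg, VM_PROT_READ);
	if (uvm_pagecheckdirty(pg, true)) {
		/* the page had been modified; queue it for write-back */
	}
#endif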