/*	$NetBSD: uvm_page_status.c,v 1.4 2020/03/14 20:45:23 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.4 2020/03/14 20:45:23 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

/*
 * page dirtiness status tracking
 *
 * separated from uvm_page.c mainly for rump
 */

/*
 * these constants are chosen to match so that we can convert between
 * them quickly.
 */

__CTASSERT(UVM_PAGE_STATUS_UNKNOWN == 0);
__CTASSERT(UVM_PAGE_STATUS_DIRTY == PG_DIRTY);
__CTASSERT(UVM_PAGE_STATUS_CLEAN == PG_CLEAN);

/*
 * uvm_pagegetdirty: return the dirtiness status (one of UVM_PAGE_STATUS_
 * values) of the page.
 *
 * called with the owner locked.
 */

unsigned int
uvm_pagegetdirty(struct vm_page *pg)
{
	struct uvm_object * const uobj __diagused = pg->uobject;
	const uint64_t idx __diagused = pg->offset >> PAGE_SHIFT;

	KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT(uvm_page_owner_locked_p(pg, false));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
	return pg->flags & (PG_CLEAN|PG_DIRTY);
}

/*
 * uvm_pagemarkdirty: set the dirtiness status (one of UVM_PAGE_STATUS_ values)
 * of the page.
 *
 * called with the owner locked.
 *
 * update the radix tree tag for object-owned page.
 *
 * if new status is UVM_PAGE_STATUS_UNKNOWN, clear pmap-level dirty bit
 * so that later uvm_pagecheckdirty() can notice modifications on the page.
 */

void
uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
{
	struct uvm_object * const uobj = pg->uobject;
	const uint64_t idx = pg->offset >> PAGE_SHIFT;
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	enum cpu_count base;

	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
	KASSERT(uvm_page_owner_locked_p(pg, true));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));

	if (oldstatus == newstatus) {
		return;
	}

	/*
	 * set UVM_PAGE_DIRTY_TAG tag unless known CLEAN so that putpages can
	 * find possibly-dirty pages quickly.
	 */

	if (uobj != NULL) {
		if (newstatus == UVM_PAGE_STATUS_CLEAN) {
			radix_tree_clear_tag(&uobj->uo_pages, idx,
			    UVM_PAGE_DIRTY_TAG);
		} else {
			/*
			 * on first dirty page, mark the object dirty.
			 * for vnodes this inserts to the syncer worklist.
			 */
			if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
			    UVM_PAGE_DIRTY_TAG) &&
			    uobj->pgops->pgo_markdirty != NULL) {
				(*uobj->pgops->pgo_markdirty)(uobj);
			}
			radix_tree_set_tag(&uobj->uo_pages, idx,
			    UVM_PAGE_DIRTY_TAG);
		}
	}
	if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
		/*
		 * start relying on pmap-level dirtiness tracking.
		 */
		pmap_clear_modify(pg);
	}
	pg->flags &= ~(PG_CLEAN|PG_DIRTY);
	pg->flags |= newstatus;
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
	if ((pg->flags & PG_STAT) != 0) {
		if ((pg->flags & PG_SWAPBACKED) != 0) {
			base = CPU_COUNT_ANONUNKNOWN;
		} else {
			base = CPU_COUNT_FILEUNKNOWN;
		}
		kpreempt_disable();
		CPU_COUNT(base + oldstatus, -1);
		CPU_COUNT(base + newstatus, +1);
		kpreempt_enable();
	}
}

/*
 * uvm_pagecheckdirty: check if page is dirty, and remove its dirty bit.
 *
 * called with the owner locked.
 *
 * returns true if the page was dirty.
 *
 * if pgprotected is true, mark the page CLEAN.  otherwise, mark the page
 * UNKNOWN.  ("mark" in the sense of uvm_pagemarkdirty().)
 */

bool
uvm_pagecheckdirty(struct vm_page *pg, bool pgprotected)
{
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	bool modified;

	KASSERT(uvm_page_owner_locked_p(pg, true));

	/*
	 * if pgprotected is true, mark the page CLEAN.
	 * otherwise mark the page UNKNOWN unless it's CLEAN.
	 *
	 * possible transitions:
	 *
	 *	CLEAN   -> CLEAN  , modified = false
	 *	UNKNOWN -> UNKNOWN, modified = true
	 *	UNKNOWN -> UNKNOWN, modified = false
	 *	UNKNOWN -> CLEAN  , modified = true
	 *	UNKNOWN -> CLEAN  , modified = false
	 *	DIRTY   -> UNKNOWN, modified = true
	 *	DIRTY   -> CLEAN  , modified = true
	 *
	 * pmap_clear_modify is necessary if either of
	 * oldstatus or newstatus is UVM_PAGE_STATUS_UNKNOWN.
	 */

	if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
		modified = false;
	} else {
		const unsigned int newstatus = pgprotected ?
		    UVM_PAGE_STATUS_CLEAN : UVM_PAGE_STATUS_UNKNOWN;

		if (oldstatus == UVM_PAGE_STATUS_DIRTY) {
			modified = true;
			if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
				pmap_clear_modify(pg);
			}
		} else {
			KASSERT(oldstatus == UVM_PAGE_STATUS_UNKNOWN);
			modified = pmap_clear_modify(pg);
		}
		uvm_pagemarkdirty(pg, newstatus);
	}
	return modified;
}
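
/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical pageout-style caller showing how the functions above are
 * expected to be combined.  The helper name example_clean_page and the
 * choice of pmap_page_protect() here are assumptions for illustration;
 * real callers (e.g. genfs putpages) have more bookkeeping.  Like the
 * functions above, this must be called with the page's owner locked.
 */
static bool
example_clean_page(struct vm_page *pg)
{

	/*
	 * revoke write access first so no new modifications can race in,
	 * then ask whether the page was dirty.  passing pgprotected=true
	 * lets uvm_pagecheckdirty() mark the page CLEAN rather than
	 * UNKNOWN, since any later write will fault and re-dirty it via
	 * uvm_pagemarkdirty().
	 */
	pmap_page_protect(pg, VM_PROT_READ);
	if (uvm_pagecheckdirty(pg, true)) {
		/* page was modified; a real caller would write it back here */
		return true;
	}

	/* page was already clean; uvm_pagegetdirty() now reports CLEAN */
	KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
	return false;
}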