/*	$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.6 2020/08/14 09:06:15 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

/*
 * page dirtiness status tracking
 *
 * separated from uvm_page.c mainly for rump
 */

/*
 * these constants are chosen to match so that we can convert between
 * them quickly.
 */

__CTASSERT(UVM_PAGE_STATUS_UNKNOWN == 0);
__CTASSERT(UVM_PAGE_STATUS_DIRTY == PG_DIRTY);
__CTASSERT(UVM_PAGE_STATUS_CLEAN == PG_CLEAN);

/*
 * uvm_pagegetdirty: return the dirtiness status (one of UVM_PAGE_STATUS_
 * values) of the page.
 *
 * called with the owner locked.
 */

unsigned int
uvm_pagegetdirty(struct vm_page *pg)
{
	struct uvm_object * const uobj __diagused = pg->uobject;

	KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT(uvm_page_owner_locked_p(pg, false));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));
	return pg->flags & (PG_CLEAN|PG_DIRTY);
}
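
/*
 * Illustrative sketch only (not part of the kernel build): how a
 * hypothetical caller might act on the three status values returned by
 * uvm_pagegetdirty().  The helper name "example_page_maybe_dirty" is made
 * up for this example.
 */
#if 0
static bool
example_page_maybe_dirty(struct vm_page *pg)
{

	/* called with the owner of pg locked, like uvm_pagegetdirty() */
	switch (uvm_pagegetdirty(pg)) {
	case UVM_PAGE_STATUS_CLEAN:
		/* known clean; nothing to write back */
		return false;
	case UVM_PAGE_STATUS_DIRTY:
		/* known dirty */
		return true;
	default:
		/*
		 * UVM_PAGE_STATUS_UNKNOWN: dirtiness is tracked at the
		 * pmap level; uvm_pagecheckdirty() resolves it.
		 */
		return true;
	}
}
#endif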

/*
 * uvm_pagemarkdirty: set the dirtiness status (one of UVM_PAGE_STATUS_ values)
 * of the page.
 *
 * called with the owner locked.
 *
 * update the radix tree tag for object-owned page.
 *
 * if new status is UVM_PAGE_STATUS_UNKNOWN, clear pmap-level dirty bit
 * so that later uvm_pagecheckdirty() can notice modifications on the page.
 */

void
uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
{
	struct uvm_object * const uobj = pg->uobject;
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	enum cpu_count base;

	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
	KASSERT(uvm_page_owner_locked_p(pg, true));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));

	if (oldstatus == newstatus) {
		return;
	}

	/*
	 * set UVM_PAGE_DIRTY_TAG tag unless known CLEAN so that putpages can
	 * find possibly-dirty pages quickly.
	 */

	if (uobj != NULL) {
		if (newstatus == UVM_PAGE_STATUS_CLEAN) {
			uvm_obj_page_clear_dirty(pg);
		} else if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * on first dirty page, mark the object dirty.
			 * for vnodes this inserts to the syncer worklist.
			 */
			if (uvm_obj_clean_p(uobj) &&
			    uobj->pgops->pgo_markdirty != NULL) {
				(*uobj->pgops->pgo_markdirty)(uobj);
			}
			uvm_obj_page_set_dirty(pg);
		}
	}
	if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
		/*
		 * start relying on pmap-level dirtiness tracking.
		 */
		pmap_clear_modify(pg);
	}
	pg->flags &= ~(PG_CLEAN|PG_DIRTY);
	pg->flags |= newstatus;
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));
	if ((pg->flags & PG_STAT) != 0) {
		if ((pg->flags & PG_SWAPBACKED) != 0) {
			base = CPU_COUNT_ANONUNKNOWN;
		} else {
			base = CPU_COUNT_FILEUNKNOWN;
		}
		kpreempt_disable();
		CPU_COUNT(base + oldstatus, -1);
		CPU_COUNT(base + newstatus, +1);
		kpreempt_enable();
	}
}
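
/*
 * Illustrative sketch only (not part of the kernel build): two plausible
 * ways a caller might use uvm_pagemarkdirty().  The helper names are made
 * up; the locking comments restate the function's own requirements.
 */
#if 0
static void
example_mark_after_kernel_write(struct vm_page *pg)
{

	/* owner (uvm_object or vm_anon) locked exclusively */
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
}

static void
example_mark_before_writable_mapping(struct vm_page *pg)
{

	/*
	 * once the page can be modified through a writable mapping, UVM
	 * can no longer assume it stays clean; downgrading to UNKNOWN
	 * clears the pmap modified bit so that a later uvm_pagecheckdirty()
	 * can detect whether a write actually happened.
	 */
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
}
#endif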

/*
 * uvm_pagecheckdirty: check if page is dirty, and remove its dirty bit.
 *
 * called with the owner locked.
 *
 * returns true if the page was dirty.
 *
 * if pgprotected is true, mark the page CLEAN.  otherwise, mark the page
 * UNKNOWN.  ("mark" in the sense of uvm_pagemarkdirty().)
 */

bool
uvm_pagecheckdirty(struct vm_page *pg, bool pgprotected)
{
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	bool modified;

	KASSERT(uvm_page_owner_locked_p(pg, true));

	/*
	 * if pgprotected is true, mark the page CLEAN.
	 * otherwise mark the page UNKNOWN unless it's already CLEAN.
	 *
	 * possible transitions:
	 *
	 *	CLEAN   -> CLEAN  , modified = false
	 *	UNKNOWN -> UNKNOWN, modified = true
	 *	UNKNOWN -> UNKNOWN, modified = false
	 *	UNKNOWN -> CLEAN  , modified = true
	 *	UNKNOWN -> CLEAN  , modified = false
	 *	DIRTY   -> UNKNOWN, modified = true
	 *	DIRTY   -> CLEAN  , modified = true
	 *
	 * pmap_clear_modify is necessary if either of
	 * oldstatus or newstatus is UVM_PAGE_STATUS_UNKNOWN.
	 */

	if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
		modified = false;
	} else {
		const unsigned int newstatus = pgprotected ?
		    UVM_PAGE_STATUS_CLEAN : UVM_PAGE_STATUS_UNKNOWN;

		if (oldstatus == UVM_PAGE_STATUS_DIRTY) {
			modified = true;
			if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
				pmap_clear_modify(pg);
			}
		} else {
			KASSERT(oldstatus == UVM_PAGE_STATUS_UNKNOWN);
			modified = pmap_clear_modify(pg);
		}
		uvm_pagemarkdirty(pg, newstatus);
	}
	return modified;
}
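
/*
 * Illustrative sketch only (not part of the kernel build): how a
 * hypothetical cleaning loop might use uvm_pagecheckdirty().  Passing
 * pgprotected = true is only safe if the caller has already revoked all
 * write access to the page (e.g. with pmap_page_protect()); otherwise the
 * CLEAN marking could race with a concurrent modification.
 * "example_pager_put" is a made-up stand-in for the pager I/O.
 */
#if 0
static void
example_clean_one_page(struct vm_page *pg, bool writeprotected)
{

	/* owner locked exclusively */
	if (uvm_pagecheckdirty(pg, writeprotected)) {
		/* the page had been modified since it was last cleaned */
		example_pager_put(pg);
	}
}
#endif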