/*	$NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.5 2020/05/15 22:25:18 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

/*
 * page dirtiness status tracking
 *
 * separated from uvm_page.c mainly for rump
 */

/*
 * these constants are chosen to match so that we can convert between
 * them quickly.
 */

__CTASSERT(UVM_PAGE_STATUS_UNKNOWN == 0);
__CTASSERT(UVM_PAGE_STATUS_DIRTY == PG_DIRTY);
__CTASSERT(UVM_PAGE_STATUS_CLEAN == PG_CLEAN);
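
/*
 * a minimal sketch of what this identity buys: the status can be read
 * straight out of pg->flags with a single mask and no translation table,
 * and the same value can double as an offset from a counter base:
 *
 *	unsigned int status = pg->flags & (PG_CLEAN|PG_DIRTY);
 *
 * uvm_pagegetdirty() and uvm_pagemarkdirty() below rely on this.
 */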

/*
 * uvm_pagegetdirty: return the dirtiness status (one of the
 * UVM_PAGE_STATUS_ values) of the page.
 *
 * called with the owner locked.
 */

unsigned int
uvm_pagegetdirty(struct vm_page *pg)
{
	struct uvm_object * const uobj __diagused = pg->uobject;
	const uint64_t idx __diagused = pg->offset >> PAGE_SHIFT;

	KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT(uvm_page_owner_locked_p(pg, false));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
	return pg->flags & (PG_CLEAN|PG_DIRTY);
}
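
/*
 * a hedged usage sketch (hypothetical caller; "uobj" and "pg" are assumed
 * to name an object and one of its pages): per the KASSERT above, holding
 * the owner's lock as a reader is enough to query the status:
 *
 *	rw_enter(uobj->vmobjlock, RW_READER);
 *	if (uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN) {
 *		... the page is dirty, or must be checked via the pmap ...
 *	}
 *	rw_exit(uobj->vmobjlock);
 */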

/*
 * uvm_pagemarkdirty: set the dirtiness status (one of the
 * UVM_PAGE_STATUS_ values) of the page.
 *
 * called with the owner locked.
 *
 * update the radix tree tag for an object-owned page.
 *
 * if the new status is UVM_PAGE_STATUS_UNKNOWN, clear the pmap-level dirty
 * bit so that a later uvm_pagecheckdirty() can notice modifications to the
 * page.
 */

void
uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
{
	struct uvm_object * const uobj = pg->uobject;
	const uint64_t idx = pg->offset >> PAGE_SHIFT;
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	enum cpu_count base;

	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
	KASSERT(uvm_page_owner_locked_p(pg, true));
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));

	if (oldstatus == newstatus) {
		return;
	}

	/*
	 * set UVM_PAGE_DIRTY_TAG tag unless known CLEAN so that putpages can
	 * find possibly-dirty pages quickly.
	 */

	if (uobj != NULL) {
		if (newstatus == UVM_PAGE_STATUS_CLEAN) {
			radix_tree_clear_tag(&uobj->uo_pages, idx,
			    UVM_PAGE_DIRTY_TAG);
		} else if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * on first dirty page, mark the object dirty.
			 * for vnodes this inserts to the syncer worklist.
			 */
			if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
			    UVM_PAGE_DIRTY_TAG) &&
			    uobj->pgops->pgo_markdirty != NULL) {
				(*uobj->pgops->pgo_markdirty)(uobj);
			}
			radix_tree_set_tag(&uobj->uo_pages, idx,
			    UVM_PAGE_DIRTY_TAG);
		}
	}
	if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
		/*
		 * start relying on pmap-level dirtiness tracking.
		 */
		pmap_clear_modify(pg);
	}
	pg->flags &= ~(PG_CLEAN|PG_DIRTY);
	pg->flags |= newstatus;
	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
	    !!radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
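	/*
	 * adjust the per-status page counters.  the base + status arithmetic
	 * below depends on the CPU_COUNT_* dirtiness counters being laid out
	 * to match the UVM_PAGE_STATUS_ values asserted at the top of this
	 * file, so the status selects the unknown/clean/dirty counter.
	 */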
	if ((pg->flags & PG_STAT) != 0) {
		if ((pg->flags & PG_SWAPBACKED) != 0) {
			base = CPU_COUNT_ANONUNKNOWN;
		} else {
			base = CPU_COUNT_FILEUNKNOWN;
		}
		kpreempt_disable();
		CPU_COUNT(base + oldstatus, -1);
		CPU_COUNT(base + newstatus, +1);
		kpreempt_enable();
	}
}
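
/*
 * a hedged usage sketch (hypothetical caller): code that has just modified
 * a page records the fact under the owner's exclusive lock, so putpages and
 * the syncer can find it later:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
 *	rw_exit(uobj->vmobjlock);
 *
 * passing UVM_PAGE_STATUS_UNKNOWN instead hands tracking back to the pmap,
 * as described in the comment above uvm_pagemarkdirty().
 */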

/*
 * uvm_pagecheckdirty: check if page is dirty, and remove its dirty bit.
 *
 * called with the owner locked.
 *
 * returns true if the page was dirty.
 *
 * if pgprotected is true, mark the page CLEAN.  otherwise, mark the page
 * UNKNOWN.  ("mark" in the sense of uvm_pagemarkdirty().)
 */

bool
uvm_pagecheckdirty(struct vm_page *pg, bool pgprotected)
{
	const unsigned int oldstatus = uvm_pagegetdirty(pg);
	bool modified;

	KASSERT(uvm_page_owner_locked_p(pg, true));

	/*
	 * if pgprotected is true, mark the page CLEAN.
	 * otherwise mark the page UNKNOWN unless it's CLEAN.
	 *
	 * possible transitions:
	 *
	 *	CLEAN   -> CLEAN  , modified = false
	 *	UNKNOWN -> UNKNOWN, modified = true
	 *	UNKNOWN -> UNKNOWN, modified = false
	 *	UNKNOWN -> CLEAN  , modified = true
	 *	UNKNOWN -> CLEAN  , modified = false
	 *	DIRTY   -> UNKNOWN, modified = true
	 *	DIRTY   -> CLEAN  , modified = true
	 *
	 * pmap_clear_modify is necessary if either of
	 * oldstatus or newstatus is UVM_PAGE_STATUS_UNKNOWN.
	 */

	if (oldstatus == UVM_PAGE_STATUS_CLEAN) {
		modified = false;
	} else {
		const unsigned int newstatus = pgprotected ?
		    UVM_PAGE_STATUS_CLEAN : UVM_PAGE_STATUS_UNKNOWN;

		if (oldstatus == UVM_PAGE_STATUS_DIRTY) {
			modified = true;
			if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
				pmap_clear_modify(pg);
			}
		} else {
			KASSERT(oldstatus == UVM_PAGE_STATUS_UNKNOWN);
			modified = pmap_clear_modify(pg);
		}
		uvm_pagemarkdirty(pg, newstatus);
	}
	return modified;
}
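
/*
 * a hedged usage sketch (hypothetical pageout-style caller): the caller
 * write-protects the page first, so any modification after the check will
 * fault and be noticed again; it can then pass pgprotected = true and treat
 * a false return as "nothing to clean":
 *
 *	pmap_page_protect(pg, VM_PROT_READ);
 *	if (uvm_pagecheckdirty(pg, true)) {
 *		... schedule the page for write-back ...
 *	}
 *
 * with pgprotected = false the page is instead marked UNKNOWN, and later
 * checks rely on pmap_clear_modify() to detect further writes.
 */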