/*	$NetBSD: uvm_object.c,v 1.7.4.5 2011/03/05 20:56:37 rmind Exp $	*/

/*
 * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operate with memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.5 2011/03/05 20:56:37 rmind Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rbtree.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

/* Page count to fetch per single step. */
#define FETCH_PAGECOUNT		16

/*
 * uvm_obj_init: initialize UVM memory object.
 */
void
uvm_obj_init(struct uvm_object *uo, const struct uvm_pagerops *ops,
    kmutex_t *lockptr, u_int refs)
{

	if (lockptr == NULL) {
		uo->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	} else {
		uo->vmobjlock = lockptr;
		mutex_init(uo->vmobjlock, MUTEX_DEFAULT, IPL_NONE);
	}
	uo->pgops = ops;
	TAILQ_INIT(&uo->memq);
	LIST_INIT(&uo->uo_ubc);
	uo->uo_npages = 0;
	uo->uo_refs = refs;
	rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
}

/*
 * uvm_obj_destroy: destroy UVM memory object.
 */
void
uvm_obj_destroy(struct uvm_object *uo, kmutex_t *lockptr)
{
#ifdef DIAGNOSTIC
	/* The object must not have any pages left in its tree. */
	void *tmp = NULL;
	KASSERT(rb_tree_find_node_geq(&uo->rb_tree, &tmp) == NULL);
#endif
	/* Purge any UBC entries associated with this object. */
	if (__predict_false(!LIST_EMPTY(&uo->uo_ubc))) {
		ubc_purge(uo);
	}
	/* Finally, it is safe to destroy the lock. */
	if (lockptr) {
		KASSERT(uo->vmobjlock == lockptr);
		mutex_destroy(uo->vmobjlock);
	} else {
		mutex_obj_free(uo->vmobjlock);
	}
}
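
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * a pager embedding a uvm_object would typically pair the two calls
 * above, e.g. letting uvm_obj_init() allocate a private lock:
 *
 *	struct uvm_object uo;
 *
 *	uvm_obj_init(&uo, &my_pagerops, NULL, 1);   (NULL => allocate lock)
 *	...
 *	uvm_obj_destroy(&uo, NULL);                 (frees the private lock)
 *
 * Here "my_pagerops" stands for some struct uvm_pagerops; a caller that
 * owns the lock would instead pass the same kmutex_t pointer to both
 * functions.
 */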

/*
 * uvm_obj_wirepages: wire the pages of the entire uobj
 *
 * => NOTE: this function should only be used for types of objects
 *    where PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 */

int
uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/*
		 * Get the pages.  Note: pgo_get() releases the object
		 * lock, so it is re-acquired below before touching the
		 * returned pages.
		 */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
		    VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
		    PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break: if the page is loaned out, get a
			 * private copy of it so that it can be wired.
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_obj_unwirepages(uobj, start, offset);

	return error;
}

/*
 * uvm_obj_unwirepages: unwire the pages of the entire uobj
 *
 * => NOTE: this function should only be used for types of objects
 *    where PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */

void
uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(uobj->vmobjlock);
}
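
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * a caller that needs a range of an object resident and wired, say for a
 * hypothetical "buf_obj" backing a long-lived buffer, would do roughly:
 *
 *	off_t start = 0, end = round_page(len);   (page-aligned, as required)
 *
 *	error = uvm_obj_wirepages(buf_obj, start, end);
 *	if (error)
 *		return error;
 *	... use the wired pages ...
 *	uvm_obj_unwirepages(buf_obj, start, end);
 *
 * On failure uvm_obj_wirepages() already unwires whatever it managed to
 * wire, so uvm_obj_unwirepages() is only called after success.
 */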

#if defined(DDB) || defined(DEBUGPRINT)

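/*
 * Note (an assumption, not stated in this file): uvm_object_printit() below
 * is normally reached from the in-kernel debugger; a "show object" style
 * DDB command (see ddb(4) for the exact syntax) is the usual entry point,
 * with "full" selecting whether the per-page list is dumped as well.
 */
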
/*
 * uvm_object_printit: actually prints the object
 */
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		cnt++;
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 0) {
			(*pr)("\n  ");
		}
	}
	if ((cnt % 3) != 0) {
		(*pr)("\n");
	}
}

#endif /* DDB || DEBUGPRINT */