/*	$NetBSD: uvm_object.c,v 1.7.10.1 2011/06/23 14:20:36 cherry Exp $	*/

/*
 * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * uvm_object.c: operations on memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.10.1 2011/06/23 14:20:36 cherry Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rbtree.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

/* Page count to fetch per single step. */
#define	FETCH_PAGECOUNT		16

/*
 * uvm_obj_init: initialize UVM memory object.
 */
void
uvm_obj_init(struct uvm_object *uo, const struct uvm_pagerops *ops,
    bool alock, u_int refs)
{

	if (alock) {
		/* Allocate and assign a lock. */
		uo->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	} else {
		/* The lock will need to be set via uvm_obj_setlock(). */
		uo->vmobjlock = NULL;
	}
	uo->pgops = ops;
	TAILQ_INIT(&uo->memq);
	LIST_INIT(&uo->uo_ubc);
	uo->uo_npages = 0;
	uo->uo_refs = refs;
	rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
}
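
/*
 * Illustrative sketch (not part of this file): a pager typically embeds a
 * struct uvm_object and initializes it with its own pager operations, e.g.
 * an anonymous-memory style pager might do roughly:
 *
 *	uvm_obj_init(&aobj->u_obj, &aobj_pager, true, 1);
 *
 * where "aobj" and "aobj_pager" are hypothetical names here; passing
 * alock=true allocates a private vmobjlock, and refs=1 gives the caller
 * the initial reference.
 */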

/*
 * uvm_obj_destroy: destroy UVM memory object.
 */
void
uvm_obj_destroy(struct uvm_object *uo, bool dlock)
{

	KASSERT(rb_tree_iterate(&uo->rb_tree, NULL, RB_DIR_LEFT) == NULL);

	/* Purge any UBC entries associated with this object. */
	ubc_purge(uo);

	/* Destroy the lock, if requested. */
	if (dlock) {
		mutex_obj_free(uo->vmobjlock);
	}
}

/*
 * uvm_obj_setlock: assign a vmobjlock to the UVM object.
 *
 * => Caller is responsible for ensuring that the UVM object is not in use.
 * => Only a dynamic lock may have been previously set; its reference is
 *    dropped here.
 */
void
uvm_obj_setlock(struct uvm_object *uo, kmutex_t *lockptr)
{
	kmutex_t *olockptr = uo->vmobjlock;

	if (olockptr) {
		/* Drop the reference on the old lock. */
		mutex_obj_free(olockptr);
	}
	if (lockptr == NULL) {
		/* If no new lock was passed, allocate a default one. */
		lockptr = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	}
	uo->vmobjlock = lockptr;
}
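
/*
 * Illustrative sketch (hypothetical, not part of this file): a caller can
 * use uvm_obj_setlock() to make two objects share one lock object, e.g.:
 *
 *	kmutex_t *shared = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *
 *	uvm_obj_setlock(uo1, shared);
 *	mutex_obj_hold(shared);
 *	uvm_obj_setlock(uo2, shared);
 *
 * Each object is assumed to hold its own reference on the shared lock
 * object, hence the mutex_obj_hold() before assigning it a second time.
 */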

/*
 * uvm_obj_wirepages: wire pages of a UVM object in the range [start, end).
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set (e.g. aobj objects)
 * => caller must pass page-aligned start and end values
 */
int
uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
		    VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
		    PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

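		/*
		 * pgo_get is entered with vmobjlock held and, since
		 * PGO_LOCKED is not requested, returns with it released
		 * (on both success and error), so re-acquire the lock
		 * before inspecting the returned pages.
		 */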
		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break: if the page is loaned out, replace it
			 * with a private copy via uvm_loanbreak().  If that
			 * fails for lack of free pages, drop the object
			 * lock, wait for memory and retry.
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

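			/*
			 * For anonymous-object pages, mark the page dirty
			 * and release its swap resources: the page stays
			 * resident while wired, so the swap copy would only
			 * go stale.
			 */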
			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages that have been wired so far */
	uvm_obj_unwirepages(uobj, start, offset);

	return error;
}
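
/*
 * Illustrative sketch (hypothetical, not part of this file): a caller
 * wiring the whole backing object of a mapping of "size" bytes might do:
 *
 *	error = uvm_obj_wirepages(uobj, 0, round_page(size));
 *	if (error)
 *		return error;
 *	...
 *	uvm_obj_unwirepages(uobj, 0, round_page(size));
 *
 * Both offsets must be page-aligned, and the unwire range should match
 * the range that was wired.
 */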

/*
 * uvm_obj_unwirepages: unwire pages of a UVM object in the range
 * [start, end).
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */
void
uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(uobj->vmobjlock);
}

#if (defined(DDB) || defined(DEBUGPRINT)) && !defined(_RUMPKERNEL)

/*
 * uvm_object_printit: actually prints the object
 */
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		cnt++;
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 0) {
			(*pr)("\n  ");
		}
	}
	if ((cnt % 3) != 0) {
		(*pr)("\n");
	}
}

#endif /* DDB || DEBUGPRINT */