/*	$NetBSD: uvm_object.c,v 1.7.2.2 2010/05/31 13:26:38 uebayasi Exp $	*/
2
3 /*
4 * Copyright (c) 2006 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * uvm_object.c: operate with memory objects
34 *
35 * TODO:
36 * 1. Support PG_RELEASED-using objects
37 *
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.2.2 2010/05/31 13:26:38 uebayasi Exp $");
42
43 #include "opt_ddb.h"
44 #include "opt_uvmhist.h"
45
46 #include <sys/param.h>
47
48 #include <uvm/uvm.h>
49 #include <uvm/uvm_ddb.h>
50
51 /* We will fetch this page count per step */
52 #define FETCH_PAGECOUNT 16
53
54 /*
55 * uobj_wirepages: wire the pages of entire uobj
56 *
57 * => NOTE: this function should only be used for types of objects
58 * where PG_RELEASED flag is never set (aobj objects)
59 * => caller must pass page-aligned start and end values
60 */
61
int
uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	/* Number of pages left to wire; caller guarantees page alignment. */
	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(&uobj->vmobjlock);
	while (left) {

		/* Fetch and wire at most FETCH_PAGECOUNT pages per step. */
		npages = MIN(FETCH_PAGECOUNT, left);

		/*
		 * Get the pages.  pgo_get() with PGO_SYNCIO consumes
		 * vmobjlock: it returns with the object unlocked on both
		 * success and failure — hence the re-enter below and the
		 * lock-free error path at the bottom.
		 */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			/*
			 * Directly-mapped (XIP device) pages are never
			 * loaned or swap-backed, so skip the fixups.
			 * NOTE(review): such entries are still passed to
			 * uvm_pagewire() in the loop below — confirm that
			 * is intended for direct pages.
			 */
			if (uvm_pageisdirect_p(pgs[i]))
				continue;

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break: a loaned page cannot be wired, so
			 * replace it with a private copy.  If no memory
			 * is available for the copy, drop the object lock,
			 * wait for free pages and retry.
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(&uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(&uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			/*
			 * An aobj page about to be wired must stay
			 * resident: mark it dirty and release its swap
			 * slot.
			 * NOTE(review): uao_dropswap() takes a page index,
			 * but `i' is relative to this batch rather than to
			 * the object, so batches after the first appear to
			 * drop the wrong slot — verify against the
			 * uao_dropswap() contract.
			 */
			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages; the page queues need uvm_pageqlock. */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages: pgo_get() returned them busy. */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;

error:
	/*
	 * Unwire whatever has been wired so far ([start, offset)).
	 * vmobjlock is not held here — pgo_get() released it on failure.
	 */
	uobj_unwirepages(uobj, start, offset);

	return error;
}
139
140 /*
141 * uobj_unwirepages: unwire the pages of entire uobj
142 *
143 * => NOTE: this function should only be used for types of objects
144 * where PG_RELEASED flag is never set
145 * => caller must pass page-aligned start and end values
146 */
147
148 void
149 uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
150 {
151 struct vm_page *pg;
152 off_t offset;
153
154 mutex_enter(&uobj->vmobjlock);
155 mutex_enter(&uvm_pageqlock);
156 for (offset = start; offset < end; offset += PAGE_SIZE) {
157 pg = uvm_pagelookup(uobj, offset);
158
159 KASSERT(pg != NULL);
160 KASSERT(!(pg->flags & PG_RELEASED));
161
162 uvm_pageunwire(pg);
163 }
164 mutex_exit(&uvm_pageqlock);
165 mutex_exit(&uobj->vmobjlock);
166 }
167
168 #if defined(DDB) || defined(DEBUGPRINT)
169
170 /*
171 * uvm_object_printit: actually prints the object
172 */
173
174 void
175 uvm_object_printit(struct uvm_object *uobj, bool full,
176 void (*pr)(const char *, ...))
177 {
178 struct vm_page *pg;
179 int cnt = 0;
180
181 (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
182 uobj, mutex_owned(&uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
183 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
184 (*pr)("refs=<SYSTEM>\n");
185 else
186 (*pr)("refs=%d\n", uobj->uo_refs);
187
188 if (!full) {
189 return;
190 }
191 (*pr)(" PAGES <pg,offset>:\n ");
192 TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
193 cnt++;
194 (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
195 if ((cnt % 3) == 0) {
196 (*pr)("\n ");
197 }
198 }
199 if ((cnt % 3) != 0) {
200 (*pr)("\n");
201 }
202 }
203
204 #endif /* DDB || DEBUGPRINT */
205