/*	$NetBSD: uvm_object.c,v 1.3.30.2 2008/01/08 22:12:07 bouyer Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operate with memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.3.30.2 2008/01/08 22:12:07 bouyer Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>

#include <uvm/uvm.h>

/* We will fetch this page count per step */
#define	FETCH_PAGECOUNT	16

/*
 * uobj_wirepages: wire the pages of an entire uobj
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 */

int
uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(&uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

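		/*
		 * Added note: a synchronous pgo_get (PGO_SYNCIO) returns
		 * with uobj->vmobjlock released, which is why the lock is
		 * re-acquired here before the returned pages are examined.
		 */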
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(&uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(&uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired so far */
	uobj_unwirepages(uobj, start, offset);

	return error;
}
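
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might use uobj_wirepages()/uobj_unwirepages() on an anonymous UVM object,
 * which never sets PG_RELEASED.  The helper name example_wire_aobj() and
 * its error handling are assumptions for illustration only; the block is
 * kept under #if 0 so it is never compiled.
 */
#if 0
static int
example_wire_aobj(vsize_t len)
{
	struct uvm_object *uobj;
	int error;

	/* Create an anonymous (aobj) object covering 'len' bytes. */
	uobj = uao_create(len, 0);

	/* Wire the whole object; start and end must be page-aligned. */
	error = uobj_wirepages(uobj, 0, round_page(len));
	if (error) {
		/* The error path above already unwired any partial range. */
		uao_detach(uobj);
		return error;
	}

	/* ... the pages now stay resident until unwired ... */

	uobj_unwirepages(uobj, 0, round_page(len));
	uao_detach(uobj);
	return 0;
}
#endif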

/*
 * uobj_unwirepages: unwire the pages of an entire uobj
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */

void
uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&uobj->vmobjlock);
}
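
/*
 * Illustrative sketch (not part of the original file): temporarily pinning
 * a byte range of an existing object that never uses PG_RELEASED.  The
 * caller, the function name example_pin_range(), and the 'off'/'len'
 * parameters are assumptions for illustration only; the block is kept under
 * #if 0 so it is never compiled.
 */
#if 0
static int
example_pin_range(struct uvm_object *uobj, off_t off, size_t len)
{
	/* Round the byte range out to page boundaries, as required. */
	const off_t start = trunc_page(off);
	const off_t end = round_page(off + len);
	int error;

	error = uobj_wirepages(uobj, start, end);
	if (error)
		return error;

	/* ... pages in [start, end) are wired and will not be paged out ... */

	/* Always unwire exactly the range that was wired. */
	uobj_unwirepages(uobj, start, end);
	return 0;
}
#endif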