/*	$NetBSD: uvm_object.c,v 1.3 2007/02/17 20:45:36 rmind Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operate with memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */
46 1.1 yamt
47 1.1 yamt #include <sys/cdefs.h>
48 1.3 rmind __KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.3 2007/02/17 20:45:36 rmind Exp $");
49 1.1 yamt
50 1.1 yamt #include "opt_uvmhist.h"
51 1.1 yamt
52 1.1 yamt #include <sys/param.h>
53 1.1 yamt #include <sys/lock.h>
54 1.1 yamt
55 1.1 yamt #include <uvm/uvm.h>
56 1.1 yamt
57 1.1 yamt /* We will fetch this page count per step */
58 1.1 yamt #define FETCH_PAGECOUNT 16
59 1.1 yamt
60 1.1 yamt /*
61 1.1 yamt * uobj_wirepages: wire the pages of entire uobj
62 1.1 yamt *
63 1.1 yamt * => NOTE: this function should only be used for types of objects
64 1.1 yamt * where PG_RELEASED flag is never set (aobj objects)
65 1.1 yamt * => caller must pass page-aligned start and end values
66 1.1 yamt */
67 1.1 yamt
68 1.1 yamt int
69 1.1 yamt uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
70 1.1 yamt {
71 1.1 yamt int i, npages, error;
72 1.1 yamt struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
73 1.1 yamt off_t offset = start, left;
74 1.1 yamt
75 1.1 yamt left = (end - start) >> PAGE_SHIFT;
76 1.1 yamt
77 1.1 yamt simple_lock(&uobj->vmobjlock);
78 1.1 yamt while (left) {
79 1.1 yamt
80 1.1 yamt npages = MIN(FETCH_PAGECOUNT, left);
81 1.1 yamt
82 1.1 yamt /* Get the pages */
83 1.1 yamt memset(pgs, 0, sizeof(pgs));
84 1.1 yamt error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
85 1.1 yamt VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
86 1.1 yamt PGO_ALLPAGES | PGO_SYNCIO);
87 1.1 yamt
88 1.1 yamt if (error)
89 1.1 yamt goto error;
90 1.1 yamt
91 1.1 yamt simple_lock(&uobj->vmobjlock);
92 1.1 yamt for (i = 0; i < npages; i++) {
93 1.1 yamt
94 1.1 yamt KASSERT(pgs[i] != NULL);
95 1.1 yamt KASSERT(!(pgs[i]->flags & PG_RELEASED));
96 1.1 yamt
97 1.1 yamt /*
98 1.1 yamt * Loan break
99 1.1 yamt */
100 1.1 yamt if (pgs[i]->loan_count) {
101 1.1 yamt while (pgs[i]->loan_count) {
102 1.1 yamt pg = uvm_loanbreak(pgs[i]);
103 1.1 yamt if (!pg) {
104 1.1 yamt simple_unlock(&uobj->vmobjlock);
105 1.1 yamt uvm_wait("uobjwirepg");
106 1.1 yamt simple_lock(&uobj->vmobjlock);
107 1.1 yamt continue;
108 1.1 yamt }
109 1.1 yamt }
110 1.1 yamt pgs[i] = pg;
111 1.1 yamt }
112 1.1 yamt
113 1.1 yamt if (pgs[i]->pqflags & PQ_AOBJ) {
114 1.1 yamt pgs[i]->flags &= ~(PG_CLEAN);
115 1.1 yamt uao_dropswap(uobj, i);
116 1.1 yamt }
117 1.1 yamt }
118 1.1 yamt
119 1.1 yamt /* Wire the pages */
120 1.1 yamt uvm_lock_pageq();
121 1.1 yamt for (i = 0; i < npages; i++) {
122 1.1 yamt uvm_pagewire(pgs[i]);
123 1.1 yamt }
124 1.1 yamt uvm_unlock_pageq();
125 1.1 yamt
126 1.1 yamt /* Unbusy the pages */
127 1.1 yamt uvm_page_unbusy(pgs, npages);
128 1.1 yamt
129 1.1 yamt left -= npages;
130 1.1 yamt offset += npages << PAGE_SHIFT;
131 1.1 yamt }
132 1.1 yamt simple_unlock(&uobj->vmobjlock);
133 1.1 yamt
134 1.1 yamt return 0;
135 1.1 yamt
136 1.1 yamt error:
137 1.1 yamt /* Unwire the pages which has been wired */
138 1.1 yamt uobj_unwirepages(uobj, start, offset);
139 1.1 yamt
140 1.1 yamt return error;
141 1.1 yamt }
142 1.1 yamt
143 1.1 yamt /*
144 1.1 yamt * uobj_unwirepages: unwire the pages of entire uobj
145 1.1 yamt *
146 1.1 yamt * => NOTE: this function should only be used for types of objects
147 1.1 yamt * where PG_RELEASED flag is never set
148 1.1 yamt * => caller must pass page-aligned start and end values
149 1.1 yamt */
150 1.1 yamt
151 1.1 yamt void
152 1.1 yamt uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
153 1.1 yamt {
154 1.1 yamt struct vm_page *pg;
155 1.1 yamt off_t offset;
156 1.1 yamt
157 1.1 yamt simple_lock(&uobj->vmobjlock);
158 1.1 yamt uvm_lock_pageq();
159 1.1 yamt for (offset = start; offset < end; offset += PAGE_SIZE) {
160 1.1 yamt pg = uvm_pagelookup(uobj, offset);
161 1.1 yamt
162 1.1 yamt KASSERT(pg != NULL);
163 1.1 yamt KASSERT(!(pg->flags & PG_RELEASED));
164 1.1 yamt
165 1.1 yamt uvm_pageunwire(pg);
166 1.1 yamt }
167 1.1 yamt uvm_unlock_pageq();
168 1.1 yamt simple_unlock(&uobj->vmobjlock);
169 1.1 yamt }
170