/*	$NetBSD: uvm_object.c,v 1.5.6.1 2008/06/02 13:24:37 mjf Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.5.6.1 2008/06/02 13:24:37 mjf Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>

#include <uvm/uvm.h>

/* Number of pages to fetch per step. */
#define	FETCH_PAGECOUNT	16

/*
 * uobj_wirepages: wire the pages of a uobj in the range [start, end)
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 */

int
uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(&uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		/* pgo_get() returns with the object unlocked; re-acquire it. */
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Break any loans on the page before wiring it.
			 * uvm_loanbreak() fails when no replacement page
			 * can be allocated; wait for free memory and retry.
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(&uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(&uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			/*
			 * For aobj pages, mark the page dirty and free its
			 * swap slot: the resident page is the only copy
			 * while it is wired.
			 */
			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(&uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages that have already been wired. */
	uobj_unwirepages(uobj, start, offset);

	return error;
}
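
/*
 * Illustrative sketch only, kept out of the build: one way a hypothetical
 * caller could satisfy the page-alignment requirement above for an
 * arbitrary byte range of an anonymous object.  The function name and the
 * UOBJ_WIRE_EXAMPLE guard are invented for this sketch; trunc_page() and
 * round_page() are the usual NetBSD macros.  A non-zero return means no
 * pages remain wired, since the error path above already undid any
 * partial progress.
 */
#ifdef UOBJ_WIRE_EXAMPLE
static int
example_wire_bytes(struct uvm_object *uobj, off_t off, size_t len)
{
	/* Widen the byte range to page boundaries before wiring it. */
	return uobj_wirepages(uobj, trunc_page(off), round_page(off + len));
}
#endif /* UOBJ_WIRE_EXAMPLE */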

/*
 * uobj_unwirepages: unwire the pages of a uobj in the range [start, end)
 *
 * => NOTE: this function should only be used for types of objects
 *    where the PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */

void
uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&uobj->vmobjlock);
}
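
/*
 * Illustrative sketch only, kept out of the build: the expected pairing of
 * uobj_wirepages() and uobj_unwirepages() over the lifetime of an anonymous
 * object.  The function name and the UOBJ_WIRE_EXAMPLE guard are invented
 * for this sketch; uao_create(), uao_detach() and round_page() are the
 * standard UVM/NetBSD interfaces it assumes.
 */
#ifdef UOBJ_WIRE_EXAMPLE
static int
example_wired_buffer(vsize_t len)
{
	struct uvm_object *uobj;
	vsize_t size = round_page(len);
	int error;

	/* aobj pages never have PG_RELEASED set, as both functions require. */
	uobj = uao_create(size, 0);

	error = uobj_wirepages(uobj, 0, size);
	if (error == 0) {
		/* ... pages [0, size) are now resident and wired ... */
		uobj_unwirepages(uobj, 0, size);
	}

	/* Drop the reference; the aobj and its pages are freed. */
	uao_detach(uobj);
	return error;
}
#endif /* UOBJ_WIRE_EXAMPLE */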