/* uvm_pglist.c, revision 1.1 */

#define VM_PAGE_ALLOC_MEMORY_STATS

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 *
 * XXX: this was part of uvm_page but has an incompatible copyright, so it
 * gets its own file now.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by the caller.
 * => returns 0 on success or an errno value on failure
 * => XXX: the implementation allocates only a single segment; it also
 *    might be able to take better advantage of the vm_physmem[] segments.
 * => doesn't take into account clean, non-busy pages on the inactive
 *    list that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
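
/*
 * Example (an illustrative sketch only; the calling context, variable
 * names, and address range below are hypothetical): a driver that needs
 * four physically contiguous pages below 16MB could use
 *
 *	struct pglist mlist;
 *	int error;
 *
 *	TAILQ_INIT(&mlist);
 *	error = uvm_pglistalloc(4 * PAGE_SIZE, 0, 0x1000000, PAGE_SIZE, 0,
 *	    &mlist, 1, 0);
 *	if (error)
 *		return (error);
 *	... map and use the pages ...
 *	uvm_pglistfree(&mlist);
 */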

int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
	vm_size_t size;
	vm_offset_t low, high, alignment, boundary;
	struct pglist *rlist;
	int nsegs, waitok;
{
	vm_offset_t try, idxpa, lastidxpa;
	int psi;
	struct vm_page *pgs;
	int s, tryidx, idx, end, error;
	vm_page_t m;
	u_long pagemask;
#ifdef DEBUG
	vm_page_t tp;
#endif

#ifdef DIAGNOSTIC
	if ((alignment & (alignment - 1)) != 0)
		panic("uvm_pglistalloc: alignment must be power of 2");

	if ((boundary & (boundary - 1)) != 0)
		panic("uvm_pglistalloc: boundary must be power of 2");
#endif

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	size = round_page(size);
	try = roundup(low, alignment);

	if (boundary != 0 && boundary < size)
		return (EINVAL);

	pagemask = ~(boundary - 1);
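
	/*
	 * Note: boundary is required to be a power of two, so pagemask
	 * clears the address bits below it.  Two physical addresses fall
	 * in the same boundary-sized window exactly when
	 * (pa1 ^ pa2) & pagemask == 0.  For example, with a 64KB boundary
	 * pagemask is ~0xffff, so pages at 0xf000 and 0x10000 differ in
	 * the masked bits and would be rejected by the boundary check in
	 * the scan loop below.
	 */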

	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = splimp();
	uvm_lock_fpageq();		/* lock free page queue */

	/* Are there even any free pages? */
	if (uvm.page_free.tqh_first == NULL)
		goto out;

	for (;; try += alignment) {
		if (try + size > high) {
			/*
			 * We've run past the allowable range.
			 */
			goto out;
		}

		/*
		 * Make sure this is a managed physical page.
		 */

		if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
			continue;	/* managed? */
		if (vm_physseg_find(atop(try + size), NULL) != psi)
			continue;	/* end must be in this segment */

		tryidx = idx;
		end = idx + (size / PAGE_SIZE);
		pgs = vm_physmem[psi].pgs;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */
		for (; idx < end; idx++) {
			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
				/*
				 * Page not available.
				 */
				break;
			}

			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);

			if (idx > tryidx) {
				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);

				if ((lastidxpa + PAGE_SIZE) != idxpa) {
					/*
					 * Region not contiguous.
					 */
					break;
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & pagemask) != 0) {
					/*
					 * Region crosses boundary.
					 */
					break;
				}
			}
		}

		if (idx == end) {
			/*
			 * Woo hoo!  Found one.
			 */
			break;
		}
	}

	/*
	 * We have a chunk of memory that conforms to the requested
	 * constraints.
	 */
	idx = tryidx;
	while (idx < end) {
		m = &pgs[idx];
#ifdef DEBUG
		for (tp = uvm.page_free.tqh_first; tp != NULL;
		    tp = tp->pageq.tqe_next) {
			if (tp == m)
				break;
		}
		if (tp == NULL)
			panic("uvm_pglistalloc: page not on freelist");
#endif
		TAILQ_REMOVE(&uvm.page_free, m, pageq);
		uvmexp.free--;
		m->flags = PG_CLEAN;
		m->pqflags = 0;
		m->uobject = NULL;
		m->uanon = NULL;
		m->wire_count = 0;
		m->loan_count = 0;
		TAILQ_INSERT_TAIL(rlist, m, pageq);
		idx++;
		STAT_INCR(uvm_pglistalloc_npages);
	}
	error = 0;

 out:
	uvm_unlock_fpageq();
	splx(s);

	/*
	 * Check to see if we need to generate some free pages by waking
	 * up the pagedaemon.
	 * XXX: we read uvmexp.free without locking.
	 */

	if (uvmexp.free < uvmexp.freemin ||
	    (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg)) {
		thread_wakeup(&uvm.pagedaemon);
	}

	return (error);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
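
/*
 * Example (illustrative sketch; va, size, and mlist are hypothetical): a
 * caller that mapped the allocated pages into kernel virtual space would
 * unmap them before returning them, e.g.
 *
 *	pmap_remove(pmap_kernel(), va, va + size);
 *	uvm_pglistfree(&mlist);
 */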

void
uvm_pglistfree(list)
	struct pglist *list;
{
	vm_page_t m;
	int s;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = splimp();
	uvm_lock_fpageq();

	while ((m = list->tqh_first) != NULL) {
#ifdef DIAGNOSTIC
		if (m->pqflags & (PQ_ACTIVE|PQ_INACTIVE))
			panic("uvm_pglistfree: active/inactive page!");
#endif
		TAILQ_REMOVE(list, m, pageq);
		m->pqflags = PQ_FREE;
		TAILQ_INSERT_TAIL(&uvm.page_free, m, pageq);
		uvmexp.free++;
		STAT_DECR(uvm_pglistalloc_npages);
	}

	uvm_unlock_fpageq();
	splx(s);
}