/*	$NetBSD: uvm_pglist.c,v 1.2 1998/02/06 22:32:26 thorpej Exp $	*/

#define VM_PAGE_ALLOC_MEMORY_STATS

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 *
 * XXX: was part of uvm_page but has an incompatible copyright, so it
 * gets its own file now.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	uvm_pglistalloc_npages;	/* pages currently allocated and not yet freed */
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif /* VM_PAGE_ALLOC_MEMORY_STATS */

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by the caller.
 * => returns 0 on success or errno on failure
 * => XXX: implementation allocates only a single segment, and could
 *    probably take better advantage of vm_physmem[].
 * => doesn't take into account clean non-busy pages on the inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
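
/*
 * Illustrative sketch of a caller (hypothetical, not taken from the tree):
 * a driver needing "bufsize" bytes of physically contiguous, page-aligned
 * memory below 16MB might do something like:
 *
 *	struct pglist mlist;
 *	int error;
 *
 *	TAILQ_INIT(&mlist);
 *	error = uvm_pglistalloc(bufsize, 0, 0x1000000, PAGE_SIZE, 0,
 *	    &mlist, 1, 0);
 *	if (error)
 *		return (error);
 *	... map and use the pages on mlist, then uvm_pglistfree(&mlist) ...
 */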

int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
	vm_size_t size;
	vm_offset_t low, high, alignment, boundary;
	struct pglist *rlist;
	int nsegs, waitok;
{
	vm_offset_t try, idxpa, lastidxpa;
	int psi;
	struct vm_page *pgs;
	int s, tryidx, idx, end, error;
	vm_page_t m;
	u_long pagemask;
#ifdef DEBUG
	vm_page_t tp;
#endif

#ifdef DIAGNOSTIC
	if ((alignment & (alignment - 1)) != 0)
		panic("uvm_pglistalloc: alignment must be power of 2");

	if ((boundary & (boundary - 1)) != 0)
		panic("uvm_pglistalloc: boundary must be power of 2");
#endif
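
	/*
	 * The (x & (x - 1)) tests above rely on the fact that a power of
	 * two has exactly one bit set, so clearing its lowest set bit
	 * yields zero; e.g. 0x1000 & 0x0fff == 0, while the non-power-of-two
	 * 0x1800 gives 0x1800 & 0x17ff == 0x1000 != 0.
	 */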

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	size = round_page(size);
	try = roundup(low, alignment);

	if (boundary != 0 && boundary < size)
		return (EINVAL);

	pagemask = ~(boundary - 1);
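
	/*
	 * With a power-of-two boundary, two physical addresses fall within
	 * the same boundary-sized block exactly when their bits above the
	 * boundary agree, i.e. ((a ^ b) & ~(boundary - 1)) == 0.  For
	 * example, with boundary = 0x10000, pagemask = 0xffff0000 (32-bit
	 * paddr): 0x0000f000 and 0x00010000 differ under the mask, so a run
	 * containing both would be rejected by the scan loop below.
	 */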

	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = splimp();
	uvm_lock_fpageq();		/* lock free page queue */

	/* Are there even any free pages? */
	if (uvm.page_free.tqh_first == NULL)
		goto out;

	for (;; try += alignment) {
		if (try + size > high) {
			/*
			 * We've run past the allowable range.
			 */
			goto out;
		}

		/*
		 * Make sure this is a managed physical page.
		 */

		if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
			continue;	/* managed? */
		if (vm_physseg_find(atop(try + size), NULL) != psi)
			continue;	/* end must be in this segment */

		tryidx = idx;
		end = idx + (size / PAGE_SIZE);
		pgs = vm_physmem[psi].pgs;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */
		for (; idx < end; idx++) {
			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
				/*
				 * Page not available.
				 */
				break;
			}

			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);

			if (idx > tryidx) {
				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);

				if ((lastidxpa + PAGE_SIZE) != idxpa) {
					/*
					 * Region not contiguous.
					 */
					break;
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & pagemask) != 0) {
					/*
					 * Region crosses boundary.
					 */
					break;
				}
			}
		}

		if (idx == end) {
			/*
			 * Woo hoo!  Found one.
			 */
			break;
		}
	}

	/*
	 * we have a chunk of memory that conforms to the requested constraints.
	 */
	idx = tryidx;
	while (idx < end) {
		m = &pgs[idx];
#ifdef DEBUG
		for (tp = uvm.page_free.tqh_first; tp != NULL;
		    tp = tp->pageq.tqe_next) {
			if (tp == m)
				break;
		}
		if (tp == NULL)
			panic("uvm_pglistalloc: page not on freelist");
#endif
		TAILQ_REMOVE(&uvm.page_free, m, pageq);
		uvmexp.free--;
		m->flags = PG_CLEAN;
		m->pqflags = 0;
		m->uobject = NULL;
		m->uanon = NULL;
		m->wire_count = 0;
		m->loan_count = 0;
		TAILQ_INSERT_TAIL(rlist, m, pageq);
		idx++;
		STAT_INCR(uvm_pglistalloc_npages);
	}
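
	/*
	 * At this point each page handed back on rlist is marked PG_CLEAN,
	 * has no owning object or anon, and is neither wired nor on loan;
	 * it is up to the caller to map the pages as needed.
	 */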
	error = 0;

out:
	uvm_unlock_fpageq();
	splx(s);

	/*
	 * check to see if we need to generate some free pages by waking
	 * up the pagedaemon.
	 * XXX: we read uvmexp.free without locking
	 */

	if (uvmexp.free < uvmexp.freemin ||
	    (uvmexp.free < uvmexp.freetarg &&
	     uvmexp.inactive < uvmexp.inactarg)) {
		thread_wakeup(&uvm.pagedaemon);
	}

	return (error);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
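
/*
 * Illustrative use (hypothetical): once the caller has unmapped the pages
 * it obtained from uvm_pglistalloc(), the whole list is released in one call:
 *
 *	uvm_pglistfree(&mlist);
 */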

void
uvm_pglistfree(list)
	struct pglist *list;
{
	vm_page_t m;
	int s;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = splimp();
	uvm_lock_fpageq();

	while ((m = list->tqh_first) != NULL) {
#ifdef DIAGNOSTIC
		if (m->pqflags & (PQ_ACTIVE|PQ_INACTIVE))
			panic("uvm_pglistfree: active/inactive page!");
#endif
		TAILQ_REMOVE(list, m, pageq);
		m->pqflags = PQ_FREE;
		TAILQ_INSERT_TAIL(&uvm.page_free, m, pageq);
		uvmexp.free++;
		STAT_DECR(uvm_pglistalloc_npages);
	}

	uvm_unlock_fpageq();
	splx(s);
}