/*	$NetBSD: uvm_pglist.c,v 1.19 2001/11/10 07:37:01 lukem Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.19 2001/11/10 07:37:01 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by the caller.
 * => returns 0 on success or errno on failure
 * => XXX: the implementation allocates only a single segment, and it
 *	might be able to take better advantage of vm_physseg[].
 * => doesn't take into account clean non-busy pages on the inactive list
 *	that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */

int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
	psize_t size;
	paddr_t low, high, alignment, boundary;
	struct pglist *rlist;
	int nsegs, waitok;
{
	paddr_t try, idxpa, lastidxpa;
	int psi;
	struct vm_page *pgs, *pg;
	int s, tryidx, idx, pgflidx, end, error, free_list, color;
	u_long pagemask;
#ifdef DEBUG
	struct vm_page *tp;
#endif

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */

	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	size = round_page(size);
	try = roundup(low, alignment);
	if (boundary != 0 && boundary < size)
		return (EINVAL);
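	/*
	 * When a boundary is given, pagemask clears the offset bits
	 * within one boundary-sized region; two page addresses fall in
	 * the same region exactly when their masked values are equal,
	 * which is what the ((lastidxpa ^ idxpa) & pagemask) test
	 * further below relies on.
	 */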
	pagemask = ~(boundary - 1);

	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */

	s = uvm_lock_fpageq();

	/* Are there even any free pages? */
	if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
		goto out;

	for (;; try += alignment) {
		if (try + size > high) {

			/*
			 * We've run past the allowable range.
			 */

			goto out;
		}

		/*
		 * Make sure this is a managed physical page.
		 */

		if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
			continue; /* managed? */
		if (vm_physseg_find(atop(try + size), NULL) != psi)
			continue; /* end must be in this segment */
		tryidx = idx;
		end = idx + (size / PAGE_SIZE);
		pgs = vm_physmem[psi].pgs;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */

		for (; idx < end; idx++) {
			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
				break;
			}
			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
			if (idx > tryidx) {
				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
				if ((lastidxpa + PAGE_SIZE) != idxpa) {

					/*
					 * Region not contiguous.
					 */

					break;
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & pagemask) != 0) {

					/*
					 * Region crosses boundary.
					 */

					break;
				}
			}
		}
		if (idx == end) {
			break;
		}
	}

#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif

	/*
	 * we have a chunk of memory that conforms to the requested constraints.
	 */
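	/*
	 * Pull each page in the run off whichever free queue it is on
	 * (zero'd or unknown, within its free list and color bucket),
	 * reset its state, and put it at the tail of the caller's rlist.
	 */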
	idx = tryidx;
	while (idx < end) {
		pg = &pgs[idx];
		free_list = uvm_page_lookup_freelist(pg);
		color = VM_PGCOLOR_BUCKET(pg);
		pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
		for (tp = TAILQ_FIRST(&uvm.page_free[
		    free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
		     tp != NULL;
		     tp = TAILQ_NEXT(tp, pageq)) {
			if (tp == pg)
				break;
		}
		if (tp == NULL)
			panic("uvm_pglistalloc: page not on freelist");
#endif
		TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_buckets[
		    color].pgfl_queues[pgflidx], pg, pageq);
		uvmexp.free--;
		if (pg->flags & PG_ZERO)
			uvmexp.zeropages--;
		pg->flags = PG_CLEAN;
		pg->pqflags = 0;
		pg->uobject = NULL;
		pg->uanon = NULL;
		TAILQ_INSERT_TAIL(rlist, pg, pageq);
		idx++;
		STAT_INCR(uvm_pglistalloc_npages);
	}
	error = 0;

 out:
	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	UVM_KICK_PDAEMON();
	uvm_unlock_fpageq(s);
	return (error);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(list)
	struct pglist *list;
{
	struct vm_page *pg;
	int s;

	/*
	 * Lock the free list and free each page.
	 */

	s = uvm_lock_fpageq();
	while ((pg = TAILQ_FIRST(list)) != NULL) {
		KASSERT((pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) == 0);
		TAILQ_REMOVE(list, pg, pageq);
		pg->pqflags = PQ_FREE;
		TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)].
		    pgfl_buckets[VM_PGCOLOR_BUCKET(pg)].
		    pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free++;
		if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
			uvm.page_idle_zero = vm_page_zero_enable;
		STAT_DECR(uvm_pglistalloc_npages);
	}
	uvm_unlock_fpageq(s);
}
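
/*
 * Example (editor's sketch, not part of the original file): a typical
 * caller of uvm_pglistalloc()/uvm_pglistfree().  The function name and
 * the constraint values below are hypothetical; only the pglist/TAILQ
 * call pattern follows the interfaces defined above.
 */
#ifdef notdef
static int
example_contig_alloc(psize_t size, paddr_t low, paddr_t high)
{
	struct pglist mlist;
	struct vm_page *pg;
	paddr_t pa;
	int error;

	/* The caller initializes the list; pages are appended to it. */
	TAILQ_INIT(&mlist);

	/* One segment, page alignment, no boundary restriction. */
	error = uvm_pglistalloc(size, low, high, PAGE_SIZE, 0,
	    &mlist, 1, 0 /* !waitok */);
	if (error)
		return (error);

	/* Walk the returned pages; they are physically contiguous. */
	for (pg = TAILQ_FIRST(&mlist); pg != NULL;
	     pg = TAILQ_NEXT(pg, pageq)) {
		pa = VM_PAGE_TO_PHYS(pg);
		/* ... hand pa to the device, map it, etc. ... */
	}

	/* When finished (and after unmapping), return the pages. */
	uvm_pglistfree(&mlist);
	return (0);
}
#endif /* notdef */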