/*	$NetBSD: uvm_pglist.c,v 1.42.16.3 2010/01/22 08:54:41 matt Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.42.16.3 2010/01/22 08:54:41 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (/*CONSTCOND*/ 0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed onto an rlist.  rlist is
 *    initialized by uvm_pglistalloc.
 * => returns 0 on success or errno on failure
 * => implementation allocates a single segment if any constraints are
 *    imposed by call arguments.
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
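
/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * caller needing physically contiguous, DMA-reachable pages below 16MB
 * might call uvm_pglistalloc() and uvm_pglistfree() roughly as follows.
 * The variable names (npages, mlist, waitok) are hypothetical.
 *
 *	struct pglist mlist;
 *	int error;
 *
 *	error = uvm_pglistalloc((psize_t)npages << PAGE_SHIFT, 0, 0x1000000,
 *	    PAGE_SIZE, 0, &mlist, 1, waitok);
 *	if (error)
 *		return error;
 *	... map and use the pages on &mlist ...
 *	uvm_pglistfree(&mlist);
 */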

static void
uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
{
	int free_list, color, pgflidx;

	KASSERT(mutex_owned(&uvm_fpageqlock));

#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif

	free_list = uvm_page_lookup_freelist(pg);
	color = VM_PGCOLOR_BUCKET(pg);
	pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#if defined(DEBUG) && DEBUG > 1
	struct vm_page *tp;
	LIST_FOREACH(tp,
	    &uvm.page_free[free_list].pgfl_buckets[color].pgfl_queues[pgflidx],
	    pageq.list) {
		if (tp == pg)
			break;
	}
	if (tp == NULL)
		panic("uvm_pglistalloc: page not on freelist");
#endif
	LIST_REMOVE(pg, pageq.list);	/* global */
	LIST_REMOVE(pg, listq.list);	/* cpu */
	uvmexp.free--;
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;
	VM_FREE_PAGE_TO_CPU(pg)->pages[pgflidx]--;
	pg->flags = PG_CLEAN;
	pg->pqflags = 0;
	pg->uobject = NULL;
	pg->uanon = NULL;
	TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
	STAT_INCR(uvm_pglistalloc_npages);
}

static int
uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
    paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
	signed int try, limit, tryidx, end, idx, skip;
	const signed int align = atop(alignment);
	struct vm_page *pgs;
	int pagemask;
#ifdef DEBUG
	paddr_t idxpa, lastidxpa;
	int cidx = 0;	/* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
	printf("pgalloc: contig %d pgs from psi %ld\n", num,
	    (long)(ps - vm_physmem));
#endif

	KASSERT(mutex_owned(&uvm_fpageqlock));

	try = roundup(max(atop(low), ps->avail_start), align);
	limit = min(atop(high), ps->avail_end);
	pagemask = ~((boundary >> PAGE_SHIFT) - 1);
	skip = 0;

	for (;;) {
		bool ok = true;
		int cnt;

		if (try + num > limit) {
			/*
			 * We've run past the allowable range.
			 */
			return (0); /* FAIL */
		}
		if (boundary != 0 &&
		    ((try ^ (try + num - 1)) & pagemask) != 0) {
			/*
			 * Region crosses boundary.  Jump to the boundary
			 * just crossed and ensure alignment.
			 */
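			/*
			 * Worked example (illustrative): with a boundary of
			 * 4 pages (pagemask == ~3), num == 3 and try == 2,
			 * (2 ^ 4) & ~3 == 4, so the run 2..4 would straddle
			 * the boundary at page 4.  try is advanced to
			 * (2 + 3 - 1) & ~3 == 4 and then re-aligned, giving
			 * the run 4..6, entirely within the next window.
			 */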
			try = (try + num - 1) & pagemask;
			try = roundup(try, align);
			continue;
		}
#ifdef DEBUG
		/*
		 * Make sure this is a managed physical page.
		 */

		if (vm_physseg_find(try, &cidx) != ps - vm_physmem)
			panic("pgalloc contig: botch1");
		if (cidx != try - ps->start)
			panic("pgalloc contig: botch2");
		if (vm_physseg_find(try + num - 1, &cidx) != ps - vm_physmem)
			panic("pgalloc contig: botch3");
		if (cidx != try - ps->start + num - 1)
			panic("pgalloc contig: botch4");
#endif
		tryidx = try - ps->start;
		end = tryidx + num;
		pgs = ps->pgs;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */
#ifdef PGALLOC_VERBOSE
		printf("%s: ps=%p try=%#x end=%#x skip=%#x, align=%#x",
		    __func__, ps, tryidx, end, skip, align);
#endif
		/*
		 * We start at the end and work backwards since if we find a
		 * non-free page, it makes no sense to continue.
		 *
		 * But on the plus side we have "vetted" some number of free
		 * pages.  If this iteration fails, we may be able to skip
		 * testing most of those pages again in the next pass.
		 */
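		/*
		 * Worked example (illustrative): with num == 8, if the
		 * backward scan hits a non-free page at idx == tryidx + 2,
		 * then cnt == 3 and (for align == 1) the next window starts
		 * 3 pages higher.  Its first 5 pages are exactly the pages
		 * already verified free by this pass, so skip == 8 - 3 == 5
		 * lets the next pass examine only the 3 new pages at the top.
		 */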
		for (idx = end - 1; idx >= tryidx + skip; idx--) {
			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
				ok = false;
				break;
			}

#ifdef DEBUG
			if (idx > tryidx) {
				idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
				if ((lastidxpa + PAGE_SIZE) != idxpa) {
					/*
					 * Region not contiguous.
					 */
					panic("pgalloc contig: botch5");
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & ~(boundary - 1))
				    != 0) {
					/*
					 * Region crosses boundary.
					 */
					panic("pgalloc contig: botch6");
				}
			}
#endif
		}

		if (ok) {
			while (skip-- > 0) {
				KDASSERT(VM_PAGE_IS_FREE(&pgs[tryidx + skip]));
			}
#ifdef PGALLOC_VERBOSE
			printf(": ok\n");
#endif
			break;
		}

#ifdef PGALLOC_VERBOSE
		printf(": non-free at %#x\n", idx - tryidx);
#endif
		/*
		 * count the number of pages we can advance
		 * since we know they aren't all free.
		 */
		cnt = idx + 1 - tryidx;
		/*
		 * now round up that to the needed alignment.
		 */
		cnt = roundup(cnt, align);
		/*
		 * The number of pages we can skip checking
		 * (might be 0 if cnt > num).
		 */
		skip = max(num - cnt, 0);
		try += cnt;
	}

	/*
	 * we have a chunk of memory that conforms to the requested constraints.
	 */
	for (idx = tryidx, pgs += idx; idx < end; idx++, pgs++)
		uvm_pglist_add(pgs, rlist);

#ifdef PGALLOC_VERBOSE
	printf("got %d pgs\n", num);
#endif
	return (num); /* number of pages allocated */
}

static int
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist)
{
	int fl, psi;
	struct vm_physseg *ps;
	int error;

	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	mutex_spin_enter(&uvm_fpageqlock);

	/* Are there even any free pages? */
	if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
		goto out;

	for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
		for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
#else
		for (psi = 0 ; psi < vm_nphysseg ; psi++)
#endif
		{
			ps = &vm_physmem[psi];

			if (ps->free_list != fl)
				continue;

			num -= uvm_pglistalloc_c_ps(ps, num, low, high,
			    alignment, boundary, rlist);
			if (num == 0) {
#ifdef PGALLOC_VERBOSE
				printf("pgalloc: %lx-%lx\n",
				    VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
				    VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
				error = 0;
				goto out;
			}
		}
	}

out:
	/*
	 * Check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	uvm_kick_pdaemon();
	mutex_spin_exit(&uvm_fpageqlock);
	return (error);
}

static int
uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
    struct pglist *rlist)
{
	int todo, limit, try;
	struct vm_page *pg;
#ifdef DEBUG
	int cidx = 0;	/* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
	printf("pgalloc: simple %d pgs from psi %ld\n", num,
	    (long)(ps - vm_physmem));
#endif

	KASSERT(mutex_owned(&uvm_fpageqlock));

	todo = num;
	limit = min(atop(high), ps->avail_end);

	for (try = max(atop(low), ps->avail_start);
	     try < limit; try ++) {
#ifdef DEBUG
		if (vm_physseg_find(try, &cidx) != ps - vm_physmem)
			panic("pgalloc simple: botch1");
		if (cidx != (try - ps->start))
			panic("pgalloc simple: botch2");
#endif
		pg = &ps->pgs[try - ps->start];
		if (VM_PAGE_IS_FREE(pg) == 0)
			continue;

		uvm_pglist_add(pg, rlist);
		if (--todo == 0)
			break;
	}

#ifdef PGALLOC_VERBOSE
	printf("got %d pgs\n", num - todo);
#endif
	return (num - todo); /* number of pages allocated */
}

static int
uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
    struct pglist *rlist, int waitok)
{
	int fl, psi, error;
	struct vm_physseg *ps;

	/* Default to "lose". */
	error = ENOMEM;

again:
	/*
	 * Block all memory allocation and lock the free list.
	 */
	mutex_spin_enter(&uvm_fpageqlock);

	/* Are there even any free pages? */
	if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
		goto out;

	for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
		for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
#else
		for (psi = 0 ; psi < vm_nphysseg ; psi++)
#endif
		{
			ps = &vm_physmem[psi];

			if (ps->free_list != fl)
				continue;

			num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist);
			if (num == 0) {
				error = 0;
				goto out;
			}
		}

	}

out:
	/*
	 * Check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	uvm_kick_pdaemon();
	mutex_spin_exit(&uvm_fpageqlock);

	if (error) {
		if (waitok) {
			/* XXX perhaps some time limitation? */
#ifdef DEBUG
			printf("pglistalloc waiting\n");
#endif
			uvm_wait("pglalloc");
			goto again;
		} else
			uvm_pglistfree(rlist);
	}
#ifdef PGALLOC_VERBOSE
	if (!error)
		printf("pgalloc: %lx..%lx\n",
		    VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
		    VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
	return (error);
}

int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int waitok)
{
	int num, res;

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);
	num = atop(round_page(size));
	low = roundup(low, alignment);

	TAILQ_INIT(rlist);

	if ((nsegs < size >> PAGE_SHIFT) || (alignment != PAGE_SIZE) ||
	    (boundary != 0))
		res = uvm_pglistalloc_contig(num, low, high, alignment,
		    boundary, rlist);
	else
		res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);

	return (res);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(struct pglist *list)
{
	struct uvm_cpu *ucpu;
	struct vm_page *pg;
	int index, color, queue;
	bool iszero;

	/*
	 * Lock the free list and free each page.
	 */

	mutex_spin_enter(&uvm_fpageqlock);
	ucpu = curcpu()->ci_data.cpu_uvm;
	while ((pg = TAILQ_FIRST(list)) != NULL) {
		KASSERT(!uvmpdpol_pageisqueued_p(pg));
		TAILQ_REMOVE(list, pg, pageq.queue);
		iszero = (pg->flags & PG_ZERO);
		pg->pqflags = PQ_FREE;
#ifdef DEBUG
		pg->uobject = (void *)0xdeadbeef;
		pg->uanon = (void *)0xdeadbeef;
#endif /* DEBUG */
#ifdef DEBUG
		if (iszero)
			uvm_pagezerocheck(pg);
#endif /* DEBUG */
		index = uvm_page_lookup_freelist(pg);
		color = VM_PGCOLOR_BUCKET(pg);
		queue = iszero ? PGFL_ZEROS : PGFL_UNKNOWN;
		pg->offset = (uintptr_t)ucpu;
		LIST_INSERT_HEAD(&uvm.page_free[index].pgfl_buckets[color].
		    pgfl_queues[queue], pg, pageq.list);
		LIST_INSERT_HEAD(&ucpu->page_free[index].pgfl_buckets[color].
		    pgfl_queues[queue], pg, listq.list);
		uvmexp.free++;
		if (iszero)
			uvmexp.zeropages++;
		ucpu->pages[queue]++;
		STAT_DECR(uvm_pglistalloc_npages);
	}
	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN])
		ucpu->page_idle_zero = vm_page_zero_enable;
	mutex_spin_exit(&uvm_fpageqlock);
}