/*	$NetBSD: uvm_pglist.c,v 1.52 2011/01/18 21:43:29 matt Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.52 2011/01/18 21:43:29 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (/*CONSTCOND*/ 0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed onto an rlist.  rlist is
 *    initialized by uvm_pglistalloc.
 * => returns 0 on success or errno on failure
 * => the implementation allocates a single contiguous segment if any
 *    constraints are imposed by the call arguments.
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
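
/*
 * Example usage (an illustrative sketch added for exposition, not part
 * of the original source; the size, range, and alignment below are
 * hypothetical, in the spirit of an ISA DMA buffer):
 *
 *	struct pglist mlist;
 *	struct vm_page *pg;
 *	int error;
 *
 *	error = uvm_pglistalloc(64 * 1024, 0, 0xffffff, 64 * 1024, 0,
 *	    &mlist, 1, 0);
 *	if (error == 0) {
 *		TAILQ_FOREACH(pg, &mlist, pageq.queue)
 *			(use VM_PAGE_TO_PHYS(pg) here)
 *		uvm_pglistfree(&mlist);
 *	}
 *
 * With nsegs == 1 and a 64KB alignment this request takes the
 * contiguous path below; passing nsegs >= size/PAGE_SIZE, alignment
 * PAGE_SIZE, and boundary 0 would take the simple path instead.
 */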

static void
uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
{
        int free_list, color, pgflidx;

        KASSERT(mutex_owned(&uvm_fpageqlock));

#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif

        free_list = uvm_page_lookup_freelist(pg);
        color = VM_PGCOLOR_BUCKET(pg);
        pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef NOT_DEBUG
        struct vm_page *tp;
        LIST_FOREACH(tp,
            &uvm.page_free[free_list].pgfl_buckets[color].pgfl_queues[pgflidx],
            pageq.list) {
                if (tp == pg)
                        break;
        }
        if (tp == NULL)
                panic("uvm_pglistalloc: page not on freelist");
#endif
        LIST_REMOVE(pg, pageq.list);    /* global */
        LIST_REMOVE(pg, listq.list);    /* cpu */
        uvmexp.free--;
        if (pg->flags & PG_ZERO)
                uvmexp.zeropages--;
        VM_FREE_PAGE_TO_CPU(pg)->pages[pgflidx]--;
        pg->flags = PG_CLEAN;
        pg->pqflags = 0;
        pg->uobject = NULL;
        pg->uanon = NULL;
        TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
        STAT_INCR(uvm_pglistalloc_npages);
}

static int
uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
    paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
        signed int try, limit, tryidx, end, idx, skip;
        struct vm_page *pgs;
        int pagemask;
        bool second_pass;
#ifdef DEBUG
        paddr_t idxpa, lastidxpa;
        int cidx = 0;   /* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
        printf("pgalloc: contig %d pgs from psi %ld\n", num,
            (long)(ps - vm_physmem));
#endif

        KASSERT(mutex_owned(&uvm_fpageqlock));

        low = atop(low);
        high = atop(high);
        alignment = atop(alignment);

        /*
         * We start our search just after where the last allocation
         * succeeded.
         */
        try = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
        limit = min(high, ps->avail_end);
        pagemask = ~((boundary >> PAGE_SHIFT) - 1);
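        /*
         * Worked example (added commentary, assuming PAGE_SHIFT == 12):
         * for boundary == 0x100000 (1MB), boundary >> PAGE_SHIFT == 0x100
         * pages, so pagemask == ~0xff.  A candidate range crosses a 1MB
         * boundary exactly when the first and last page frame numbers
         * differ in some bit above the low eight, i.e. when
         * ((try ^ (try + num - 1)) & pagemask) != 0.
         */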
        skip = 0;
        second_pass = false;
        pgs = ps->pgs;

        for (;;) {
                bool ok = true;
                signed int cnt;

                if (try + num > limit) {
                        if (ps->start_hint == 0 || second_pass) {
                                /*
                                 * We've run past the allowable range.
                                 */
                                return 0; /* FAIL = 0 pages */
                        }
                        /*
                         * We've wrapped around the end of this segment,
                         * so restart at the beginning, but now our limit
                         * is where we started.
                         */
                        second_pass = true;
                        try = roundup2(max(low, ps->avail_start), alignment);
                        limit = min(high, ps->avail_start + ps->start_hint);
                        skip = 0;
                        continue;
                }
                if (boundary != 0 &&
                    ((try ^ (try + num - 1)) & pagemask) != 0) {
                        /*
                         * Region crosses boundary.  Jump to the boundary
                         * just crossed and ensure alignment.
                         */
                        try = (try + num - 1) & pagemask;
                        try = roundup2(try, alignment);
                        skip = 0;
                        continue;
                }
#ifdef DEBUG
                /*
                 * Make sure this is a managed physical page.
                 */

                if (vm_physseg_find(try, &cidx) != ps - vm_physmem)
                        panic("pgalloc contig: botch1");
                if (cidx != try - ps->start)
                        panic("pgalloc contig: botch2");
                if (vm_physseg_find(try + num - 1, &cidx) != ps - vm_physmem)
                        panic("pgalloc contig: botch3");
                if (cidx != try - ps->start + num - 1)
                        panic("pgalloc contig: botch4");
#endif
                tryidx = try - ps->start;
                end = tryidx + num;

                /*
                 * Found a suitable starting page.  See if the range is free.
                 */
#ifdef PGALLOC_VERBOSE
                printf("%s: ps=%p try=%#x end=%#x skip=%#x, align=%#x",
                    __func__, ps, tryidx, end, skip, alignment);
#endif
                /*
                 * We start at the end and work backwards since if we find a
                 * non-free page, it makes no sense to continue.
                 *
                 * But on the plus side we have "vetted" some number of free
                 * pages.  If this iteration fails, we may be able to skip
                 * testing most of those pages again in the next pass.
                 */
                for (idx = end - 1; idx >= tryidx + skip; idx--) {
                        if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
                                ok = false;
                                break;
                        }

#ifdef DEBUG
                        if (idx > tryidx) {
                                idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
                                lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
                                if ((lastidxpa + PAGE_SIZE) != idxpa) {
                                        /*
                                         * Region not contiguous.
                                         */
                                        panic("pgalloc contig: botch5");
                                }
                                if (boundary != 0 &&
                                    ((lastidxpa ^ idxpa) & ~(boundary - 1))
                                    != 0) {
                                        /*
                                         * Region crosses boundary.
                                         */
                                        panic("pgalloc contig: botch6");
                                }
                        }
#endif
                }

                if (ok) {
                        while (skip-- > 0) {
                                KDASSERT(VM_PAGE_IS_FREE(&pgs[tryidx + skip]));
                        }
#ifdef PGALLOC_VERBOSE
                        printf(": ok\n");
#endif
                        break;
                }

#ifdef PGALLOC_VERBOSE
                printf(": non-free at %#x\n", idx - tryidx);
#endif
                /*
                 * count the number of pages we can advance
                 * since we know they aren't all free.
                 */
                cnt = idx + 1 - tryidx;
                /*
                 * now round up that to the needed alignment.
                 */
                cnt = roundup2(cnt, alignment);
                /*
                 * The number of pages we can skip checking
                 * (might be 0 if cnt > num).
                 */
                skip = max(num - cnt, 0);
                try += cnt;
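                /*
                 * Worked example (added commentary): suppose num == 8,
                 * alignment == 1, and the scan above stopped at a non-free
                 * page at offset idx - tryidx == 2.  Then cnt == 3, the
                 * next candidate starts 3 pages later, and its first
                 * skip == 5 pages are exactly the tail of the old
                 * candidate that we already verified free, so the next
                 * pass only has to test the last 3 pages of the new range.
                 */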
        }

        /*
         * we have a chunk of memory that conforms to the requested constraints.
         */
        for (idx = tryidx, pgs += idx; idx < end; idx++, pgs++)
                uvm_pglist_add(pgs, rlist);

        /*
         * the next time we need to search this segment, start after this
         * chunk of pages we just allocated.
         */
        ps->start_hint = tryidx + num;

#ifdef PGALLOC_VERBOSE
        printf("got %d pgs\n", num);
#endif
        return num; /* number of pages allocated */
}

static int
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist)
{
        int fl, psi;
        struct vm_physseg *ps;
        int error;

        /* Default to "lose". */
        error = ENOMEM;

        /*
         * Block all memory allocation and lock the free list.
         */
        mutex_spin_enter(&uvm_fpageqlock);

        /* Are there even any free pages? */
        if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
                goto out;

        for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
                for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
#else
                for (psi = 0 ; psi < vm_nphysseg ; psi++)
#endif
                {
                        ps = &vm_physmem[psi];

                        if (ps->free_list != fl)
                                continue;

                        num -= uvm_pglistalloc_c_ps(ps, num, low, high,
                            alignment, boundary, rlist);
                        if (num == 0) {
#ifdef PGALLOC_VERBOSE
                                printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n",
                                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
                                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
                                error = 0;
                                goto out;
                        }
                }
        }

out:
        /*
         * check to see if we need to generate some free pages by waking
         * the pagedaemon.
         */

        uvm_kick_pdaemon();
        mutex_spin_exit(&uvm_fpageqlock);
        return (error);
}

static int
uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
    struct pglist *rlist)
{
        int todo, limit, try;
        struct vm_page *pg;
        bool second_pass;
#ifdef DEBUG
        int cidx = 0;   /* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
        printf("pgalloc: simple %d pgs from psi %ld\n", num,
            (long)(ps - vm_physmem));
#endif

        KASSERT(mutex_owned(&uvm_fpageqlock));

        low = atop(low);
        high = atop(high);
        todo = num;
        try = max(low, ps->avail_start + ps->start_hint);
        limit = min(high, ps->avail_end);
        pg = &ps->pgs[try - ps->start];
        second_pass = false;

        for (;; try++, pg++) {
                if (try >= limit) {
                        if (ps->start_hint == 0 || second_pass)
                                break;
                        second_pass = true;
                        try = max(low, ps->avail_start);
                        limit = min(high, ps->avail_start + ps->start_hint);
                        pg = &ps->pgs[try - ps->start];
                        continue;
                }
#ifdef DEBUG
                if (vm_physseg_find(try, &cidx) != ps - vm_physmem)
                        panic("pgalloc simple: botch1");
                if (cidx != (try - ps->start))
                        panic("pgalloc simple: botch2");
#endif
                if (VM_PAGE_IS_FREE(pg) == 0)
                        continue;

                uvm_pglist_add(pg, rlist);
                if (--todo == 0) {
                        break;
                }
        }

        /*
         * The next time we need to search this segment,
         * start just after the pages we just allocated.
         */
        ps->start_hint = try + 1 - ps->start;

#ifdef PGALLOC_VERBOSE
        printf("got %d pgs\n", num - todo);
#endif
        return (num - todo); /* number of pages allocated */
}

static int
uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
    struct pglist *rlist, int waitok)
{
        int fl, psi, error;
        struct vm_physseg *ps;

        /* Default to "lose". */
        error = ENOMEM;

again:
        /*
         * Block all memory allocation and lock the free list.
         */
        mutex_spin_enter(&uvm_fpageqlock);

        /* Are there even any free pages? */
        if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
                goto out;

        for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
                for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
#else
                for (psi = 0 ; psi < vm_nphysseg ; psi++)
#endif
                {
                        ps = &vm_physmem[psi];

                        if (ps->free_list != fl)
                                continue;

                        num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist);
                        if (num == 0) {
                                error = 0;
                                goto out;
                        }
                }

        }

out:
        /*
         * check to see if we need to generate some free pages by waking
         * the pagedaemon.
         */

        uvm_kick_pdaemon();
        mutex_spin_exit(&uvm_fpageqlock);

        if (error) {
                if (waitok) {
                        /* XXX perhaps some time limitation? */
#ifdef DEBUG
                        printf("pglistalloc waiting\n");
#endif
                        uvm_wait("pglalloc");
                        goto again;
                } else
                        uvm_pglistfree(rlist);
        }
#ifdef PGALLOC_VERBOSE
        if (!error)
                printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n",
                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
        return (error);
}

int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int waitok)
{
        int num, res;

        KASSERT((alignment & (alignment - 1)) == 0);
        KASSERT((boundary & (boundary - 1)) == 0);

        /*
         * Our allocations are always page granularity, so our alignment
         * must be, too.
         */
        if (alignment < PAGE_SIZE)
                alignment = PAGE_SIZE;
        if (boundary != 0 && boundary < size)
                return (EINVAL);
        num = atop(round_page(size));
        low = roundup2(low, alignment);

        TAILQ_INIT(rlist);

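        /*
         * Added commentary: the simple path may hand back num scattered
         * single pages, so it is only usable when the caller accepts at
         * least that many segments and imposes no alignment or boundary
         * constraint; otherwise we must find one physically contiguous
         * run that satisfies the constraints.
         */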
        if ((nsegs < size >> PAGE_SHIFT) || (alignment != PAGE_SIZE) ||
            (boundary != 0))
                res = uvm_pglistalloc_contig(num, low, high, alignment,
                    boundary, rlist);
        else
                res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);

        return (res);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(struct pglist *list)
{
        struct uvm_cpu *ucpu;
        struct vm_page *pg;
        int index, color, queue;
        bool iszero;

        /*
         * Lock the free list and free each page.
         */

        mutex_spin_enter(&uvm_fpageqlock);
        ucpu = curcpu()->ci_data.cpu_uvm;
        while ((pg = TAILQ_FIRST(list)) != NULL) {
                KASSERT(!uvmpdpol_pageisqueued_p(pg));
                TAILQ_REMOVE(list, pg, pageq.queue);
                iszero = (pg->flags & PG_ZERO);
                pg->pqflags = PQ_FREE;
#ifdef DEBUG
                pg->uobject = (void *)0xdeadbeef;
                pg->uanon = (void *)0xdeadbeef;
#endif /* DEBUG */
#ifdef DEBUG
                if (iszero)
                        uvm_pagezerocheck(pg);
#endif /* DEBUG */
                index = uvm_page_lookup_freelist(pg);
                color = VM_PGCOLOR_BUCKET(pg);
                queue = iszero ? PGFL_ZEROS : PGFL_UNKNOWN;
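                /*
                 * Added commentary: stash the freeing CPU in pg->offset
                 * (the field appears to be otherwise unused while the page
                 * is free; VM_FREE_PAGE_TO_CPU reads it back), then link
                 * the page on both the global free queue and this CPU's
                 * private queue for the same freelist, color, and
                 * zero/unknown bucket.
                 */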
                pg->offset = (uintptr_t)ucpu;
                LIST_INSERT_HEAD(&uvm.page_free[index].pgfl_buckets[color].
                    pgfl_queues[queue], pg, pageq.list);
                LIST_INSERT_HEAD(&ucpu->page_free[index].pgfl_buckets[color].
                    pgfl_queues[queue], pg, listq.list);
                uvmexp.free++;
                if (iszero)
                        uvmexp.zeropages++;
                ucpu->pages[queue]++;
                STAT_DECR(uvm_pglistalloc_npages);
        }
        if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN])
                ucpu->page_idle_zero = vm_page_zero_enable;
        mutex_spin_exit(&uvm_fpageqlock);
}