/*	$NetBSD: uvm_pglist.c,v 1.88 2021/03/26 09:35:18 chs Exp $	*/

/*-
 * Copyright (c) 1997, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.88 2021/03/26 09:35:18 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (/*CONSTCOND*/ 0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

kmutex_t uvm_pglistalloc_contig_lock;

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed onto an rlist.  rlist is
 *    initialized by uvm_pglistalloc.
 * => returns 0 on success or errno on failure
 * => implementation allocates a single segment if any constraints are
 *    imposed by call arguments.
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
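
/*
 * Illustrative usage (a sketch under assumed requirements, not part of
 * the allocator itself): a caller that needs a physically contiguous,
 * 64KB-aligned buffer below 16MB might call uvm_pglistalloc() roughly
 * as below.  The variable names are hypothetical; uvm_pglistalloc(),
 * uvm_pglistfree(), VM_PAGE_TO_PHYS() and the pageq.queue linkage are
 * the interfaces used elsewhere in this file.  Passing nsegs == 1
 * forces a single contiguous segment, and waitok != 0 allows the
 * allocator to sleep, so this must not be done from interrupt context.
 *
 *	struct pglist plist;
 *	struct vm_page *pg;
 *	int error;
 *
 *	error = uvm_pglistalloc(4 * PAGE_SIZE, 0, 0x1000000, 0x10000, 0,
 *	    &plist, 1, 1);
 *	if (error)
 *		return error;
 *	TAILQ_FOREACH(pg, &plist, pageq.queue) {
 *		paddr_t pa = VM_PAGE_TO_PHYS(pg);
 *		... map pa or program it into a device ...
 *	}
 *	uvm_pglistfree(&plist);
 */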

static void
uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
{
        struct pgfreelist *pgfl;
        struct pgflbucket *pgb;

        pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
        pgb = pgfl->pgfl_buckets[uvm_page_get_bucket(pg)];

#ifdef UVMDEBUG
        struct vm_page *tp;
        LIST_FOREACH(tp, &pgb->pgb_colors[VM_PGCOLOR(pg)], pageq.list) {
                if (tp == pg)
                        break;
        }
        if (tp == NULL)
                panic("uvm_pglistalloc: page not on freelist");
#endif
        LIST_REMOVE(pg, pageq.list);
        pgb->pgb_nfree--;
        CPU_COUNT(CPU_COUNT_FREEPAGES, -1);
        pg->flags = PG_CLEAN;
        pg->uobject = NULL;
        pg->uanon = NULL;
        TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
        STAT_INCR(uvm_pglistalloc_npages);
}

static int
uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
    paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
        signed int candidate, limit, candidateidx, end, idx, skip;
        int pagemask;
        bool second_pass;
#ifdef DEBUG
        paddr_t idxpa, lastidxpa;
        paddr_t cidx = 0;	/* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
        printf("pgalloc: contig %d pgs from psi %d\n", num, psi);
#endif

        low = atop(low);
        high = atop(high);
        alignment = atop(alignment);

        /*
         * Make sure that the physseg falls within the range to be
         * allocated from.
         */
        if (high <= uvm_physseg_get_avail_start(psi) || low >= uvm_physseg_get_avail_end(psi))
                return 0;

        /*
         * We start our search just after where the last allocation
         * succeeded.
         */
        candidate = roundup2(uimax(low, uvm_physseg_get_avail_start(psi) +
            uvm_physseg_get_start_hint(psi)), alignment);
        limit = uimin(high, uvm_physseg_get_avail_end(psi));
        pagemask = ~((boundary >> PAGE_SHIFT) - 1);
        skip = 0;
        second_pass = false;

        for (;;) {
                bool ok = true;
                signed int cnt;

                if (candidate + num > limit) {
                        if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
                                /*
                                 * We've run past the allowable range.
                                 */
                                return 0; /* FAIL = 0 pages */
                        }
                        /*
                         * We've wrapped around the end of this segment,
                         * so restart at the beginning, but now our limit
                         * is where we started.
                         */
                        second_pass = true;
                        candidate = roundup2(uimax(low, uvm_physseg_get_avail_start(psi)), alignment);
                        limit = uimin(limit, uvm_physseg_get_avail_start(psi) +
                            uvm_physseg_get_start_hint(psi));
                        skip = 0;
                        continue;
                }
                if (boundary != 0 &&
                    ((candidate ^ (candidate + num - 1)) & pagemask) != 0) {
                        /*
                         * Region crosses boundary.  Jump to the boundary
                         * just crossed and ensure alignment.
                         */
                        candidate = (candidate + num - 1) & pagemask;
                        candidate = roundup2(candidate, alignment);
                        skip = 0;
                        continue;
                }
#ifdef DEBUG
                /*
                 * Make sure this is a managed physical page.
                 */

                if (uvm_physseg_find(candidate, &cidx) != psi)
                        panic("pgalloc contig: botch1");
                if (cidx != candidate - uvm_physseg_get_start(psi))
                        panic("pgalloc contig: botch2");
                if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
                        panic("pgalloc contig: botch3");
                if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
                        panic("pgalloc contig: botch4");
#endif
                candidateidx = candidate - uvm_physseg_get_start(psi);
                end = candidateidx + num;

                /*
                 * Found a suitable starting page.  See if the range is free.
                 */
#ifdef PGALLOC_VERBOSE
                printf("%s: psi=%d candidate=%#x end=%#x skip=%#x, align=%#"PRIxPADDR,
                    __func__, psi, candidateidx, end, skip, alignment);
#endif
                /*
                 * We start at the end and work backwards since if we find a
                 * non-free page, it makes no sense to continue.
                 *
                 * But on the plus side we have "vetted" some number of free
                 * pages.  If this iteration fails, we may be able to skip
                 * testing most of those pages again in the next pass.
                 */
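                /*
                 * For example, with num = 8 and alignment = 1: if the
                 * backward scan below hits a non-free page three pages
                 * into the window (idx = candidateidx + 2), then
                 * cnt = 3 and the window advances by 3 pages.  The last
                 * 5 pages of the old window were already verified free
                 * and become the start of the new window, so
                 * skip = num - cnt = 5 pages need not be re-tested.
                 */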
                for (idx = end - 1; idx >= candidateidx + skip; idx--) {
                        if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
                                ok = false;
                                break;
                        }

#ifdef DEBUG
                        if (idx > candidateidx) {
                                idxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx));
                                lastidxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx - 1));
                                if ((lastidxpa + PAGE_SIZE) != idxpa) {
                                        /*
                                         * Region not contiguous.
                                         */
                                        panic("pgalloc contig: botch5");
                                }
                                if (boundary != 0 &&
                                    ((lastidxpa ^ idxpa) & ~(boundary - 1))
                                    != 0) {
                                        /*
                                         * Region crosses boundary.
                                         */
                                        panic("pgalloc contig: botch6");
                                }
                        }
#endif
                }

                if (ok) {
                        while (skip-- > 0) {
                                KDASSERT(VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, candidateidx + skip)));
                        }
#ifdef PGALLOC_VERBOSE
                        printf(": ok\n");
#endif
                        break;
                }

#ifdef PGALLOC_VERBOSE
                printf(": non-free at %#x\n", idx - candidateidx);
#endif
                /*
                 * count the number of pages we can advance
                 * since we know they aren't all free.
                 */
                cnt = idx + 1 - candidateidx;
                /*
                 * now round up that to the needed alignment.
                 */
                cnt = roundup2(cnt, alignment);
                /*
                 * The number of pages we can skip checking
                 * (might be 0 if cnt > num).
                 */
                skip = uimax(num - cnt, 0);
                candidate += cnt;
        }

        /*
         * we have a chunk of memory that conforms to the requested constraints.
         */
        for (idx = candidateidx; idx < end; idx++)
                uvm_pglist_add(uvm_physseg_get_pg(psi, idx), rlist);

        /*
         * the next time we need to search this segment, start after this
         * chunk of pages we just allocated.
         */
        uvm_physseg_set_start_hint(psi, candidate + num -
            uvm_physseg_get_avail_start(psi));
        KASSERTMSG(uvm_physseg_get_start_hint(psi) <=
            uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi),
            "%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
            candidate + num,
            uvm_physseg_get_start_hint(psi), uvm_physseg_get_start_hint(psi),
            uvm_physseg_get_avail_end(psi), uvm_physseg_get_avail_start(psi),
            uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));

#ifdef PGALLOC_VERBOSE
        printf("got %d pgs\n", num);
#endif
        return num; /* number of pages allocated */
}

static int
uvm_pglistalloc_contig_aggressive(int num, paddr_t low, paddr_t high,
    paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
        struct vm_page *pg;
        struct pglist tmp;
        paddr_t pa, off, spa, amask, bmask, rlo, rhi;
        uvm_physseg_t upm;
        int error, i, run, acnt;

        /*
         * Allocate pages the normal way and for each new page, check if
         * the page completes a range satisfying the request.
         * The pagedaemon will evict pages as we go and we are very likely
         * to get compatible pages eventually.
         */

        error = ENOMEM;
        TAILQ_INIT(&tmp);
        acnt = atop(alignment);
        amask = ~(alignment - 1);
        bmask = ~(boundary - 1);
        KASSERT(bmask <= amask);
        mutex_enter(&uvm_pglistalloc_contig_lock);
        while (uvm_reclaimable()) {
                pg = uvm_pagealloc(NULL, 0, NULL, 0);
                if (pg == NULL) {
                        uvm_wait("pglac2");
                        continue;
                }
                pg->flags |= PG_PGLCA;
                TAILQ_INSERT_HEAD(&tmp, pg, pageq.queue);

                pa = VM_PAGE_TO_PHYS(pg);
                if (pa < low || pa >= high) {
                        continue;
                }

                upm = uvm_physseg_find(atop(pa), &off);
                KASSERT(uvm_physseg_valid_p(upm));

                spa = pa & amask;

                /*
                 * Look backward for at most num - 1 pages, back to
                 * the highest of:
                 *  - the first page in the physseg
                 *  - the specified low address
                 *  - num-1 pages before the one we just allocated
                 *  - the start of the boundary range containing pa
                 * all rounded up to alignment.
                 */

                rlo = roundup2(ptoa(uvm_physseg_get_avail_start(upm)), alignment);
                rlo = MAX(rlo, roundup2(low, alignment));
                rlo = MAX(rlo, roundup2(pa - ptoa(num - 1), alignment));
                if (boundary) {
                        rlo = MAX(rlo, spa & bmask);
                }

                /*
                 * Look forward as far as the lowest of:
                 *  - the last page of the physseg
                 *  - the specified high address
                 *  - the boundary after pa
                 */

                rhi = ptoa(uvm_physseg_get_avail_end(upm));
                rhi = MIN(rhi, high);
                if (boundary) {
                        rhi = MIN(rhi, rounddown2(pa, boundary) + boundary);
                }

                /*
                 * Make sure our range to consider is big enough.
                 */

                if (rhi - rlo < ptoa(num)) {
                        continue;
                }

                run = 0;
                while (spa > rlo) {

                        /*
                         * Examine pages before spa in groups of acnt.
                         * If all the pages in a group are marked then add
                         * these pages to the run.
                         */

                        for (i = 0; i < acnt; i++) {
                                pg = PHYS_TO_VM_PAGE(spa - alignment + ptoa(i));
                                if ((pg->flags & PG_PGLCA) == 0) {
                                        break;
                                }
                        }
                        if (i < acnt) {
                                break;
                        }
                        spa -= alignment;
                        run += acnt;
                }

                /*
                 * Look forward for any remaining pages.
                 */

                if (spa + ptoa(num) > rhi) {
                        continue;
                }
                for (; run < num; run++) {
                        pg = PHYS_TO_VM_PAGE(spa + ptoa(run));
                        if ((pg->flags & PG_PGLCA) == 0) {
                                break;
                        }
                }
                if (run < num) {
                        continue;
                }

                /*
                 * We found a match.  Move these pages from the tmp list to
                 * the caller's list.
                 */

                for (i = 0; i < num; i++) {
                        pg = PHYS_TO_VM_PAGE(spa + ptoa(i));
                        TAILQ_REMOVE(&tmp, pg, pageq.queue);
                        pg->flags &= ~PG_PGLCA;
                        TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
                        STAT_INCR(uvm_pglistalloc_npages);
                }

                error = 0;
                break;
        }

        /*
         * Free all the pages that we didn't need.
         */

        while (!TAILQ_EMPTY(&tmp)) {
                pg = TAILQ_FIRST(&tmp);
                TAILQ_REMOVE(&tmp, pg, pageq.queue);
                pg->flags &= ~PG_PGLCA;
                uvm_pagefree(pg);
        }
        mutex_exit(&uvm_pglistalloc_contig_lock);
        return error;
}

static int
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int waitok)
{
        int fl;
        int error;
        uvm_physseg_t psi;

        /* Default to "lose". */
        error = ENOMEM;

        /*
         * Block all memory allocation and lock the free list.
         */
        uvm_pgfl_lock();

        /* Are there even any free pages? */
        if (uvm_availmem(false) <=
            (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
                goto out;

        for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
                for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
#else
                for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
#endif
                {
                        if (uvm_physseg_get_free_list(psi) != fl)
                                continue;

                        num -= uvm_pglistalloc_c_ps(psi, num, low, high,
                            alignment, boundary, rlist);
                        if (num == 0) {
#ifdef PGALLOC_VERBOSE
                                printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n",
                                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
                                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
                                error = 0;
                                goto out;
                        }
                }
        }

out:
        uvm_pgfl_unlock();

        /*
         * If that didn't work, try the more aggressive approach.
         */

        if (error) {
                if (waitok) {
                        error = uvm_pglistalloc_contig_aggressive(num, low, high,
                            alignment, boundary, rlist);
                } else {
                        uvm_pglistfree(rlist);
                        uvm_kick_pdaemon();
                }
        }
        return error;
}

static int
uvm_pglistalloc_s_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
    struct pglist *rlist)
{
        int todo, limit, candidate;
        struct vm_page *pg;
        bool second_pass;
#ifdef PGALLOC_VERBOSE
        printf("pgalloc: simple %d pgs from psi %d\n", num, psi);
#endif

        KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
        KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_end(psi));
        KASSERT(uvm_physseg_get_avail_start(psi) <= uvm_physseg_get_end(psi));
        KASSERT(uvm_physseg_get_avail_end(psi) <= uvm_physseg_get_end(psi));

        low = atop(low);
        high = atop(high);
        todo = num;
        candidate = uimax(low, uvm_physseg_get_avail_start(psi) +
            uvm_physseg_get_start_hint(psi));
        limit = uimin(high, uvm_physseg_get_avail_end(psi));
        pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
        second_pass = false;

        /*
         * Make sure that the physseg falls within the range to be
         * allocated from.
         */
        if (high <= uvm_physseg_get_avail_start(psi) ||
            low >= uvm_physseg_get_avail_end(psi))
                return 0;

again:
        for (;; candidate++, pg++) {
                if (candidate >= limit) {
                        if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
                                candidate = limit - 1;
                                break;
                        }
                        second_pass = true;
                        candidate = uimax(low, uvm_physseg_get_avail_start(psi));
                        limit = uimin(limit, uvm_physseg_get_avail_start(psi) +
                            uvm_physseg_get_start_hint(psi));
                        pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
                        goto again;
                }
#if defined(DEBUG)
                {
                        paddr_t cidx = 0;
                        const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
                        KDASSERTMSG(bank == psi,
                            "uvm_physseg_find(%#x) (%"PRIxPHYSSEG ") != psi %"PRIxPHYSSEG,
                            candidate, bank, psi);
                        KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
                            "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
                            candidate, cidx, candidate - uvm_physseg_get_start(psi));
                }
#endif
                if (VM_PAGE_IS_FREE(pg) == 0)
                        continue;

                uvm_pglist_add(pg, rlist);
                if (--todo == 0) {
                        break;
                }
        }

        /*
         * The next time we need to search this segment,
         * start just after the pages we just allocated.
         */
        uvm_physseg_set_start_hint(psi, candidate + 1 - uvm_physseg_get_avail_start(psi));
        KASSERTMSG(uvm_physseg_get_start_hint(psi) <= uvm_physseg_get_avail_end(psi) -
            uvm_physseg_get_avail_start(psi),
            "%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
            candidate + 1,
            uvm_physseg_get_start_hint(psi),
            uvm_physseg_get_start_hint(psi),
            uvm_physseg_get_avail_end(psi),
            uvm_physseg_get_avail_start(psi),
            uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));

#ifdef PGALLOC_VERBOSE
        printf("got %d pgs\n", num - todo);
#endif
        return (num - todo); /* number of pages allocated */
}

static int
uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
    struct pglist *rlist, int waitok)
{
        int fl, error;
        uvm_physseg_t psi;
        int count = 0;

        /* Default to "lose". */
        error = ENOMEM;

again:
        /*
         * Block all memory allocation and lock the free list.
         */
        uvm_pgfl_lock();
        count++;

        /* Are there even any free pages? */
        if (uvm_availmem(false) <=
            (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
                goto out;

        for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
                for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
#else
                for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
#endif
                {
                        if (uvm_physseg_get_free_list(psi) != fl)
                                continue;

                        num -= uvm_pglistalloc_s_ps(psi, num, low, high, rlist);
                        if (num == 0) {
                                error = 0;
                                goto out;
                        }
                }

        }

out:
        /*
         * Check to see if we need to generate some free pages by waking
         * the pagedaemon.
         */

        uvm_pgfl_unlock();
        uvm_kick_pdaemon();

        if (error) {
                if (waitok) {
                        uvm_wait("pglalloc");
                        goto again;
                } else
                        uvm_pglistfree(rlist);
        }
#ifdef PGALLOC_VERBOSE
        if (!error)
                printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n",
                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
                    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
        return (error);
}

int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int waitok)
{
        int num, res;

        KASSERT(!cpu_intr_p());
        KASSERT(!cpu_softintr_p());
        KASSERT((alignment & (alignment - 1)) == 0);
        KASSERT((boundary & (boundary - 1)) == 0);

        /*
         * Our allocations are always page granularity, so our alignment
         * must be, too.
         */
        if (alignment < PAGE_SIZE)
                alignment = PAGE_SIZE;
        if (boundary != 0 && boundary < size)
                return (EINVAL);
        num = atop(round_page(size));
        low = roundup2(low, alignment);

        TAILQ_INIT(rlist);

        /*
         * Turn off the caching of free pages - we need everything to be on
         * the global freelists.
         */
        uvm_pgflcache_pause();

        if (nsegs < num || alignment != PAGE_SIZE || boundary != 0)
                res = uvm_pglistalloc_contig(num, low, high, alignment,
                    boundary, rlist, waitok);
        else
                res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);

        uvm_pgflcache_resume();

        return (res);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(struct pglist *list)
{
        struct vm_page *pg;

        KASSERT(!cpu_intr_p());
        KASSERT(!cpu_softintr_p());

        while ((pg = TAILQ_FIRST(list)) != NULL) {
                TAILQ_REMOVE(list, pg, pageq.queue);
                uvm_pagefree(pg);
                STAT_DECR(uvm_pglistalloc_npages);
        }
}

void
uvm_pglistalloc_init(void)
{

        mutex_init(&uvm_pglistalloc_contig_lock, MUTEX_DEFAULT, IPL_NONE);
}