/*	$NetBSD: uvm_pglist.c,v 1.78 2019/12/27 12:51:57 ad Exp $	*/

/*-
 * Copyright (c) 1997, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.78 2019/12/27 12:51:57 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (/*CONSTCOND*/ 0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed onto an rlist.  rlist is
 *    initialized by uvm_pglistalloc.
 * => returns 0 on success or errno on failure
 * => implementation allocates a single segment if any constraints are
 *    imposed by call arguments.
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
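
/*
 * Example (an illustrative sketch only; the constraint values and the
 * printf are hypothetical): a caller needing four physically contiguous
 * pages below 16MB, aligned to a 64KB boundary, could use the interface
 * like this:
 *
 *	struct pglist mlist;
 *	struct vm_page *pg;
 *	int error;
 *
 *	error = uvm_pglistalloc(4 * PAGE_SIZE, 0, 0x1000000, 0x10000, 0,
 *	    &mlist, 1, 0);
 *	if (error == 0) {
 *		TAILQ_FOREACH(pg, &mlist, pageq.queue)
 *			printf("page at %#"PRIxPADDR"\n", VM_PAGE_TO_PHYS(pg));
 *		uvm_pglistfree(&mlist);
 *	}
 */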

/*
 * uvm_pglist_add: pull one free page out of its freelist bucket and
 * append it to rlist.  Called with the free list locked.
 */
static void
uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
{
	struct pgfreelist *pgfl;
	struct pgflbucket *pgb;

	pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
	pgb = pgfl->pgfl_buckets[uvm_page_get_bucket(pg)];

#ifdef UVMDEBUG
	struct vm_page *tp;
	LIST_FOREACH(tp, &pgb->pgb_colors[VM_PGCOLOR(pg)], pageq.list) {
		if (tp == pg)
			break;
	}
	if (tp == NULL)
		panic("uvm_pglistalloc: page not on freelist");
#endif
	LIST_REMOVE(pg, pageq.list);
	pgb->pgb_nfree--;
	if (pg->flags & PG_ZERO)
		CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
	pg->flags = PG_CLEAN;
	pg->uobject = NULL;
	pg->uanon = NULL;
	TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
	STAT_INCR(uvm_pglistalloc_npages);
}

/*
 * uvm_pglistalloc_c_ps: try to allocate a contiguous run of "num" pages
 * from physical segment "psi", honouring the low/high/alignment/boundary
 * constraints.  Returns the number of pages allocated (either 0 or num).
 */
static int
uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
    paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
	signed int candidate, limit, candidateidx, end, idx, skip;
	int pagemask;
	bool second_pass;
#ifdef DEBUG
	paddr_t idxpa, lastidxpa;
	paddr_t cidx = 0;	/* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
	printf("pgalloc: contig %d pgs from psi %zd\n", num, psi);
#endif

	low = atop(low);
	high = atop(high);
	alignment = atop(alignment);

	/*
	 * Make sure that the physseg falls within the range to be
	 * allocated from.
	 */
	if (high <= uvm_physseg_get_avail_start(psi) ||
	    low >= uvm_physseg_get_avail_end(psi))
		return 0;

	/*
	 * We start our search just after where the last allocation
	 * succeeded.
	 */
	candidate = roundup2(uimax(low, uvm_physseg_get_avail_start(psi) +
		uvm_physseg_get_start_hint(psi)), alignment);
	limit = uimin(high, uvm_physseg_get_avail_end(psi));
	pagemask = ~((boundary >> PAGE_SHIFT) - 1);
	skip = 0;
	second_pass = false;

	for (;;) {
		bool ok = true;
		signed int cnt;

		if (candidate + num > limit) {
			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
				/*
				 * We've run past the allowable range.
				 */
				return 0; /* FAIL = 0 pages */
			}
			/*
			 * We've wrapped around the end of this segment
			 * so restart at the beginning but now our limit
			 * is where we started.
			 */
			second_pass = true;
			candidate = roundup2(uimax(low, uvm_physseg_get_avail_start(psi)), alignment);
			limit = uimin(limit, uvm_physseg_get_avail_start(psi) +
			    uvm_physseg_get_start_hint(psi));
			skip = 0;
			continue;
		}
		if (boundary != 0 &&
		    ((candidate ^ (candidate + num - 1)) & pagemask) != 0) {
			/*
			 * Region crosses boundary.  Jump to the boundary
			 * just crossed and ensure alignment.
			 */
			candidate = (candidate + num - 1) & pagemask;
			candidate = roundup2(candidate, alignment);
			skip = 0;
			continue;
		}
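		/*
		 * A worked example of the boundary jump above (illustrative;
		 * assumes 4KB pages and boundary = 64KB, i.e. 16 pages, so
		 * pagemask == ~0xf): with candidate == 0x0a and num == 8 the
		 * candidate run 0x0a..0x11 straddles the 16-page boundary at
		 * 0x10, since ((0x0a ^ 0x11) & ~0xf) != 0.  candidate is
		 * therefore advanced to (0x0a + 8 - 1) & ~0xf == 0x10 and
		 * then re-aligned.
		 */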
#ifdef DEBUG
		/*
		 * Make sure this is a managed physical page.
		 */

		if (uvm_physseg_find(candidate, &cidx) != psi)
			panic("pgalloc contig: botch1");
		if (cidx != candidate - uvm_physseg_get_start(psi))
			panic("pgalloc contig: botch2");
		if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
			panic("pgalloc contig: botch3");
		if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
			panic("pgalloc contig: botch4");
#endif
		candidateidx = candidate - uvm_physseg_get_start(psi);
		end = candidateidx + num;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */
#ifdef PGALLOC_VERBOSE
		printf("%s: psi=%"PRIxPHYSSEG" candidate=%#x end=%#x skip=%#x, align=%#"PRIxPADDR,
		    __func__, psi, candidateidx, end, skip, alignment);
#endif
		/*
		 * We start at the end and work backwards since if we find a
		 * non-free page, it makes no sense to continue.
		 *
		 * But on the plus side we have "vetted" some number of free
		 * pages.  If this iteration fails, we may be able to skip
		 * testing most of those pages again in the next pass.
		 */
		for (idx = end - 1; idx >= candidateidx + skip; idx--) {
			if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
				ok = false;
				break;
			}

#ifdef DEBUG
			if (idx > candidateidx) {
				idxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx));
				lastidxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx - 1));
				if ((lastidxpa + PAGE_SIZE) != idxpa) {
					/*
					 * Region not contiguous.
					 */
					panic("pgalloc contig: botch5");
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & ~(boundary - 1))
				    != 0) {
					/*
					 * Region crosses boundary.
					 */
					panic("pgalloc contig: botch6");
				}
			}
#endif
		}

		if (ok) {
			while (skip-- > 0) {
				KDASSERT(VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, candidateidx + skip)));
			}
#ifdef PGALLOC_VERBOSE
			printf(": ok\n");
#endif
			break;
		}

#ifdef PGALLOC_VERBOSE
		printf(": non-free at %#x\n", idx - candidateidx);
#endif
		/*
		 * count the number of pages we can advance
		 * since we know they aren't all free.
		 */
		cnt = idx + 1 - candidateidx;
		/*
		 * now round up that to the needed alignment.
		 */
		cnt = roundup2(cnt, alignment);
		/*
		 * The number of pages we can skip checking
		 * (might be 0 if cnt > num).
		 */
		skip = uimax(num - cnt, 0);
		candidate += cnt;
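		/*
		 * For example (illustrative): with num == 8, if the backward
		 * scan hit a non-free page two slots into the window, then
		 * cnt == 3 and (alignment permitting) the window slides
		 * forward by three pages; its first skip == 5 pages were
		 * already verified free above and need not be re-tested.
		 */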
	}

	/*
	 * we have a chunk of memory that conforms to the requested constraints.
	 */
	for (idx = candidateidx; idx < end; idx++)
		uvm_pglist_add(uvm_physseg_get_pg(psi, idx), rlist);

	/*
	 * the next time we need to search this segment, start after this
	 * chunk of pages we just allocated.
	 */
	uvm_physseg_set_start_hint(psi, candidate + num -
	    uvm_physseg_get_avail_start(psi));
	KASSERTMSG(uvm_physseg_get_start_hint(psi) <=
	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi),
	    "%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
	    candidate + num,
	    uvm_physseg_get_start_hint(psi), uvm_physseg_get_start_hint(psi),
	    uvm_physseg_get_avail_end(psi), uvm_physseg_get_avail_start(psi),
	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));

#ifdef PGALLOC_VERBOSE
	printf("got %d pgs\n", num);
#endif
	return num; /* number of pages allocated */
}

/*
 * uvm_pglistalloc_contig: walk the freelists and physical segments looking
 * for a run of pages that satisfies the contiguity, alignment and boundary
 * constraints.  Returns 0 on success or ENOMEM on failure.
 */
static int
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist)
{
	int fl;
	int error;

	uvm_physseg_t psi;
	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	uvm_pgfl_lock();

	/* Are there even any free pages? */
	if (uvm_free() <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
		goto out;

	for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
#else
		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
#endif
		{
			if (uvm_physseg_get_free_list(psi) != fl)
				continue;

			num -= uvm_pglistalloc_c_ps(psi, num, low, high,
			    alignment, boundary, rlist);
			if (num == 0) {
#ifdef PGALLOC_VERBOSE
				printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n",
				    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
				    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
				error = 0;
				goto out;
			}
		}
	}

out:
	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	uvm_pgfl_unlock();
	uvm_kick_pdaemon();
	return (error);
}

/*
 * uvm_pglistalloc_s_ps: allocate up to "num" pages (not necessarily
 * contiguous) from physical segment "psi" within the requested physical
 * range.  Returns the number of pages allocated.
 */
static int
uvm_pglistalloc_s_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
    struct pglist *rlist)
{
	int todo, limit, candidate;
	struct vm_page *pg;
	bool second_pass;
#ifdef PGALLOC_VERBOSE
	printf("pgalloc: simple %d pgs from psi %zd\n", num, psi);
#endif

	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_end(psi));
	KASSERT(uvm_physseg_get_avail_start(psi) <= uvm_physseg_get_end(psi));
	KASSERT(uvm_physseg_get_avail_end(psi) <= uvm_physseg_get_end(psi));

	low = atop(low);
	high = atop(high);
	todo = num;
	candidate = uimax(low, uvm_physseg_get_avail_start(psi) +
	    uvm_physseg_get_start_hint(psi));
	limit = uimin(high, uvm_physseg_get_avail_end(psi));
	pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
	second_pass = false;

	/*
	 * Make sure that the physseg falls within the range to be
	 * allocated from.
	 */
	if (high <= uvm_physseg_get_avail_start(psi) ||
	    low >= uvm_physseg_get_avail_end(psi))
		return 0;

again:
	for (;; candidate++, pg++) {
		if (candidate >= limit) {
			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
				candidate = limit - 1;
				break;
			}
			second_pass = true;
			candidate = uimax(low, uvm_physseg_get_avail_start(psi));
			limit = uimin(limit, uvm_physseg_get_avail_start(psi) +
			    uvm_physseg_get_start_hint(psi));
			pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
			goto again;
		}
#if defined(DEBUG)
		{
			paddr_t cidx = 0;
			const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
			KDASSERTMSG(bank == psi,
			    "uvm_physseg_find(%#x) (%"PRIxPHYSSEG ") != psi %"PRIxPHYSSEG,
			    candidate, bank, psi);
			KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
			    "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
			    candidate, cidx, candidate - uvm_physseg_get_start(psi));
		}
#endif
		if (VM_PAGE_IS_FREE(pg) == 0)
			continue;

		uvm_pglist_add(pg, rlist);
		if (--todo == 0) {
			break;
		}
	}

	/*
	 * The next time we need to search this segment,
	 * start just after the pages we just allocated.
	 */
	uvm_physseg_set_start_hint(psi, candidate + 1 - uvm_physseg_get_avail_start(psi));
	KASSERTMSG(uvm_physseg_get_start_hint(psi) <= uvm_physseg_get_avail_end(psi) -
	    uvm_physseg_get_avail_start(psi),
	    "%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
	    candidate + 1,
	    uvm_physseg_get_start_hint(psi),
	    uvm_physseg_get_start_hint(psi),
	    uvm_physseg_get_avail_end(psi),
	    uvm_physseg_get_avail_start(psi),
	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));

#ifdef PGALLOC_VERBOSE
	printf("got %d pgs\n", num - todo);
#endif
	return (num - todo); /* number of pages allocated */
}

/*
 * uvm_pglistalloc_simple: allocate "num" pages with no contiguity,
 * alignment or boundary constraints, optionally waiting for memory
 * to become available.
 */
static int
uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
    struct pglist *rlist, int waitok)
{
	int fl, error;
	uvm_physseg_t psi;
	int count = 0;

	/* Default to "lose". */
	error = ENOMEM;

again:
	/*
	 * Block all memory allocation and lock the free list.
	 */
	uvm_pgfl_lock();
	count++;

	/* Are there even any free pages? */
	if (uvm_free() <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
		goto out;

	for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
#else
		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
#endif
		{
			if (uvm_physseg_get_free_list(psi) != fl)
				continue;

			num -= uvm_pglistalloc_s_ps(psi, num, low, high, rlist);
			if (num == 0) {
				error = 0;
				goto out;
			}
		}

	}

out:
	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	uvm_pgfl_unlock();
	uvm_kick_pdaemon();

	if (error) {
		if (waitok) {
			/* XXX perhaps some time limitation? */
#ifdef DEBUG
			if (count == 1)
				printf("pglistalloc waiting\n");
#endif
			uvm_wait("pglalloc");
			goto again;
		} else
			uvm_pglistfree(rlist);
	}
#ifdef PGALLOC_VERBOSE
	if (!error)
		printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n",
		    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
		    (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
#endif
	return (error);
}

int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int waitok)
{
	int num, res;

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);
	num = atop(round_page(size));
	low = roundup2(low, alignment);

	TAILQ_INIT(rlist);

	/*
	 * Turn off the caching of free pages - we need everything to be on
	 * the global freelists.
	 */
	uvm_pgflcache_pause();

	if ((nsegs < size >> PAGE_SHIFT) || (alignment != PAGE_SIZE) ||
	    (boundary != 0))
		res = uvm_pglistalloc_contig(num, low, high, alignment,
		    boundary, rlist);
	else
		res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);

	uvm_pgflcache_resume();

	return (res);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(struct pglist *list)
{
	struct pgfreelist *pgfl;
	struct pgflbucket *pgb;
	struct vm_page *pg;
	int c, b;

	/*
	 * Lock the free list and free each page.
	 */

	uvm_pgfl_lock();
	while ((pg = TAILQ_FIRST(list)) != NULL) {
		TAILQ_REMOVE(list, pg, pageq.queue);
		pg->flags = (pg->flags & PG_ZERO) | PG_FREE;
#ifdef DEBUG
		pg->uobject = (void *)0xdeadbeef;
		pg->uanon = (void *)0xdeadbeef;
		if (pg->flags & PG_ZERO)
			uvm_pagezerocheck(pg);
#endif /* DEBUG */
		c = VM_PGCOLOR(pg);
		b = uvm_page_get_bucket(pg);
		pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
		pgb = pgfl->pgfl_buckets[b];
		if (pg->flags & PG_ZERO)
			CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
		pgb->pgb_nfree++;
		LIST_INSERT_HEAD(&pgb->pgb_colors[c], pg, pageq.list);
		STAT_DECR(uvm_pglistalloc_npages);
	}
	uvm_pgfl_unlock();
}