1 /*	$NetBSD: subr_pool.c,v 1.89 2003/12/29 16:04:58 yamt Exp $	*/
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.89 2003/12/29 16:04:58 yamt Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according to
63 * the pool item size. Each page is kept on one of three lists in the
64 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65 * for empty, full and partially-full pages respectively. The individual
66 * pool items are on a linked list headed by `ph_itemlist' in each page
67 * header. The memory for building the page list is either taken from
68 * the allocated pages themselves (for small pool items) or taken from
69 * an internal pool of page headers (`phpool').
70 */
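
/*
 * A minimal usage sketch (illustrative only; "struct foo", "foopool" and
 * the "foopl" wait channel name are hypothetical and not part of this
 * file):
 *
 *	static struct pool foopool;
 *	struct foo *f;
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	f = pool_get(&foopool, PR_WAITOK);
 *	...
 *	pool_put(&foopool, f);
 *	pool_destroy(&foopool);
 */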
71
72 /* List of all pools */
73 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
74
75 /* Private pool for page header structures */
76 static struct pool phpool;
77
78 #ifdef POOL_SUBPAGE
79 /* Pool of subpages for use by normal pools. */
80 static struct pool psppool;
81 #endif
82
83 /* # of seconds to retain page after last use */
84 int pool_inactive_time = 10;
85
86 /* Next candidate for drainage (see pool_drain()) */
87 static struct pool *drainpp;
88
89 /* This spin lock protects both pool_head and drainpp. */
90 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
91
92 struct pool_item_header {
93 /* Page headers */
94 LIST_ENTRY(pool_item_header)
95 ph_pagelist; /* pool page list */
96 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
97 SPLAY_ENTRY(pool_item_header)
98 ph_node; /* Off-page page headers */
99 unsigned int ph_nmissing; /* # of chunks in use */
100 caddr_t ph_page; /* this page's address */
101 struct timeval ph_time; /* last referenced */
102 };
103
104 struct pool_item {
105 #ifdef DIAGNOSTIC
106 u_int pi_magic;
107 #endif
108 #define PI_MAGIC 0xdeadbeefU
109 /* Other entries use only this list entry */
110 TAILQ_ENTRY(pool_item) pi_list;
111 };
112
113 #define POOL_NEEDS_CATCHUP(pp) \
114 ((pp)->pr_nitems < (pp)->pr_minitems)
115
116 /*
117 * Pool cache management.
118 *
119 * Pool caches provide a way for constructed objects to be cached by the
120 * pool subsystem. This can lead to performance improvements by avoiding
121 * needless object construction/destruction; it is deferred until absolutely
122 * necessary.
123 *
124 * Caches are grouped into cache groups. Each cache group references
125 * up to 16 constructed objects. When a cache allocates an object
126 * from the pool, it calls the object's constructor and places it into
127 * a cache group. When a cache group frees an object back to the pool,
128 * it first calls the object's destructor. This allows the object to
129 * persist in constructed form while freed to the cache.
130 *
131 * Multiple caches may exist for each pool. This allows a single
132 * object type to have multiple constructed forms. The pool references
133 * each cache, so that when a pool is drained by the pagedaemon, it can
134 * drain each individual cache as well. Each time a cache is drained,
135 * the most idle cache group is freed to the pool in its entirety.
136 *
137 * Pool caches are laid on top of pools. By layering them, we can avoid
138 * the complexity of cache management for pools which would not benefit
139 * from it.
140 */
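
/*
 * A hedged usage sketch for pool caches (the "foo_cache", "foo_ctor" and
 * "foo_dtor" names are hypothetical; the constructor returns 0 on success,
 * and the paddr variants shown are the ones defined in this file):
 *
 *	static struct pool foopool;
 *	static struct pool_cache foo_cache;
 *	struct foo *f;
 *
 *	pool_cache_init(&foo_cache, &foopool, foo_ctor, foo_dtor, NULL);
 *	f = pool_cache_get_paddr(&foo_cache, PR_WAITOK, NULL);
 *	...
 *	pool_cache_put_paddr(&foo_cache, f, POOL_PADDR_INVALID);
 *	pool_cache_destroy(&foo_cache);
 */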
141
142 /* The cache group pool. */
143 static struct pool pcgpool;
144
145 static void pool_cache_reclaim(struct pool_cache *);
146
147 static int pool_catchup(struct pool *);
148 static void pool_prime_page(struct pool *, caddr_t,
149 struct pool_item_header *);
150 static void pool_update_curpage(struct pool *);
151
152 void *pool_allocator_alloc(struct pool *, int);
153 void pool_allocator_free(struct pool *, void *);
154
155 static void pool_print_pagelist(struct pool_pagelist *,
156 void (*)(const char *, ...));
157 static void pool_print1(struct pool *, const char *,
158 void (*)(const char *, ...));
159
160 static int pool_chk_page(struct pool *, const char *,
161 struct pool_item_header *);
162
163 /*
164 * Pool log entry. An array of these is allocated in pool_init().
165 */
166 struct pool_log {
167 const char *pl_file;
168 long pl_line;
169 int pl_action;
170 #define PRLOG_GET 1
171 #define PRLOG_PUT 2
172 void *pl_addr;
173 };
174
175 #ifdef POOL_DIAGNOSTIC
176 /* Number of entries in pool log buffers */
177 #ifndef POOL_LOGSIZE
178 #define POOL_LOGSIZE 10
179 #endif
180
181 int pool_logsize = POOL_LOGSIZE;
182
183 static __inline void
184 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
185 {
186 int n = pp->pr_curlogentry;
187 struct pool_log *pl;
188
189 if ((pp->pr_roflags & PR_LOGGING) == 0)
190 return;
191
192 /*
193 * Fill in the current entry. Wrap around and overwrite
194 * the oldest entry if necessary.
195 */
196 pl = &pp->pr_log[n];
197 pl->pl_file = file;
198 pl->pl_line = line;
199 pl->pl_action = action;
200 pl->pl_addr = v;
201 if (++n >= pp->pr_logsize)
202 n = 0;
203 pp->pr_curlogentry = n;
204 }
205
206 static void
207 pr_printlog(struct pool *pp, struct pool_item *pi,
208 void (*pr)(const char *, ...))
209 {
210 int i = pp->pr_logsize;
211 int n = pp->pr_curlogentry;
212
213 if ((pp->pr_roflags & PR_LOGGING) == 0)
214 return;
215
216 /*
217 * Print all entries in this pool's log.
218 */
219 while (i-- > 0) {
220 struct pool_log *pl = &pp->pr_log[n];
221 if (pl->pl_action != 0) {
222 if (pi == NULL || pi == pl->pl_addr) {
223 (*pr)("\tlog entry %d:\n", i);
224 (*pr)("\t\taction = %s, addr = %p\n",
225 pl->pl_action == PRLOG_GET ? "get" : "put",
226 pl->pl_addr);
227 (*pr)("\t\tfile: %s at line %lu\n",
228 pl->pl_file, pl->pl_line);
229 }
230 }
231 if (++n >= pp->pr_logsize)
232 n = 0;
233 }
234 }
235
236 static __inline void
237 pr_enter(struct pool *pp, const char *file, long line)
238 {
239
240 if (__predict_false(pp->pr_entered_file != NULL)) {
241 printf("pool %s: reentrancy at file %s line %ld\n",
242 pp->pr_wchan, file, line);
243 printf(" previous entry at file %s line %ld\n",
244 pp->pr_entered_file, pp->pr_entered_line);
245 panic("pr_enter");
246 }
247
248 pp->pr_entered_file = file;
249 pp->pr_entered_line = line;
250 }
251
252 static __inline void
253 pr_leave(struct pool *pp)
254 {
255
256 if (__predict_false(pp->pr_entered_file == NULL)) {
257 printf("pool %s not entered?\n", pp->pr_wchan);
258 panic("pr_leave");
259 }
260
261 pp->pr_entered_file = NULL;
262 pp->pr_entered_line = 0;
263 }
264
265 static __inline void
266 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
267 {
268
269 if (pp->pr_entered_file != NULL)
270 (*pr)("\n\tcurrently entered from file %s line %ld\n",
271 pp->pr_entered_file, pp->pr_entered_line);
272 }
273 #else
274 #define pr_log(pp, v, action, file, line)
275 #define pr_printlog(pp, pi, pr)
276 #define pr_enter(pp, file, line)
277 #define pr_leave(pp)
278 #define pr_enter_check(pp, pr)
279 #endif /* POOL_DIAGNOSTIC */
280
281 static __inline int
282 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
283 {
284 if (a->ph_page < b->ph_page)
285 return (-1);
286 else if (a->ph_page > b->ph_page)
287 return (1);
288 else
289 return (0);
290 }
291
292 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
293 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
294
295 /*
296 * Return the pool page header based on page address.
297 */
298 static __inline struct pool_item_header *
299 pr_find_pagehead(struct pool *pp, caddr_t page)
300 {
301 struct pool_item_header *ph, tmp;
302
303 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
304 return ((struct pool_item_header *)(page + pp->pr_phoffset));
305
306 tmp.ph_page = page;
307 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
308 return ph;
309 }
310
311 /*
312 * Remove a page from the pool.
313 */
314 static __inline void
315 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
316 struct pool_pagelist *pq)
317 {
318 int s;
319
320 /*
321 * If the page was idle, decrement the idle page count.
322 */
323 if (ph->ph_nmissing == 0) {
324 #ifdef DIAGNOSTIC
325 if (pp->pr_nidle == 0)
326 panic("pr_rmpage: nidle inconsistent");
327 if (pp->pr_nitems < pp->pr_itemsperpage)
328 panic("pr_rmpage: nitems inconsistent");
329 #endif
330 pp->pr_nidle--;
331 }
332
333 pp->pr_nitems -= pp->pr_itemsperpage;
334
335 /*
336 * Unlink a page from the pool and release it (or queue it for release).
337 */
338 LIST_REMOVE(ph, ph_pagelist);
339 if (pq) {
340 LIST_INSERT_HEAD(pq, ph, ph_pagelist);
341 } else {
342 pool_allocator_free(pp, ph->ph_page);
343 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
344 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
345 s = splvm();
346 pool_put(&phpool, ph);
347 splx(s);
348 }
349 }
350 pp->pr_npages--;
351 pp->pr_npagefree++;
352
353 pool_update_curpage(pp);
354 }
355
356 /*
357 * Initialize the given pool resource structure.
358 *
359 * We export this routine to allow other kernel parts to declare
360 * static pools that must be initialized before malloc() is available.
361 */
362 void
363 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
364 const char *wchan, struct pool_allocator *palloc)
365 {
366 int off, slack;
367
368 #ifdef POOL_DIAGNOSTIC
369 /*
370 * Always log if POOL_DIAGNOSTIC is defined.
371 */
372 if (pool_logsize != 0)
373 flags |= PR_LOGGING;
374 #endif
375
376 #ifdef POOL_SUBPAGE
377 /*
378 * XXX We don't provide a real `nointr' back-end
379 * yet; all sub-pages come from a kmem back-end.
380 * Maybe some day...
381 */
382 if (palloc == NULL) {
383 extern struct pool_allocator pool_allocator_kmem_subpage;
384 palloc = &pool_allocator_kmem_subpage;
385 }
386 /*
387 * We'll assume any user-specified back-end allocator
388 * will deal with sub-pages, or simply don't care.
389 */
390 #else
391 if (palloc == NULL)
392 palloc = &pool_allocator_kmem;
393 #endif /* POOL_SUBPAGE */
394 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
395 if (palloc->pa_pagesz == 0) {
396 #ifdef POOL_SUBPAGE
397 if (palloc == &pool_allocator_kmem)
398 palloc->pa_pagesz = PAGE_SIZE;
399 else
400 palloc->pa_pagesz = POOL_SUBPAGE;
401 #else
402 palloc->pa_pagesz = PAGE_SIZE;
403 #endif /* POOL_SUBPAGE */
404 }
405
406 TAILQ_INIT(&palloc->pa_list);
407
408 simple_lock_init(&palloc->pa_slock);
409 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
410 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
411 palloc->pa_flags |= PA_INITIALIZED;
412 }
413
414 if (align == 0)
415 align = ALIGN(1);
416
417 if (size < sizeof(struct pool_item))
418 size = sizeof(struct pool_item);
419
420 size = roundup(size, align);
421 #ifdef DIAGNOSTIC
422 if (size > palloc->pa_pagesz)
423 panic("pool_init: pool item size (%lu) too large",
424 (u_long)size);
425 #endif
426
427 /*
428 * Initialize the pool structure.
429 */
430 LIST_INIT(&pp->pr_emptypages);
431 LIST_INIT(&pp->pr_fullpages);
432 LIST_INIT(&pp->pr_partpages);
433 TAILQ_INIT(&pp->pr_cachelist);
434 pp->pr_curpage = NULL;
435 pp->pr_npages = 0;
436 pp->pr_minitems = 0;
437 pp->pr_minpages = 0;
438 pp->pr_maxpages = UINT_MAX;
439 pp->pr_roflags = flags;
440 pp->pr_flags = 0;
441 pp->pr_size = size;
442 pp->pr_align = align;
443 pp->pr_wchan = wchan;
444 pp->pr_alloc = palloc;
445 pp->pr_nitems = 0;
446 pp->pr_nout = 0;
447 pp->pr_hardlimit = UINT_MAX;
448 pp->pr_hardlimit_warning = NULL;
449 pp->pr_hardlimit_ratecap.tv_sec = 0;
450 pp->pr_hardlimit_ratecap.tv_usec = 0;
451 pp->pr_hardlimit_warning_last.tv_sec = 0;
452 pp->pr_hardlimit_warning_last.tv_usec = 0;
453 pp->pr_drain_hook = NULL;
454 pp->pr_drain_hook_arg = NULL;
455
456 /*
457 * Decide whether to put the page header off page to avoid
458 * wasting too large a part of the page. Off-page page headers
459 * go into a splay tree (`pr_phtree'), so we can match a returned
460 * item with its header based on the page address.
461 * We use 1/16 of the page size as the threshold (XXX: tune)
462 */
463 if (pp->pr_size < palloc->pa_pagesz/16) {
464 /* Use the end of the page for the page header */
465 pp->pr_roflags |= PR_PHINPAGE;
466 pp->pr_phoffset = off = palloc->pa_pagesz -
467 ALIGN(sizeof(struct pool_item_header));
468 } else {
469 /* The page header will be taken from our page header pool */
470 pp->pr_phoffset = 0;
471 off = palloc->pa_pagesz;
472 SPLAY_INIT(&pp->pr_phtree);
473 }
474
475 /*
476 * Alignment is to take place at `ioff' within the item. This means
477 * we must reserve up to `align - 1' bytes on the page to allow
478 * appropriate positioning of each item.
479 *
480 * Silently enforce `0 <= ioff < align'.
481 */
482 pp->pr_itemoffset = ioff = ioff % align;
483 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
484 KASSERT(pp->pr_itemsperpage != 0);
485
486 /*
487 * Use the slack between the chunks and the page header
488 * for "cache coloring".
489 */
490 slack = off - pp->pr_itemsperpage * pp->pr_size;
491 pp->pr_maxcolor = (slack / align) * align;
492 pp->pr_curcolor = 0;
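
	/*
	 * Worked example with illustrative numbers (not taken from any
	 * particular platform): a 4096-byte page, a 64-byte item,
	 * align = 8, ioff = 0 and an assumed 48-byte in-page header give
	 * off = 4096 - 48 = 4048, itemsperpage = 4048 / 64 = 63,
	 * slack = 4048 - 63 * 64 = 16 and maxcolor = (16 / 8) * 8 = 16,
	 * so successive pages start their items at offsets 0, 8 and 16.
	 */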
493
494 pp->pr_nget = 0;
495 pp->pr_nfail = 0;
496 pp->pr_nput = 0;
497 pp->pr_npagealloc = 0;
498 pp->pr_npagefree = 0;
499 pp->pr_hiwat = 0;
500 pp->pr_nidle = 0;
501
502 #ifdef POOL_DIAGNOSTIC
503 if (flags & PR_LOGGING) {
504 if (kmem_map == NULL ||
505 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
506 M_TEMP, M_NOWAIT)) == NULL)
507 pp->pr_roflags &= ~PR_LOGGING;
508 pp->pr_curlogentry = 0;
509 pp->pr_logsize = pool_logsize;
510 }
511 #endif
512
513 pp->pr_entered_file = NULL;
514 pp->pr_entered_line = 0;
515
516 simple_lock_init(&pp->pr_slock);
517
518 /*
519 * Initialize private page header pool and cache magazine pool if we
520 * haven't done so yet.
521 * XXX LOCKING.
522 */
523 if (phpool.pr_size == 0) {
524 #ifdef POOL_SUBPAGE
525 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
526 "phpool", &pool_allocator_kmem);
527 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
528 PR_RECURSIVE, "psppool", &pool_allocator_kmem);
529 #else
530 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
531 0, "phpool", NULL);
532 #endif
533 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
534 0, "pcgpool", NULL);
535 }
536
537 /* Insert into the list of all pools. */
538 simple_lock(&pool_head_slock);
539 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
540 simple_unlock(&pool_head_slock);
541
542 /* Insert this into the list of pools using this allocator. */
543 simple_lock(&palloc->pa_slock);
544 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
545 simple_unlock(&palloc->pa_slock);
546 }
547
548 /*
549 * De-commission a pool resource.
550 */
551 void
552 pool_destroy(struct pool *pp)
553 {
554 struct pool_item_header *ph;
555 struct pool_cache *pc;
556
557 /* Locking order: pool_allocator -> pool */
558 simple_lock(&pp->pr_alloc->pa_slock);
559 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
560 simple_unlock(&pp->pr_alloc->pa_slock);
561
562 /* Destroy all caches for this pool. */
563 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
564 pool_cache_destroy(pc);
565
566 #ifdef DIAGNOSTIC
567 if (pp->pr_nout != 0) {
568 pr_printlog(pp, NULL, printf);
569 panic("pool_destroy: pool busy: still out: %u",
570 pp->pr_nout);
571 }
572 #endif
573
574 /* Remove all pages */
575 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
576 pr_rmpage(pp, ph, NULL);
577 KASSERT(LIST_EMPTY(&pp->pr_fullpages));
578 KASSERT(LIST_EMPTY(&pp->pr_partpages));
579
580 /* Remove from global pool list */
581 simple_lock(&pool_head_slock);
582 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
583 if (drainpp == pp) {
584 drainpp = NULL;
585 }
586 simple_unlock(&pool_head_slock);
587
588 #ifdef POOL_DIAGNOSTIC
589 if ((pp->pr_roflags & PR_LOGGING) != 0)
590 free(pp->pr_log, M_TEMP);
591 #endif
592 }
593
594 void
595 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
596 {
597
598 /* XXX no locking -- must be used just after pool_init() */
599 #ifdef DIAGNOSTIC
600 if (pp->pr_drain_hook != NULL)
601 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
602 #endif
603 pp->pr_drain_hook = fn;
604 pp->pr_drain_hook_arg = arg;
605 }
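
/*
 * Example of registering a drain hook (hypothetical "foo_drain" callback;
 * it is invoked with the pool unlocked and is expected to release items
 * back to the pool):
 *
 *	void foo_drain(void *arg, int flags);
 *
 *	pool_set_drain_hook(&foopool, foo_drain, NULL);
 */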
606
607 static struct pool_item_header *
608 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
609 {
610 struct pool_item_header *ph;
611 int s;
612
613 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
614
615 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
616 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
617 else {
618 s = splvm();
619 ph = pool_get(&phpool, flags);
620 splx(s);
621 }
622
623 return (ph);
624 }
625
626 /*
627 * Grab an item from the pool; must be called at appropriate spl level
628 */
629 void *
630 #ifdef POOL_DIAGNOSTIC
631 _pool_get(struct pool *pp, int flags, const char *file, long line)
632 #else
633 pool_get(struct pool *pp, int flags)
634 #endif
635 {
636 struct pool_item *pi;
637 struct pool_item_header *ph;
638 void *v;
639
640 #ifdef DIAGNOSTIC
641 if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
642 (flags & PR_WAITOK) != 0))
643 panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
644
645 #ifdef LOCKDEBUG
646 if (flags & PR_WAITOK)
647 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
648 #endif
649 #endif /* DIAGNOSTIC */
650
651 simple_lock(&pp->pr_slock);
652 pr_enter(pp, file, line);
653
654 startover:
655 /*
656 * Check to see if we've reached the hard limit. If we have,
657 * and we can wait, then wait until an item has been returned to
658 * the pool.
659 */
660 #ifdef DIAGNOSTIC
661 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
662 pr_leave(pp);
663 simple_unlock(&pp->pr_slock);
664 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
665 }
666 #endif
667 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
668 if (pp->pr_drain_hook != NULL) {
669 /*
670 * Since the drain hook is going to free things
671 * back to the pool, unlock, call the hook, re-lock,
672 * and check the hardlimit condition again.
673 */
674 pr_leave(pp);
675 simple_unlock(&pp->pr_slock);
676 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
677 simple_lock(&pp->pr_slock);
678 pr_enter(pp, file, line);
679 if (pp->pr_nout < pp->pr_hardlimit)
680 goto startover;
681 }
682
683 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
684 /*
685 * XXX: A warning isn't logged in this case. Should
686 * it be?
687 */
688 pp->pr_flags |= PR_WANTED;
689 pr_leave(pp);
690 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
691 pr_enter(pp, file, line);
692 goto startover;
693 }
694
695 /*
696 * Log a message that the hard limit has been hit.
697 */
698 if (pp->pr_hardlimit_warning != NULL &&
699 ratecheck(&pp->pr_hardlimit_warning_last,
700 &pp->pr_hardlimit_ratecap))
701 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
702
703 pp->pr_nfail++;
704
705 pr_leave(pp);
706 simple_unlock(&pp->pr_slock);
707 return (NULL);
708 }
709
710 /*
711 * The convention we use is that if `curpage' is not NULL, then
712 * it points at a non-empty bucket. In particular, `curpage'
713 * never points at a page header which has PR_PHINPAGE set and
714 * has no items in its bucket.
715 */
716 if ((ph = pp->pr_curpage) == NULL) {
717 #ifdef DIAGNOSTIC
718 if (pp->pr_nitems != 0) {
719 simple_unlock(&pp->pr_slock);
720 printf("pool_get: %s: curpage NULL, nitems %u\n",
721 pp->pr_wchan, pp->pr_nitems);
722 panic("pool_get: nitems inconsistent");
723 }
724 #endif
725
726 /*
727 * Call the back-end page allocator for more memory.
728 * Release the pool lock, as the back-end page allocator
729 * may block.
730 */
731 pr_leave(pp);
732 simple_unlock(&pp->pr_slock);
733 v = pool_allocator_alloc(pp, flags);
734 if (__predict_true(v != NULL))
735 ph = pool_alloc_item_header(pp, v, flags);
736 simple_lock(&pp->pr_slock);
737 pr_enter(pp, file, line);
738
739 if (__predict_false(v == NULL || ph == NULL)) {
740 if (v != NULL)
741 pool_allocator_free(pp, v);
742
743 /*
744 * We were unable to allocate a page or item
745 * header, but we released the lock during
746 * allocation, so perhaps items were freed
747 * back to the pool. Check for this case.
748 */
749 if (pp->pr_curpage != NULL)
750 goto startover;
751
752 if ((flags & PR_WAITOK) == 0) {
753 pp->pr_nfail++;
754 pr_leave(pp);
755 simple_unlock(&pp->pr_slock);
756 return (NULL);
757 }
758
759 /*
760 * Wait for items to be returned to this pool.
761 *
762 * XXX: maybe we should wake up once a second and
763 * try again?
764 */
765 pp->pr_flags |= PR_WANTED;
766 /* PA_WANT is already set on the allocator. */
767 pr_leave(pp);
768 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
769 pr_enter(pp, file, line);
770 goto startover;
771 }
772
773 /* We have more memory; add it to the pool */
774 pool_prime_page(pp, v, ph);
775 pp->pr_npagealloc++;
776
777 /* Start the allocation process over. */
778 goto startover;
779 }
780 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
781 pr_leave(pp);
782 simple_unlock(&pp->pr_slock);
783 panic("pool_get: %s: page empty", pp->pr_wchan);
784 }
785 #ifdef DIAGNOSTIC
786 if (__predict_false(pp->pr_nitems == 0)) {
787 pr_leave(pp);
788 simple_unlock(&pp->pr_slock);
789 printf("pool_get: %s: items on itemlist, nitems %u\n",
790 pp->pr_wchan, pp->pr_nitems);
791 panic("pool_get: nitems inconsistent");
792 }
793 #endif
794
795 #ifdef POOL_DIAGNOSTIC
796 pr_log(pp, v, PRLOG_GET, file, line);
797 #endif
798
799 #ifdef DIAGNOSTIC
800 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
801 pr_printlog(pp, pi, printf);
802 panic("pool_get(%s): free list modified: magic=%x; page %p;"
803 " item addr %p\n",
804 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
805 }
806 #endif
807
808 /*
809 * Remove from item list.
810 */
811 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
812 pp->pr_nitems--;
813 pp->pr_nout++;
814 if (ph->ph_nmissing == 0) {
815 #ifdef DIAGNOSTIC
816 if (__predict_false(pp->pr_nidle == 0))
817 panic("pool_get: nidle inconsistent");
818 #endif
819 pp->pr_nidle--;
820
821 /*
822 * This page was previously empty. Move it to the list of
823 * partially-full pages. This page is already curpage.
824 */
825 LIST_REMOVE(ph, ph_pagelist);
826 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
827 }
828 ph->ph_nmissing++;
829 if (TAILQ_EMPTY(&ph->ph_itemlist)) {
830 #ifdef DIAGNOSTIC
831 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
832 pr_leave(pp);
833 simple_unlock(&pp->pr_slock);
834 panic("pool_get: %s: nmissing inconsistent",
835 pp->pr_wchan);
836 }
837 #endif
838 /*
839 * This page is now full. Move it to the full list
840 * and select a new current page.
841 */
842 LIST_REMOVE(ph, ph_pagelist);
843 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
844 pool_update_curpage(pp);
845 }
846
847 pp->pr_nget++;
848
849 /*
850 * If we have a low water mark and we are now below that low
851 * water mark, add more items to the pool.
852 */
853 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
854 /*
855 * XXX: Should we log a warning? Should we set up a timeout
856 * to try again in a second or so? The latter could break
857 * a caller's assumptions about interrupt protection, etc.
858 */
859 }
860
861 pr_leave(pp);
862 simple_unlock(&pp->pr_slock);
863 return (v);
864 }
865
866 /*
867 * Internal version of pool_put(). Pool is already locked/entered.
868 */
869 static void
870 pool_do_put(struct pool *pp, void *v)
871 {
872 struct pool_item *pi = v;
873 struct pool_item_header *ph;
874 caddr_t page;
875 int s;
876
877 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
878
879 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
880
881 #ifdef DIAGNOSTIC
882 if (__predict_false(pp->pr_nout == 0)) {
883 printf("pool %s: putting with none out\n",
884 pp->pr_wchan);
885 panic("pool_put");
886 }
887 #endif
888
889 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
890 pr_printlog(pp, NULL, printf);
891 panic("pool_put: %s: page header missing", pp->pr_wchan);
892 }
893
894 #ifdef LOCKDEBUG
895 /*
896 * Check if we're freeing a locked simple lock.
897 */
898 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
899 #endif
900
901 /*
902 * Return to item list.
903 */
904 #ifdef DIAGNOSTIC
905 pi->pi_magic = PI_MAGIC;
906 #endif
907 #ifdef DEBUG
908 {
909 int i, *ip = v;
910
911 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
912 *ip++ = PI_MAGIC;
913 }
914 }
915 #endif
916
917 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
918 KDASSERT(ph->ph_nmissing != 0);
919 ph->ph_nmissing--;
920 pp->pr_nput++;
921 pp->pr_nitems++;
922 pp->pr_nout--;
923
924 /* Cancel "pool empty" condition if it exists */
925 if (pp->pr_curpage == NULL)
926 pp->pr_curpage = ph;
927
928 if (pp->pr_flags & PR_WANTED) {
929 pp->pr_flags &= ~PR_WANTED;
930 if (ph->ph_nmissing == 0)
931 pp->pr_nidle++;
932 wakeup((caddr_t)pp);
933 return;
934 }
935
936 /*
937 * If this page is now empty, do one of two things:
938 *
939 * (1) If we have more pages than the page high water mark,
940 * free the page back to the system.
941 *
942 * (2) Otherwise, move the page to the empty page list.
943 *
944 * Either way, select a new current page (so we use a partially-full
945 * page if one is available).
946 */
947 if (ph->ph_nmissing == 0) {
948 pp->pr_nidle++;
949 if (pp->pr_npages > pp->pr_maxpages ||
950 (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
951 pr_rmpage(pp, ph, NULL);
952 } else {
953 LIST_REMOVE(ph, ph_pagelist);
954 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
955
956 /*
957 * Update the timestamp on the page. A page must
958 * be idle for some period of time before it can
959 * be reclaimed by the pagedaemon. This minimizes
960 * ping-pong'ing for memory.
961 */
962 s = splclock();
963 ph->ph_time = mono_time;
964 splx(s);
965 }
966 pool_update_curpage(pp);
967 }
968
969 /*
970 * If the page was previously completely full, move it to the
971 * partially-full list and make it the current page. The next
972 * allocation will get the item from this page, instead of
973 * further fragmenting the pool.
974 */
975 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
976 LIST_REMOVE(ph, ph_pagelist);
977 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
978 pp->pr_curpage = ph;
979 }
980 }
981
982 /*
983 * Return resource to the pool; must be called at appropriate spl level
984 */
985 #ifdef POOL_DIAGNOSTIC
986 void
987 _pool_put(struct pool *pp, void *v, const char *file, long line)
988 {
989
990 simple_lock(&pp->pr_slock);
991 pr_enter(pp, file, line);
992
993 pr_log(pp, v, PRLOG_PUT, file, line);
994
995 pool_do_put(pp, v);
996
997 pr_leave(pp);
998 simple_unlock(&pp->pr_slock);
999 }
1000 #undef pool_put
1001 #endif /* POOL_DIAGNOSTIC */
1002
1003 void
1004 pool_put(struct pool *pp, void *v)
1005 {
1006
1007 simple_lock(&pp->pr_slock);
1008
1009 pool_do_put(pp, v);
1010
1011 simple_unlock(&pp->pr_slock);
1012 }
1013
1014 #ifdef POOL_DIAGNOSTIC
1015 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1016 #endif
1017
1018 /*
1019 * Add N items to the pool.
1020 */
1021 int
1022 pool_prime(struct pool *pp, int n)
1023 {
1024 struct pool_item_header *ph = NULL;
1025 caddr_t cp;
1026 int newpages;
1027
1028 simple_lock(&pp->pr_slock);
1029
1030 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1031
1032 while (newpages-- > 0) {
1033 simple_unlock(&pp->pr_slock);
1034 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1035 if (__predict_true(cp != NULL))
1036 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1037 simple_lock(&pp->pr_slock);
1038
1039 if (__predict_false(cp == NULL || ph == NULL)) {
1040 if (cp != NULL)
1041 pool_allocator_free(pp, cp);
1042 break;
1043 }
1044
1045 pool_prime_page(pp, cp, ph);
1046 pp->pr_npagealloc++;
1047 pp->pr_minpages++;
1048 }
1049
1050 if (pp->pr_minpages >= pp->pr_maxpages)
1051 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1052
1053 simple_unlock(&pp->pr_slock);
1054 return (0);
1055 }
1056
1057 /*
1058 * Add a page worth of items to the pool.
1059 *
1060 * Note, we must be called with the pool descriptor LOCKED.
1061 */
1062 static void
1063 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1064 {
1065 struct pool_item *pi;
1066 caddr_t cp = storage;
1067 unsigned int align = pp->pr_align;
1068 unsigned int ioff = pp->pr_itemoffset;
1069 int n;
1070 int s;
1071
1072 #ifdef DIAGNOSTIC
1073 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1074 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1075 #endif
1076
1077 /*
1078 * Insert page header.
1079 */
1080 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1081 TAILQ_INIT(&ph->ph_itemlist);
1082 ph->ph_page = storage;
1083 ph->ph_nmissing = 0;
1084 s = splclock();
1085 ph->ph_time = mono_time;
1086 splx(s);
1087 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1088 SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1089
1090 pp->pr_nidle++;
1091
1092 /*
1093 * Color this page.
1094 */
1095 cp = (caddr_t)(cp + pp->pr_curcolor);
1096 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1097 pp->pr_curcolor = 0;
1098
1099 /*
1100 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1101 */
1102 if (ioff != 0)
1103 cp = (caddr_t)(cp + (align - ioff));
1104
1105 /*
1106 * Insert remaining chunks on the bucket list.
1107 */
1108 n = pp->pr_itemsperpage;
1109 pp->pr_nitems += n;
1110
1111 while (n--) {
1112 pi = (struct pool_item *)cp;
1113
1114 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1115
1116 /* Insert on page list */
1117 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1118 #ifdef DIAGNOSTIC
1119 pi->pi_magic = PI_MAGIC;
1120 #endif
1121 cp = (caddr_t)(cp + pp->pr_size);
1122 }
1123
1124 /*
1125 * If the pool was depleted, point at the new page.
1126 */
1127 if (pp->pr_curpage == NULL)
1128 pp->pr_curpage = ph;
1129
1130 if (++pp->pr_npages > pp->pr_hiwat)
1131 pp->pr_hiwat = pp->pr_npages;
1132 }
1133
1134 /*
1135 * Used by pool_get() when nitems drops below the low water mark: it
1136 * brings pr_nitems back up to the low water mark.
1137 *
1138 * Note 1: we never wait for memory here; we let the caller decide what to do.
1139 *
1140 * Note 2: we must be called with the pool already locked, and we return
1141 * with it locked.
1142 */
1143 static int
1144 pool_catchup(struct pool *pp)
1145 {
1146 struct pool_item_header *ph = NULL;
1147 caddr_t cp;
1148 int error = 0;
1149
1150 while (POOL_NEEDS_CATCHUP(pp)) {
1151 /*
1152 * Call the page back-end allocator for more memory.
1153 *
1154 * XXX: We never wait, so should we bother unlocking
1155 * the pool descriptor?
1156 */
1157 simple_unlock(&pp->pr_slock);
1158 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1159 if (__predict_true(cp != NULL))
1160 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1161 simple_lock(&pp->pr_slock);
1162 if (__predict_false(cp == NULL || ph == NULL)) {
1163 if (cp != NULL)
1164 pool_allocator_free(pp, cp);
1165 error = ENOMEM;
1166 break;
1167 }
1168 pool_prime_page(pp, cp, ph);
1169 pp->pr_npagealloc++;
1170 }
1171
1172 return (error);
1173 }
1174
1175 static void
1176 pool_update_curpage(struct pool *pp)
1177 {
1178
1179 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1180 if (pp->pr_curpage == NULL) {
1181 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1182 }
1183 }
1184
1185 void
1186 pool_setlowat(struct pool *pp, int n)
1187 {
1188
1189 simple_lock(&pp->pr_slock);
1190
1191 pp->pr_minitems = n;
1192 pp->pr_minpages = (n == 0)
1193 ? 0
1194 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1195
1196 /* Make sure we're caught up with the newly-set low water mark. */
1197 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1198 /*
1199 * XXX: Should we log a warning? Should we set up a timeout
1200 * to try again in a second or so? The latter could break
1201 * a caller's assumptions about interrupt protection, etc.
1202 */
1203 }
1204
1205 simple_unlock(&pp->pr_slock);
1206 }
1207
1208 void
1209 pool_sethiwat(struct pool *pp, int n)
1210 {
1211
1212 simple_lock(&pp->pr_slock);
1213
1214 pp->pr_maxpages = (n == 0)
1215 ? 0
1216 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1217
1218 simple_unlock(&pp->pr_slock);
1219 }
1220
1221 void
1222 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1223 {
1224
1225 simple_lock(&pp->pr_slock);
1226
1227 pp->pr_hardlimit = n;
1228 pp->pr_hardlimit_warning = warnmess;
1229 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1230 pp->pr_hardlimit_warning_last.tv_sec = 0;
1231 pp->pr_hardlimit_warning_last.tv_usec = 0;
1232
1233 /*
1234 * In-line version of pool_sethiwat(), because we don't want to
1235 * release the lock.
1236 */
1237 pp->pr_maxpages = (n == 0)
1238 ? 0
1239 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1240
1241 simple_unlock(&pp->pr_slock);
1242 }
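
/*
 * Typical tuning calls (illustrative values; "foopool" is hypothetical):
 *
 *	pool_prime(&foopool, 64);		pre-allocate ~64 items
 *	pool_setlowat(&foopool, 16);		try to keep >= 16 items around
 *	pool_sethiwat(&foopool, 1024);		free idle pages above ~1024 items
 *	pool_sethardlimit(&foopool, 2048, "foopool: hard limit reached", 60);
 */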
1243
1244 /*
1245 * Release all complete pages that have not been used recently.
1246 */
1247 int
1248 #ifdef POOL_DIAGNOSTIC
1249 _pool_reclaim(struct pool *pp, const char *file, long line)
1250 #else
1251 pool_reclaim(struct pool *pp)
1252 #endif
1253 {
1254 struct pool_item_header *ph, *phnext;
1255 struct pool_cache *pc;
1256 struct timeval curtime;
1257 struct pool_pagelist pq;
1258 struct timeval diff;
1259 int s;
1260
1261 if (pp->pr_drain_hook != NULL) {
1262 /*
1263 * The drain hook must be called with the pool unlocked.
1264 */
1265 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1266 }
1267
1268 if (simple_lock_try(&pp->pr_slock) == 0)
1269 return (0);
1270 pr_enter(pp, file, line);
1271
1272 LIST_INIT(&pq);
1273
1274 /*
1275 * Reclaim items from the pool's caches.
1276 */
1277 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1278 pool_cache_reclaim(pc);
1279
1280 s = splclock();
1281 curtime = mono_time;
1282 splx(s);
1283
1284 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1285 phnext = LIST_NEXT(ph, ph_pagelist);
1286
1287 /* Check our minimum page claim */
1288 if (pp->pr_npages <= pp->pr_minpages)
1289 break;
1290
1291 KASSERT(ph->ph_nmissing == 0);
1292 timersub(&curtime, &ph->ph_time, &diff);
1293 if (diff.tv_sec < pool_inactive_time)
1294 continue;
1295
1296 /*
1297 * If freeing this page would put us below
1298 * the low water mark, stop now.
1299 */
1300 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1301 pp->pr_minitems)
1302 break;
1303
1304 pr_rmpage(pp, ph, &pq);
1305 }
1306
1307 pr_leave(pp);
1308 simple_unlock(&pp->pr_slock);
1309 if (LIST_EMPTY(&pq))
1310 return (0);
1311
1312 while ((ph = LIST_FIRST(&pq)) != NULL) {
1313 LIST_REMOVE(ph, ph_pagelist);
1314 pool_allocator_free(pp, ph->ph_page);
1315 if (pp->pr_roflags & PR_PHINPAGE) {
1316 continue;
1317 }
1318 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1319 s = splvm();
1320 pool_put(&phpool, ph);
1321 splx(s);
1322 }
1323
1324 return (1);
1325 }
1326
1327 /*
1328 * Drain pools, one at a time.
1329 *
1330 * Note, we must never be called from an interrupt context.
1331 */
1332 void
1333 pool_drain(void *arg)
1334 {
1335 struct pool *pp;
1336 int s;
1337
1338 pp = NULL;
1339 s = splvm();
1340 simple_lock(&pool_head_slock);
1341 if (drainpp == NULL) {
1342 drainpp = TAILQ_FIRST(&pool_head);
1343 }
1344 if (drainpp) {
1345 pp = drainpp;
1346 drainpp = TAILQ_NEXT(pp, pr_poollist);
1347 }
1348 simple_unlock(&pool_head_slock);
1349 pool_reclaim(pp);
1350 splx(s);
1351 }
1352
1353 /*
1354 * Diagnostic helpers.
1355 */
1356 void
1357 pool_print(struct pool *pp, const char *modif)
1358 {
1359 int s;
1360
1361 s = splvm();
1362 if (simple_lock_try(&pp->pr_slock) == 0) {
1363 printf("pool %s is locked; try again later\n",
1364 pp->pr_wchan);
1365 splx(s);
1366 return;
1367 }
1368 pool_print1(pp, modif, printf);
1369 simple_unlock(&pp->pr_slock);
1370 splx(s);
1371 }
1372
1373 void
1374 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1375 {
1376 int didlock = 0;
1377
1378 if (pp == NULL) {
1379 (*pr)("Must specify a pool to print.\n");
1380 return;
1381 }
1382
1383 /*
1384 * Called from DDB; interrupts should be blocked, and all
1385 * other processors should be paused. We can skip locking
1386 * the pool in this case.
1387 *
1388 * We do a simple_lock_try() just to print the lock
1389 * status, however.
1390 */
1391
1392 if (simple_lock_try(&pp->pr_slock) == 0)
1393 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1394 else
1395 didlock = 1;
1396
1397 pool_print1(pp, modif, pr);
1398
1399 if (didlock)
1400 simple_unlock(&pp->pr_slock);
1401 }
1402
1403 static void
1404 pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
1405 {
1406 struct pool_item_header *ph;
1407 #ifdef DIAGNOSTIC
1408 struct pool_item *pi;
1409 #endif
1410
1411 LIST_FOREACH(ph, pl, ph_pagelist) {
1412 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1413 ph->ph_page, ph->ph_nmissing,
1414 (u_long)ph->ph_time.tv_sec,
1415 (u_long)ph->ph_time.tv_usec);
1416 #ifdef DIAGNOSTIC
1417 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1418 if (pi->pi_magic != PI_MAGIC) {
1419 (*pr)("\t\t\titem %p, magic 0x%x\n",
1420 pi, pi->pi_magic);
1421 }
1422 }
1423 #endif
1424 }
1425 }
1426
1427 static void
1428 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1429 {
1430 struct pool_item_header *ph;
1431 struct pool_cache *pc;
1432 struct pool_cache_group *pcg;
1433 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1434 char c;
1435
1436 while ((c = *modif++) != '\0') {
1437 if (c == 'l')
1438 print_log = 1;
1439 if (c == 'p')
1440 print_pagelist = 1;
1441 if (c == 'c')
1442 print_cache = 1;
1443 }
1444
1445 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1446 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1447 pp->pr_roflags);
1448 (*pr)("\talloc %p\n", pp->pr_alloc);
1449 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1450 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1451 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1452 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1453
1454 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1455 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1456 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1457 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1458
1459 if (print_pagelist == 0)
1460 goto skip_pagelist;
1461
1462 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1463 (*pr)("\n\tempty page list:\n");
1464 pool_print_pagelist(&pp->pr_emptypages, pr);
1465 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1466 (*pr)("\n\tfull page list:\n");
1467 pool_print_pagelist(&pp->pr_fullpages, pr);
1468 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1469 (*pr)("\n\tpartial-page list:\n");
1470 pool_print_pagelist(&pp->pr_partpages, pr);
1471
1472 if (pp->pr_curpage == NULL)
1473 (*pr)("\tno current page\n");
1474 else
1475 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1476
1477 skip_pagelist:
1478 if (print_log == 0)
1479 goto skip_log;
1480
1481 (*pr)("\n");
1482 if ((pp->pr_roflags & PR_LOGGING) == 0)
1483 (*pr)("\tno log\n");
1484 else
1485 pr_printlog(pp, NULL, pr);
1486
1487 skip_log:
1488 if (print_cache == 0)
1489 goto skip_cache;
1490
1491 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1492 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1493 pc->pc_allocfrom, pc->pc_freeto);
1494 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1495 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1496 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1497 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1498 for (i = 0; i < PCG_NOBJECTS; i++) {
1499 if (pcg->pcg_objects[i].pcgo_pa !=
1500 POOL_PADDR_INVALID) {
1501 (*pr)("\t\t\t%p, 0x%llx\n",
1502 pcg->pcg_objects[i].pcgo_va,
1503 (unsigned long long)
1504 pcg->pcg_objects[i].pcgo_pa);
1505 } else {
1506 (*pr)("\t\t\t%p\n",
1507 pcg->pcg_objects[i].pcgo_va);
1508 }
1509 }
1510 }
1511 }
1512
1513 skip_cache:
1514 pr_enter_check(pp, pr);
1515 }
1516
1517 static int
1518 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1519 {
1520 struct pool_item *pi;
1521 caddr_t page;
1522 int n;
1523
1524 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1525 if (page != ph->ph_page &&
1526 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1527 if (label != NULL)
1528 printf("%s: ", label);
1529 printf("pool(%p:%s): page inconsistency: page %p;"
1530 " at page head addr %p (p %p)\n", pp,
1531 pp->pr_wchan, ph->ph_page,
1532 ph, page);
1533 return 1;
1534 }
1535
1536 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1537 pi != NULL;
1538 pi = TAILQ_NEXT(pi,pi_list), n++) {
1539
1540 #ifdef DIAGNOSTIC
1541 if (pi->pi_magic != PI_MAGIC) {
1542 if (label != NULL)
1543 printf("%s: ", label);
1544 printf("pool(%s): free list modified: magic=%x;"
1545 " page %p; item ordinal %d;"
1546 " addr %p (p %p)\n",
1547 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1548 n, pi, page);
1549 panic("pool");
1550 }
1551 #endif
1552 page =
1553 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1554 if (page == ph->ph_page)
1555 continue;
1556
1557 if (label != NULL)
1558 printf("%s: ", label);
1559 printf("pool(%p:%s): page inconsistency: page %p;"
1560 " item ordinal %d; addr %p (p %p)\n", pp,
1561 pp->pr_wchan, ph->ph_page,
1562 n, pi, page);
1563 return 1;
1564 }
1565 return 0;
1566 }
1567
1568
1569 int
1570 pool_chk(struct pool *pp, const char *label)
1571 {
1572 struct pool_item_header *ph;
1573 int r = 0;
1574
1575 simple_lock(&pp->pr_slock);
1576 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1577 r = pool_chk_page(pp, label, ph);
1578 if (r) {
1579 goto out;
1580 }
1581 }
1582 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1583 r = pool_chk_page(pp, label, ph);
1584 if (r) {
1585 goto out;
1586 }
1587 }
1588 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1589 r = pool_chk_page(pp, label, ph);
1590 if (r) {
1591 goto out;
1592 }
1593 }
1594
1595 out:
1596 simple_unlock(&pp->pr_slock);
1597 return (r);
1598 }
1599
1600 /*
1601 * pool_cache_init:
1602 *
1603 * Initialize a pool cache.
1604 *
1605 * NOTE: If the pool must be protected from interrupts, we expect
1606 * to be called at the appropriate interrupt priority level.
1607 */
1608 void
1609 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1610 int (*ctor)(void *, void *, int),
1611 void (*dtor)(void *, void *),
1612 void *arg)
1613 {
1614
1615 TAILQ_INIT(&pc->pc_grouplist);
1616 simple_lock_init(&pc->pc_slock);
1617
1618 pc->pc_allocfrom = NULL;
1619 pc->pc_freeto = NULL;
1620 pc->pc_pool = pp;
1621
1622 pc->pc_ctor = ctor;
1623 pc->pc_dtor = dtor;
1624 pc->pc_arg = arg;
1625
1626 pc->pc_hits = 0;
1627 pc->pc_misses = 0;
1628
1629 pc->pc_ngroups = 0;
1630
1631 pc->pc_nitems = 0;
1632
1633 simple_lock(&pp->pr_slock);
1634 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1635 simple_unlock(&pp->pr_slock);
1636 }
1637
1638 /*
1639 * pool_cache_destroy:
1640 *
1641 * Destroy a pool cache.
1642 */
1643 void
1644 pool_cache_destroy(struct pool_cache *pc)
1645 {
1646 struct pool *pp = pc->pc_pool;
1647
1648 /* First, invalidate the entire cache. */
1649 pool_cache_invalidate(pc);
1650
1651 /* ...and remove it from the pool's cache list. */
1652 simple_lock(&pp->pr_slock);
1653 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1654 simple_unlock(&pp->pr_slock);
1655 }
1656
1657 static __inline void *
1658 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1659 {
1660 void *object;
1661 u_int idx;
1662
1663 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1664 KASSERT(pcg->pcg_avail != 0);
1665 idx = --pcg->pcg_avail;
1666
1667 KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1668 object = pcg->pcg_objects[idx].pcgo_va;
1669 if (pap != NULL)
1670 *pap = pcg->pcg_objects[idx].pcgo_pa;
1671 pcg->pcg_objects[idx].pcgo_va = NULL;
1672
1673 return (object);
1674 }
1675
1676 static __inline void
1677 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1678 {
1679 u_int idx;
1680
1681 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1682 idx = pcg->pcg_avail++;
1683
1684 KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1685 pcg->pcg_objects[idx].pcgo_va = object;
1686 pcg->pcg_objects[idx].pcgo_pa = pa;
1687 }
1688
1689 /*
1690 * pool_cache_get{,_paddr}:
1691 *
1692 * Get an object from a pool cache (optionally returning
1693 * the physical address of the object).
1694 */
1695 void *
1696 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1697 {
1698 struct pool_cache_group *pcg;
1699 void *object;
1700
1701 #ifdef LOCKDEBUG
1702 if (flags & PR_WAITOK)
1703 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1704 #endif
1705
1706 simple_lock(&pc->pc_slock);
1707
1708 if ((pcg = pc->pc_allocfrom) == NULL) {
1709 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1710 if (pcg->pcg_avail != 0) {
1711 pc->pc_allocfrom = pcg;
1712 goto have_group;
1713 }
1714 }
1715
1716 /*
1717 * No groups with any available objects. Allocate
1718 * a new object, construct it, and return it to
1719 * the caller. We will allocate a group, if necessary,
1720 * when the object is freed back to the cache.
1721 */
1722 pc->pc_misses++;
1723 simple_unlock(&pc->pc_slock);
1724 object = pool_get(pc->pc_pool, flags);
1725 if (object != NULL && pc->pc_ctor != NULL) {
1726 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1727 pool_put(pc->pc_pool, object);
1728 return (NULL);
1729 }
1730 }
1731 if (object != NULL && pap != NULL) {
1732 #ifdef POOL_VTOPHYS
1733 *pap = POOL_VTOPHYS(object);
1734 #else
1735 *pap = POOL_PADDR_INVALID;
1736 #endif
1737 }
1738 return (object);
1739 }
1740
1741 have_group:
1742 pc->pc_hits++;
1743 pc->pc_nitems--;
1744 object = pcg_get(pcg, pap);
1745
1746 if (pcg->pcg_avail == 0)
1747 pc->pc_allocfrom = NULL;
1748
1749 simple_unlock(&pc->pc_slock);
1750
1751 return (object);
1752 }
1753
1754 /*
1755 * pool_cache_put{,_paddr}:
1756 *
1757 * Put an object back to the pool cache (optionally caching the
1758 * physical address of the object).
1759 */
1760 void
1761 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1762 {
1763 struct pool_cache_group *pcg;
1764 int s;
1765
1766 simple_lock(&pc->pc_slock);
1767
1768 if ((pcg = pc->pc_freeto) == NULL) {
1769 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1770 if (pcg->pcg_avail != PCG_NOBJECTS) {
1771 pc->pc_freeto = pcg;
1772 goto have_group;
1773 }
1774 }
1775
1776 /*
1777 * No cache group with room left to free the object to. Attempt to
1778 * allocate one.
1779 */
1780 simple_unlock(&pc->pc_slock);
1781 s = splvm();
1782 pcg = pool_get(&pcgpool, PR_NOWAIT);
1783 splx(s);
1784 if (pcg != NULL) {
1785 memset(pcg, 0, sizeof(*pcg));
1786 simple_lock(&pc->pc_slock);
1787 pc->pc_ngroups++;
1788 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1789 if (pc->pc_freeto == NULL)
1790 pc->pc_freeto = pcg;
1791 goto have_group;
1792 }
1793
1794 /*
1795 * Unable to allocate a cache group; destruct the object
1796 * and free it back to the pool.
1797 */
1798 pool_cache_destruct_object(pc, object);
1799 return;
1800 }
1801
1802 have_group:
1803 pc->pc_nitems++;
1804 pcg_put(pcg, object, pa);
1805
1806 if (pcg->pcg_avail == PCG_NOBJECTS)
1807 pc->pc_freeto = NULL;
1808
1809 simple_unlock(&pc->pc_slock);
1810 }
1811
1812 /*
1813 * pool_cache_destruct_object:
1814 *
1815 * Force destruction of an object and its release back into
1816 * the pool.
1817 */
1818 void
1819 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1820 {
1821
1822 if (pc->pc_dtor != NULL)
1823 (*pc->pc_dtor)(pc->pc_arg, object);
1824 pool_put(pc->pc_pool, object);
1825 }
1826
1827 /*
1828 * pool_cache_do_invalidate:
1829 *
1830 * This internal function implements pool_cache_invalidate() and
1831 * pool_cache_reclaim().
1832 */
1833 static void
1834 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1835 void (*putit)(struct pool *, void *))
1836 {
1837 struct pool_cache_group *pcg, *npcg;
1838 void *object;
1839 int s;
1840
1841 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1842 pcg = npcg) {
1843 npcg = TAILQ_NEXT(pcg, pcg_list);
1844 while (pcg->pcg_avail != 0) {
1845 pc->pc_nitems--;
1846 object = pcg_get(pcg, NULL);
1847 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1848 pc->pc_allocfrom = NULL;
1849 if (pc->pc_dtor != NULL)
1850 (*pc->pc_dtor)(pc->pc_arg, object);
1851 (*putit)(pc->pc_pool, object);
1852 }
1853 if (free_groups) {
1854 pc->pc_ngroups--;
1855 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1856 if (pc->pc_freeto == pcg)
1857 pc->pc_freeto = NULL;
1858 s = splvm();
1859 pool_put(&pcgpool, pcg);
1860 splx(s);
1861 }
1862 }
1863 }
1864
1865 /*
1866 * pool_cache_invalidate:
1867 *
1868 * Invalidate a pool cache (destruct and release all of the
1869 * cached objects).
1870 */
1871 void
1872 pool_cache_invalidate(struct pool_cache *pc)
1873 {
1874
1875 simple_lock(&pc->pc_slock);
1876 pool_cache_do_invalidate(pc, 0, pool_put);
1877 simple_unlock(&pc->pc_slock);
1878 }
1879
1880 /*
1881 * pool_cache_reclaim:
1882 *
1883 * Reclaim a pool cache for pool_reclaim().
1884 */
1885 static void
1886 pool_cache_reclaim(struct pool_cache *pc)
1887 {
1888
1889 simple_lock(&pc->pc_slock);
1890 pool_cache_do_invalidate(pc, 1, pool_do_put);
1891 simple_unlock(&pc->pc_slock);
1892 }
1893
1894 /*
1895 * Pool backend allocators.
1896 *
1897 * Each pool has a backend allocator that handles allocation, deallocation,
1898 * and any additional draining that might be needed.
1899 *
1900 * We provide two standard allocators:
1901 *
1902 * pool_allocator_kmem - the default when no allocator is specified
1903 *
1904 * pool_allocator_nointr - used for pools that will not be accessed
1905 * in interrupt context.
1906 */
1907 void *pool_page_alloc(struct pool *, int);
1908 void pool_page_free(struct pool *, void *);
1909
1910 struct pool_allocator pool_allocator_kmem = {
1911 pool_page_alloc, pool_page_free, 0,
1912 };
1913
1914 void *pool_page_alloc_nointr(struct pool *, int);
1915 void pool_page_free_nointr(struct pool *, void *);
1916
1917 struct pool_allocator pool_allocator_nointr = {
1918 pool_page_alloc_nointr, pool_page_free_nointr, 0,
1919 };
1920
1921 #ifdef POOL_SUBPAGE
1922 void *pool_subpage_alloc(struct pool *, int);
1923 void pool_subpage_free(struct pool *, void *);
1924
1925 struct pool_allocator pool_allocator_kmem_subpage = {
1926 pool_subpage_alloc, pool_subpage_free, 0,
1927 };
1928 #endif /* POOL_SUBPAGE */
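
/*
 * A pool may also supply its own back-end allocator.  A hedged sketch
 * (the "mypage_alloc"/"mypage_free" functions and "my_allocator" are
 * hypothetical); a pa_pagesz of 0 asks pool_init() to pick the default
 * page size:
 *
 *	void *mypage_alloc(struct pool *, int);
 *	void mypage_free(struct pool *, void *);
 *
 *	struct pool_allocator my_allocator = {
 *		mypage_alloc, mypage_free, 0,
 *	};
 *
 *	pool_init(&mypool, sizeof(struct foo), 0, 0, 0, "mypl", &my_allocator);
 */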
1929
1930 /*
1931 * We have at least three different resources for the same allocation and
1932 * each resource can be depleted. First, we have the ready elements in the
1933 * pool. Then we have the resource (typically a vm_map) for this allocator.
1934 * Finally, we have physical memory. Waiting for any of these can be
1935 * unnecessary when any other is freed, but the kernel doesn't support
1936 * sleeping on multiple wait channels, so we have to employ another strategy.
1937 *
1938 * The caller sleeps on the pool (so that it can be awakened when an item
1939 * is returned to the pool), but we set PA_WANT on the allocator. When a
1940 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1941 * will wake up all sleeping pools belonging to this allocator.
1942 *
1943 * XXX Thundering herd.
1944 */
1945 void *
1946 pool_allocator_alloc(struct pool *org, int flags)
1947 {
1948 struct pool_allocator *pa = org->pr_alloc;
1949 struct pool *pp, *start;
1950 int s, freed;
1951 void *res;
1952
1953 do {
1954 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1955 return (res);
1956 if ((flags & PR_WAITOK) == 0) {
1957 /*
1958 * We only run the drain hook here if PR_NOWAIT.
1959 * In other cases, the hook will be run in
1960 * pool_reclaim().
1961 */
1962 if (org->pr_drain_hook != NULL) {
1963 (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1964 flags);
1965 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1966 return (res);
1967 }
1968 break;
1969 }
1970
1971 /*
1972 * Drain all pools, except "org", that use this
1973 * allocator. We do this to reclaim VA space.
1974 * pa_alloc is responsible for waiting for
1975 * physical memory.
1976 *
1977 * XXX We risk looping forever if someone
1978 * calls pool_destroy on "start". But there is no
1979 * other way to have potentially sleeping pool_reclaim,
1980 * non-sleeping locks on pool_allocator, and some
1981 * stirring of drained pools in the allocator.
1982 *
1983 * XXX Maybe we should use pool_head_slock for locking
1984 * the allocators?
1985 */
1986 freed = 0;
1987
1988 s = splvm();
1989 simple_lock(&pa->pa_slock);
1990 pp = start = TAILQ_FIRST(&pa->pa_list);
1991 do {
1992 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1993 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1994 if (pp == org)
1995 continue;
1996 simple_unlock(&pa->pa_slock);
1997 freed = pool_reclaim(pp);
1998 simple_lock(&pa->pa_slock);
1999 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2000 freed == 0);
2001
2002 if (freed == 0) {
2003 /*
2004 * We set PA_WANT here; the caller will most likely
2005 * sleep waiting for pages (if not, this won't hurt
2006 * that much), and there is no way to set this in
2007 * the caller without violating locking order.
2008 */
2009 pa->pa_flags |= PA_WANT;
2010 }
2011 simple_unlock(&pa->pa_slock);
2012 splx(s);
2013 } while (freed);
2014 return (NULL);
2015 }
2016
2017 void
2018 pool_allocator_free(struct pool *pp, void *v)
2019 {
2020 struct pool_allocator *pa = pp->pr_alloc;
2021 int s;
2022
2023 (*pa->pa_free)(pp, v);
2024
2025 s = splvm();
2026 simple_lock(&pa->pa_slock);
2027 if ((pa->pa_flags & PA_WANT) == 0) {
2028 simple_unlock(&pa->pa_slock);
2029 splx(s);
2030 return;
2031 }
2032
2033 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2034 simple_lock(&pp->pr_slock);
2035 if ((pp->pr_flags & PR_WANTED) != 0) {
2036 pp->pr_flags &= ~PR_WANTED;
2037 wakeup(pp);
2038 }
2039 simple_unlock(&pp->pr_slock);
2040 }
2041 pa->pa_flags &= ~PA_WANT;
2042 simple_unlock(&pa->pa_slock);
2043 splx(s);
2044 }
2045
2046 void *
2047 pool_page_alloc(struct pool *pp, int flags)
2048 {
2049 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2050
2051 return ((void *) uvm_km_alloc_poolpage(waitok));
2052 }
2053
2054 void
2055 pool_page_free(struct pool *pp, void *v)
2056 {
2057
2058 uvm_km_free_poolpage((vaddr_t) v);
2059 }
2060
2061 #ifdef POOL_SUBPAGE
2062 /* Sub-page allocator, for machines with large hardware pages. */
2063 void *
2064 pool_subpage_alloc(struct pool *pp, int flags)
2065 {
2066
2067 return (pool_get(&psppool, flags));
2068 }
2069
2070 void
2071 pool_subpage_free(struct pool *pp, void *v)
2072 {
2073
2074 pool_put(&psppool, v);
2075 }
2076
2077 /* We don't provide a real nointr allocator. Maybe later. */
2078 void *
2079 pool_page_alloc_nointr(struct pool *pp, int flags)
2080 {
2081
2082 return (pool_subpage_alloc(pp, flags));
2083 }
2084
2085 void
2086 pool_page_free_nointr(struct pool *pp, void *v)
2087 {
2088
2089 pool_subpage_free(pp, v);
2090 }
2091 #else
2092 void *
2093 pool_page_alloc_nointr(struct pool *pp, int flags)
2094 {
2095 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2096
2097 return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2098 uvm.kernel_object, waitok));
2099 }
2100
2101 void
2102 pool_page_free_nointr(struct pool *pp, void *v)
2103 {
2104
2105 uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2106 }
2107 #endif /* POOL_SUBPAGE */
2108