      1 /*	$NetBSD: subr_pool.c,v 1.128.2.4 2007/03/22 12:30:29 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include <sys/cdefs.h>
     41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.4 2007/03/22 12:30:29 ad Exp $");
     42 
     43 #include "opt_pool.h"
     44 #include "opt_poollog.h"
     45 #include "opt_lockdebug.h"
     46 
     47 #include <sys/param.h>
     48 #include <sys/systm.h>
     49 #include <sys/proc.h>
     50 #include <sys/errno.h>
     51 #include <sys/kernel.h>
     52 #include <sys/malloc.h>
     53 #include <sys/lock.h>
     54 #include <sys/pool.h>
     55 #include <sys/syslog.h>
     56 #include <sys/debug.h>
     57 
     58 #include <uvm/uvm.h>
     59 
     60 /*
     61  * Pool resource management utility.
     62  *
     63  * Memory is allocated in pages which are split into pieces according to
     64  * the pool item size. Each page is kept on one of three lists in the
     65  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
     66  * for empty, full and partially-full pages respectively. The individual
     67  * pool items are on a linked list headed by `ph_itemlist' in each page
     68  * header. The memory for building the page list is either taken from
     69  * the allocated pages themselves (for small pool items) or taken from
     70  * an internal pool of page headers (`phpool').
     71  */
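         /*
          * Illustrative usage sketch (annotation, not part of the original
          * file): a typical client of this interface, for a hypothetical
          * `struct foo'; the allocator and IPL arguments are examples only.
          *
          *	static struct pool foo_pool;
          *
          *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
          *	    "foopl", &pool_allocator_nointr, IPL_NONE);
          *
          *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
          *	...
          *	pool_put(&foo_pool, f);
          */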
     72 
     73 /* List of all pools */
     74 LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
     75 
     76 /* Private pool for page header structures */
     77 #define	PHPOOL_MAX	8
     78 static struct pool phpool[PHPOOL_MAX];
     79 #define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
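         /*
          * Annotation (not original): phpool[0] serves plain page headers;
          * phpool[idx > 0] is sized to also hold a 2^idx entry PR_NOTOUCH
          * freelist (see the phpool initialization in pool_init() below).
          */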
     80 
     81 #ifdef POOL_SUBPAGE
     82 /* Pool of subpages for use by normal pools. */
     83 static struct pool psppool;
     84 #endif
     85 
     86 static SLIST_HEAD(, pool_allocator) pa_deferinitq =
     87     SLIST_HEAD_INITIALIZER(pa_deferinitq);
     88 
     89 static void *pool_page_alloc_meta(struct pool *, int);
     90 static void pool_page_free_meta(struct pool *, void *);
     91 
     92 /* allocator for pool metadata */
     93 static struct pool_allocator pool_allocator_meta = {
     94 	pool_page_alloc_meta, pool_page_free_meta,
     95 	.pa_backingmapptr = &kmem_map,
     96 };
     97 
     98 /* # of seconds to retain page after last use */
     99 int pool_inactive_time = 10;
    100 
    101 /* Next candidate for drainage (see pool_drain()) */
    102 static struct pool	*drainpp;
    103 
    104 /* This lock protects both pool_head and drainpp. */
    105 static kmutex_t pool_head_lock;
    106 
    107 typedef uint8_t pool_item_freelist_t;
    108 
    109 struct pool_item_header {
    110 	/* Page headers */
    111 	LIST_ENTRY(pool_item_header)
    112 				ph_pagelist;	/* pool page list */
    113 	SPLAY_ENTRY(pool_item_header)
    114 				ph_node;	/* Off-page page headers */
    115 	void *			ph_page;	/* this page's address */
    116 	struct timeval		ph_time;	/* last referenced */
    117 	union {
    118 		/* !PR_NOTOUCH */
    119 		struct {
    120 			LIST_HEAD(, pool_item)
    121 				phu_itemlist;	/* chunk list for this page */
    122 		} phu_normal;
    123 		/* PR_NOTOUCH */
    124 		struct {
    125 			uint16_t
    126 				phu_off;	/* start offset in page */
    127 			pool_item_freelist_t
    128 				phu_firstfree;	/* first free item */
    129 			/*
    130 			 * XXX it might be better to use
    131 			 * a simple bitmap and ffs(3)
    132 			 */
    133 		} phu_notouch;
    134 	} ph_u;
    135 	uint16_t		ph_nmissing;	/* # of chunks in use */
    136 };
    137 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
    138 #define	ph_off		ph_u.phu_notouch.phu_off
    139 #define	ph_firstfree	ph_u.phu_notouch.phu_firstfree
    140 
    141 struct pool_item {
    142 #ifdef DIAGNOSTIC
    143 	u_int pi_magic;
    144 #endif
    145 #define	PI_MAGIC 0xdeadbeefU
    146 	/* Other entries use only this list entry */
    147 	LIST_ENTRY(pool_item)	pi_list;
    148 };
    149 
    150 #define	POOL_NEEDS_CATCHUP(pp)						\
    151 	((pp)->pr_nitems < (pp)->pr_minitems)
    152 
    153 /*
    154  * Pool cache management.
    155  *
    156  * Pool caches provide a way for constructed objects to be cached by the
    157  * pool subsystem.  This can lead to performance improvements by avoiding
     158  * needless object construction/destruction; destruction is deferred
     159  * until absolutely necessary.
    160  *
    161  * Caches are grouped into cache groups.  Each cache group references
    162  * up to 16 constructed objects.  When a cache allocates an object
    163  * from the pool, it calls the object's constructor and places it into
    164  * a cache group.  When a cache group frees an object back to the pool,
    165  * it first calls the object's destructor.  This allows the object to
    166  * persist in constructed form while freed to the cache.
    167  *
    168  * Multiple caches may exist for each pool.  This allows a single
    169  * object type to have multiple constructed forms.  The pool references
    170  * each cache, so that when a pool is drained by the pagedaemon, it can
    171  * drain each individual cache as well.  Each time a cache is drained,
    172  * the most idle cache group is freed to the pool in its entirety.
    173  *
     174  * Pool caches are laid on top of pools.  By layering them, we can avoid
    175  * the complexity of cache management for pools which would not benefit
    176  * from it.
    177  */
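         /*
          * Illustrative sketch (annotation, not part of the original file):
          * layering a cache over a pool with the pool_cache interface of
          * this vintage as declared in <sys/pool.h>; `bar_pool', `bar_ctor'
          * and `bar_dtor' are hypothetical.
          *
          *	static struct pool_cache bar_cache;
          *
          *	pool_cache_init(&bar_cache, &bar_pool, bar_ctor, bar_dtor, NULL);
          *	struct bar *b = pool_cache_get(&bar_cache, PR_WAITOK);
          *	...
          *	pool_cache_put(&bar_cache, b);
          */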
    178 
    179 /* The cache group pool. */
    180 static struct pool pcgpool;
    181 
    182 static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
    183 				   struct pool_cache_grouplist *);
    184 static void	pcg_grouplist_free(struct pool_cache_grouplist *);
    185 
    186 static int	pool_catchup(struct pool *);
    187 static void	pool_prime_page(struct pool *, void *,
    188 		    struct pool_item_header *);
    189 static void	pool_update_curpage(struct pool *);
    190 
    191 static int	pool_grow(struct pool *, int);
    192 static void	*pool_allocator_alloc(struct pool *, int);
    193 static void	pool_allocator_free(struct pool *, void *);
    194 
    195 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
    196 	void (*)(const char *, ...));
    197 static void pool_print1(struct pool *, const char *,
    198 	void (*)(const char *, ...));
    199 
    200 static int pool_chk_page(struct pool *, const char *,
    201 			 struct pool_item_header *);
    202 
    203 /*
    204  * Pool log entry. An array of these is allocated in pool_init().
    205  */
    206 struct pool_log {
    207 	const char	*pl_file;
    208 	long		pl_line;
    209 	int		pl_action;
    210 #define	PRLOG_GET	1
    211 #define	PRLOG_PUT	2
    212 	void		*pl_addr;
    213 };
    214 
    215 #ifdef POOL_DIAGNOSTIC
    216 /* Number of entries in pool log buffers */
    217 #ifndef POOL_LOGSIZE
    218 #define	POOL_LOGSIZE	10
    219 #endif
    220 
    221 int pool_logsize = POOL_LOGSIZE;
    222 
    223 static inline void
    224 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    225 {
    226 	int n = pp->pr_curlogentry;
    227 	struct pool_log *pl;
    228 
    229 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    230 		return;
    231 
    232 	/*
    233 	 * Fill in the current entry. Wrap around and overwrite
    234 	 * the oldest entry if necessary.
    235 	 */
    236 	pl = &pp->pr_log[n];
    237 	pl->pl_file = file;
    238 	pl->pl_line = line;
    239 	pl->pl_action = action;
    240 	pl->pl_addr = v;
    241 	if (++n >= pp->pr_logsize)
    242 		n = 0;
    243 	pp->pr_curlogentry = n;
    244 }
    245 
    246 static void
    247 pr_printlog(struct pool *pp, struct pool_item *pi,
    248     void (*pr)(const char *, ...))
    249 {
    250 	int i = pp->pr_logsize;
    251 	int n = pp->pr_curlogentry;
    252 
    253 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    254 		return;
    255 
    256 	/*
    257 	 * Print all entries in this pool's log.
    258 	 */
    259 	while (i-- > 0) {
    260 		struct pool_log *pl = &pp->pr_log[n];
    261 		if (pl->pl_action != 0) {
    262 			if (pi == NULL || pi == pl->pl_addr) {
    263 				(*pr)("\tlog entry %d:\n", i);
    264 				(*pr)("\t\taction = %s, addr = %p\n",
    265 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    266 				    pl->pl_addr);
     267 				(*pr)("\t\tfile: %s at line %ld\n",
     268 				    pl->pl_file, pl->pl_line);
    269 			}
    270 		}
    271 		if (++n >= pp->pr_logsize)
    272 			n = 0;
    273 	}
    274 }
    275 
    276 static inline void
    277 pr_enter(struct pool *pp, const char *file, long line)
    278 {
    279 
    280 	if (__predict_false(pp->pr_entered_file != NULL)) {
    281 		printf("pool %s: reentrancy at file %s line %ld\n",
    282 		    pp->pr_wchan, file, line);
    283 		printf("         previous entry at file %s line %ld\n",
    284 		    pp->pr_entered_file, pp->pr_entered_line);
    285 		panic("pr_enter");
    286 	}
    287 
    288 	pp->pr_entered_file = file;
    289 	pp->pr_entered_line = line;
    290 }
    291 
    292 static inline void
    293 pr_leave(struct pool *pp)
    294 {
    295 
    296 	if (__predict_false(pp->pr_entered_file == NULL)) {
    297 		printf("pool %s not entered?\n", pp->pr_wchan);
    298 		panic("pr_leave");
    299 	}
    300 
    301 	pp->pr_entered_file = NULL;
    302 	pp->pr_entered_line = 0;
    303 }
    304 
    305 static inline void
    306 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    307 {
    308 
    309 	if (pp->pr_entered_file != NULL)
    310 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    311 		    pp->pr_entered_file, pp->pr_entered_line);
    312 }
    313 #else
    314 #define	pr_log(pp, v, action, file, line)
    315 #define	pr_printlog(pp, pi, pr)
    316 #define	pr_enter(pp, file, line)
    317 #define	pr_leave(pp)
    318 #define	pr_enter_check(pp, pr)
    319 #endif /* POOL_DIAGNOSTIC */
    320 
    321 static inline int
    322 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    323     const void *v)
    324 {
    325 	const char *cp = v;
    326 	int idx;
    327 
    328 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
    329 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
    330 	KASSERT(idx < pp->pr_itemsperpage);
    331 	return idx;
    332 }
    333 
    334 #define	PR_FREELIST_ALIGN(p) \
    335 	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
    336 #define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
    337 #define	PR_INDEX_USED	((pool_item_freelist_t)-1)
    338 #define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
    339 
    340 static inline void
    341 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    342     void *obj)
    343 {
    344 	int idx = pr_item_notouch_index(pp, ph, obj);
    345 	pool_item_freelist_t *freelist = PR_FREELIST(ph);
    346 
    347 	KASSERT(freelist[idx] == PR_INDEX_USED);
    348 	freelist[idx] = ph->ph_firstfree;
    349 	ph->ph_firstfree = idx;
    350 }
    351 
    352 static inline void *
    353 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
    354 {
    355 	int idx = ph->ph_firstfree;
    356 	pool_item_freelist_t *freelist = PR_FREELIST(ph);
    357 
    358 	KASSERT(freelist[idx] != PR_INDEX_USED);
    359 	ph->ph_firstfree = freelist[idx];
    360 	freelist[idx] = PR_INDEX_USED;
    361 
    362 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
    363 }
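         /*
          * Worked example (annotation, not original): with 4 items per page,
          * pool_prime_page() builds the freelist {1, 2, 3, PR_INDEX_EOL} with
          * ph_firstfree = 0.  pr_item_notouch_get() then hands out item 0,
          * marks freelist[0] PR_INDEX_USED and advances ph_firstfree to 1;
          * pr_item_notouch_put() pushes a returned index back on the front.
          */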
    364 
    365 static inline int
    366 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
    367 {
    368 
    369 	/*
     370 	 * We consider a pool_item_header with a smaller ph_page to be bigger.
     371 	 * (This unnatural ordering is for the benefit of pr_find_pagehead.)
    372 	 */
    373 
    374 	if (a->ph_page < b->ph_page)
    375 		return (1);
    376 	else if (a->ph_page > b->ph_page)
    377 		return (-1);
    378 	else
    379 		return (0);
    380 }
    381 
    382 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
    383 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
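         /*
          * Annotation (not original): because the comparison above is
          * inverted, an in-order walk visits pages from the highest address
          * down.  The PR_NOALIGN path of pr_find_pagehead() relies on this:
          * when SPLAY_FIND misses, SPLAY_NEXT steps to the header with the
          * largest ph_page below the probed item address, i.e. the page
          * that would contain the item.
          */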
    384 
    385 /*
    386  * Return the pool page header based on item address.
    387  */
    388 static inline struct pool_item_header *
    389 pr_find_pagehead(struct pool *pp, void *v)
    390 {
    391 	struct pool_item_header *ph, tmp;
    392 
    393 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
    394 		tmp.ph_page = (void *)(uintptr_t)v;
    395 		ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    396 		if (ph == NULL) {
    397 			ph = SPLAY_ROOT(&pp->pr_phtree);
    398 			if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
    399 				ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
    400 			}
    401 			KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
    402 		}
    403 	} else {
    404 		void *page =
    405 		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
    406 
    407 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
    408 			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
    409 		} else {
    410 			tmp.ph_page = page;
    411 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    412 		}
    413 	}
    414 
    415 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
    416 	    ((char *)ph->ph_page <= (char *)v &&
    417 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
    418 	return ph;
    419 }
    420 
    421 static void
    422 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
    423 {
    424 	struct pool_item_header *ph;
    425 
    426 	while ((ph = LIST_FIRST(pq)) != NULL) {
    427 		LIST_REMOVE(ph, ph_pagelist);
    428 		pool_allocator_free(pp, ph->ph_page);
    429 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    430 			pool_put(pp->pr_phpool, ph);
    431 	}
    432 }
    433 
    434 /*
    435  * Remove a page from the pool.
    436  */
    437 static inline void
    438 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    439      struct pool_pagelist *pq)
    440 {
    441 
    442 	KASSERT(mutex_owned(&pp->pr_lock));
    443 
    444 	/*
    445 	 * If the page was idle, decrement the idle page count.
    446 	 */
    447 	if (ph->ph_nmissing == 0) {
    448 #ifdef DIAGNOSTIC
    449 		if (pp->pr_nidle == 0)
    450 			panic("pr_rmpage: nidle inconsistent");
    451 		if (pp->pr_nitems < pp->pr_itemsperpage)
    452 			panic("pr_rmpage: nitems inconsistent");
    453 #endif
    454 		pp->pr_nidle--;
    455 	}
    456 
    457 	pp->pr_nitems -= pp->pr_itemsperpage;
    458 
    459 	/*
    460 	 * Unlink the page from the pool and queue it for release.
    461 	 */
    462 	LIST_REMOVE(ph, ph_pagelist);
    463 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    464 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
    465 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
    466 
    467 	pp->pr_npages--;
    468 	pp->pr_npagefree++;
    469 
    470 	pool_update_curpage(pp);
    471 }
    472 
    473 static bool
    474 pa_starved_p(struct pool_allocator *pa)
    475 {
    476 
    477 	if (pa->pa_backingmap != NULL) {
    478 		return vm_map_starved_p(pa->pa_backingmap);
    479 	}
    480 	return false;
    481 }
    482 
    483 static int
    484 pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
    485 {
    486 	struct pool *pp = obj;
    487 	struct pool_allocator *pa = pp->pr_alloc;
    488 
    489 	KASSERT(&pp->pr_reclaimerentry == ce);
    490 	pool_reclaim(pp);
    491 	if (!pa_starved_p(pa)) {
    492 		return CALLBACK_CHAIN_ABORT;
    493 	}
    494 	return CALLBACK_CHAIN_CONTINUE;
    495 }
    496 
    497 static void
    498 pool_reclaim_register(struct pool *pp)
    499 {
    500 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    501 	int s;
    502 
    503 	if (map == NULL) {
    504 		return;
    505 	}
    506 
    507 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    508 	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    509 	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
    510 	splx(s);
    511 }
    512 
    513 static void
    514 pool_reclaim_unregister(struct pool *pp)
    515 {
    516 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    517 	int s;
    518 
    519 	if (map == NULL) {
    520 		return;
    521 	}
    522 
    523 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    524 	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    525 	    &pp->pr_reclaimerentry);
    526 	splx(s);
    527 }
    528 
    529 static void
    530 pa_reclaim_register(struct pool_allocator *pa)
    531 {
    532 	struct vm_map *map = *pa->pa_backingmapptr;
    533 	struct pool *pp;
    534 
    535 	KASSERT(pa->pa_backingmap == NULL);
    536 	if (map == NULL) {
    537 		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
    538 		return;
    539 	}
    540 	pa->pa_backingmap = map;
    541 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
    542 		pool_reclaim_register(pp);
    543 	}
    544 }
    545 
    546 /*
    547  * Initialize all the pools listed in the "pools" link set.
    548  */
    549 void
    550 pool_subsystem_init(void)
    551 {
    552 	struct pool_allocator *pa;
    553 	__link_set_decl(pools, struct link_pool_init);
    554 	struct link_pool_init * const *pi;
    555 
    556 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
    557 
    558 	__link_set_foreach(pi, pools)
    559 		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
    560 		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
    561 		    (*pi)->palloc, (*pi)->ipl);
    562 
    563 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
    564 		KASSERT(pa->pa_backingmapptr != NULL);
    565 		KASSERT(*pa->pa_backingmapptr != NULL);
    566 		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
    567 		pa_reclaim_register(pa);
    568 	}
    569 }
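         /*
          * Illustrative sketch (annotation): entries normally join the
          * "pools" link set via the POOL_INIT() helper in <sys/pool.h>,
          * e.g. for a hypothetical `foo_pool':
          *
          *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
          *	    &pool_allocator_nointr, IPL_NONE);
          *
          * pool_subsystem_init() then performs the deferred pool_init()
          * calls above.
          */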
    570 
    571 /*
    572  * Initialize the given pool resource structure.
    573  *
    574  * We export this routine to allow other kernel parts to declare
    575  * static pools that must be initialized before malloc() is available.
    576  */
    577 void
    578 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    579     const char *wchan, struct pool_allocator *palloc, int ipl)
    580 {
    581 #ifdef DEBUG
    582 	struct pool *pp1;
    583 #endif
    584 	size_t trysize, phsize;
    585 	int off, slack;
    586 
    587 	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
    588 	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
    589 
    590 #ifdef DEBUG
    591 	/*
    592 	 * Check that the pool hasn't already been initialised and
    593 	 * added to the list of all pools.
    594 	 */
    595 	LIST_FOREACH(pp1, &pool_head, pr_poollist) {
    596 		if (pp == pp1)
    597 			panic("pool_init: pool %s already initialised",
    598 			    wchan);
    599 	}
    600 #endif
    601 
    602 #ifdef POOL_DIAGNOSTIC
    603 	/*
    604 	 * Always log if POOL_DIAGNOSTIC is defined.
    605 	 */
    606 	if (pool_logsize != 0)
    607 		flags |= PR_LOGGING;
    608 #endif
    609 
    610 	if (palloc == NULL)
    611 		palloc = &pool_allocator_kmem;
    612 #ifdef POOL_SUBPAGE
    613 	if (size > palloc->pa_pagesz) {
    614 		if (palloc == &pool_allocator_kmem)
    615 			palloc = &pool_allocator_kmem_fullpage;
    616 		else if (palloc == &pool_allocator_nointr)
    617 			palloc = &pool_allocator_nointr_fullpage;
    618 	}
    619 #endif /* POOL_SUBPAGE */
    620 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
    621 		if (palloc->pa_pagesz == 0)
    622 			palloc->pa_pagesz = PAGE_SIZE;
    623 
    624 		TAILQ_INIT(&palloc->pa_list);
    625 
    626 		mutex_init(&palloc->pa_lock, MUTEX_DRIVER, IPL_VM);
    627 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    628 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    629 
    630 		if (palloc->pa_backingmapptr != NULL) {
    631 			pa_reclaim_register(palloc);
    632 		}
    633 		palloc->pa_flags |= PA_INITIALIZED;
    634 	}
    635 
    636 	if (align == 0)
    637 		align = ALIGN(1);
    638 
    639 	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
    640 		size = sizeof(struct pool_item);
    641 
    642 	size = roundup(size, align);
    643 #ifdef DIAGNOSTIC
    644 	if (size > palloc->pa_pagesz)
    645 		panic("pool_init: pool item size (%zu) too large", size);
    646 #endif
    647 
    648 	/*
    649 	 * Initialize the pool structure.
    650 	 */
    651 	LIST_INIT(&pp->pr_emptypages);
    652 	LIST_INIT(&pp->pr_fullpages);
    653 	LIST_INIT(&pp->pr_partpages);
    654 	LIST_INIT(&pp->pr_cachelist);
    655 	pp->pr_curpage = NULL;
    656 	pp->pr_npages = 0;
    657 	pp->pr_minitems = 0;
    658 	pp->pr_minpages = 0;
    659 	pp->pr_maxpages = UINT_MAX;
    660 	pp->pr_roflags = flags;
    661 	pp->pr_flags = 0;
    662 	pp->pr_size = size;
    663 	pp->pr_align = align;
    664 	pp->pr_wchan = wchan;
    665 	pp->pr_alloc = palloc;
    666 	pp->pr_nitems = 0;
    667 	pp->pr_nout = 0;
    668 	pp->pr_hardlimit = UINT_MAX;
    669 	pp->pr_hardlimit_warning = NULL;
    670 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    671 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    672 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    673 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    674 	pp->pr_drain_hook = NULL;
    675 	pp->pr_drain_hook_arg = NULL;
    676 	pp->pr_freecheck = NULL;
    677 
    678 	/*
     679 	 * Decide whether to put the page header off-page, to avoid
     680 	 * wasting too large a part of the page or because the item is
     681 	 * too big.  Off-page page headers go into a splay tree, so we
     682 	 * can match a returned item with its header based on the page
     683 	 * address.  We use 1/16 of the page size and about 8 times the
     684 	 * item size as the thresholds (XXX: tune).
    685 	 *
    686 	 * However, we'll put the header into the page if we can put
    687 	 * it without wasting any items.
    688 	 *
    689 	 * Silently enforce `0 <= ioff < align'.
    690 	 */
    691 	pp->pr_itemoffset = ioff %= align;
    692 	/* See the comment below about reserved bytes. */
    693 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
    694 	phsize = ALIGN(sizeof(struct pool_item_header));
    695 	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
    696 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
    697 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
    698 		/* Use the end of the page for the page header */
    699 		pp->pr_roflags |= PR_PHINPAGE;
    700 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
    701 	} else {
    702 		/* The page header will be taken from our page header pool */
    703 		pp->pr_phoffset = 0;
    704 		off = palloc->pa_pagesz;
    705 		SPLAY_INIT(&pp->pr_phtree);
    706 	}
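         /*
          * Worked example (annotation, not original): on a 4096 byte page, a
          * 64 byte item passes the size test (64 < MIN(4096/16, phsize * 8)),
          * so its header lands at the end of the page (PR_PHINPAGE).  A 2048
          * byte item fails it, and because reserving phsize bytes would drop
          * the per-page item count from 2 to 1, its header comes from phpool
          * instead.
          */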
    707 
    708 	/*
    709 	 * Alignment is to take place at `ioff' within the item. This means
    710 	 * we must reserve up to `align - 1' bytes on the page to allow
    711 	 * appropriate positioning of each item.
    712 	 */
    713 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    714 	KASSERT(pp->pr_itemsperpage != 0);
    715 	if ((pp->pr_roflags & PR_NOTOUCH)) {
    716 		int idx;
    717 
    718 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
    719 		    idx++) {
    720 			/* nothing */
    721 		}
    722 		if (idx >= PHPOOL_MAX) {
    723 			/*
     724 			 * If you see this panic, consider tweaking
     725 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
    726 			 */
    727 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
    728 			    pp->pr_wchan, pp->pr_itemsperpage);
    729 		}
    730 		pp->pr_phpool = &phpool[idx];
    731 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    732 		pp->pr_phpool = &phpool[0];
    733 	}
    734 #if defined(DIAGNOSTIC)
    735 	else {
    736 		pp->pr_phpool = NULL;
    737 	}
    738 #endif
    739 
    740 	/*
    741 	 * Use the slack between the chunks and the page header
    742 	 * for "cache coloring".
    743 	 */
    744 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    745 	pp->pr_maxcolor = (slack / align) * align;
    746 	pp->pr_curcolor = 0;
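         /*
          * Annotation (not original): the "color" staggers where items start
          * on successive pages by pr_align bytes, wrapping at pr_maxcolor, so
          * that the Nth item of different pages lands in different cache
          * lines instead of always colliding in the same set.
          */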
    747 
    748 	pp->pr_nget = 0;
    749 	pp->pr_nfail = 0;
    750 	pp->pr_nput = 0;
    751 	pp->pr_npagealloc = 0;
    752 	pp->pr_npagefree = 0;
    753 	pp->pr_hiwat = 0;
    754 	pp->pr_nidle = 0;
    755 
    756 #ifdef POOL_DIAGNOSTIC
    757 	if (flags & PR_LOGGING) {
    758 		if (kmem_map == NULL ||
    759 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    760 		     M_TEMP, M_NOWAIT)) == NULL)
    761 			pp->pr_roflags &= ~PR_LOGGING;
    762 		pp->pr_curlogentry = 0;
    763 		pp->pr_logsize = pool_logsize;
    764 	}
    765 #endif
    766 
    767 	pp->pr_entered_file = NULL;
    768 	pp->pr_entered_line = 0;
    769 
    770 	mutex_init(&pp->pr_lock, MUTEX_DRIVER, ipl);
    771 	cv_init(&pp->pr_cv, wchan);
    772 	pp->pr_ipl = ipl;
    773 
    774 	/*
    775 	 * Initialize private page header pool and cache magazine pool if we
    776 	 * haven't done so yet.
    777 	 * XXX LOCKING.
    778 	 */
    779 	if (phpool[0].pr_size == 0) {
    780 		int idx;
    781 		for (idx = 0; idx < PHPOOL_MAX; idx++) {
    782 			static char phpool_names[PHPOOL_MAX][6+1+6+1];
    783 			int nelem;
    784 			size_t sz;
    785 
    786 			nelem = PHPOOL_FREELIST_NELEM(idx);
    787 			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
    788 			    "phpool-%d", nelem);
    789 			sz = sizeof(struct pool_item_header);
    790 			if (nelem) {
    791 				sz = PR_FREELIST_ALIGN(sz)
    792 				    + nelem * sizeof(pool_item_freelist_t);
    793 			}
    794 			pool_init(&phpool[idx], sz, 0, 0, 0,
    795 			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
    796 		}
    797 #ifdef POOL_SUBPAGE
    798 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    799 		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
    800 #endif
    801 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
    802 		    0, "pcgpool", &pool_allocator_meta, IPL_VM);
    803 	}
    804 
    805 	if (__predict_true(!cold)) {
    806 		/* Insert into the list of all pools. */
    807 		mutex_enter(&pool_head_lock);
    808 		LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
    809 		mutex_exit(&pool_head_lock);
    810 
    811 		/* Insert this into the list of pools using this allocator. */
    812 		mutex_enter(&palloc->pa_lock);
    813 		TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    814 		mutex_exit(&palloc->pa_lock);
    815 	} else {
    816 		LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
    817 		TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    818 	}
    819 
    820 	pool_reclaim_register(pp);
    821 }
    822 
    823 /*
     824  * De-commission a pool resource.
    825  */
    826 void
    827 pool_destroy(struct pool *pp)
    828 {
    829 	struct pool_pagelist pq;
    830 	struct pool_item_header *ph;
    831 
    832 	/* Remove from global pool list */
    833 	mutex_enter(&pool_head_lock);
    834 	LIST_REMOVE(pp, pr_poollist);
    835 	if (drainpp == pp)
    836 		drainpp = NULL;
    837 	mutex_exit(&pool_head_lock);
    838 
    839 	/* Remove this pool from its allocator's list of pools. */
    840 	pool_reclaim_unregister(pp);
    841 	mutex_enter(&pp->pr_alloc->pa_lock);
    842 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    843 	mutex_exit(&pp->pr_alloc->pa_lock);
    844 
    845 	mutex_enter(&pp->pr_lock);
    846 
    847 	KASSERT(LIST_EMPTY(&pp->pr_cachelist));
    848 
    849 #ifdef DIAGNOSTIC
    850 	if (pp->pr_nout != 0) {
    851 		pr_printlog(pp, NULL, printf);
    852 		panic("pool_destroy: pool busy: still out: %u",
    853 		    pp->pr_nout);
    854 	}
    855 #endif
    856 
    857 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
    858 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
    859 
    860 	/* Remove all pages */
    861 	LIST_INIT(&pq);
    862 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
    863 		pr_rmpage(pp, ph, &pq);
    864 
    865 	mutex_exit(&pp->pr_lock);
    866 
    867 	pr_pagelist_free(pp, &pq);
    868 
    869 #ifdef POOL_DIAGNOSTIC
    870 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    871 		free(pp->pr_log, M_TEMP);
    872 #endif
    873 
    874 	cv_destroy(&pp->pr_cv);
    875 	mutex_destroy(&pp->pr_lock);
    876 }
    877 
    878 void
    879 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    880 {
    881 
    882 	/* XXX no locking -- must be used just after pool_init() */
    883 #ifdef DIAGNOSTIC
    884 	if (pp->pr_drain_hook != NULL)
    885 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
    886 #endif
    887 	pp->pr_drain_hook = fn;
    888 	pp->pr_drain_hook_arg = arg;
    889 }
    890 
    891 static struct pool_item_header *
    892 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
    893 {
    894 	struct pool_item_header *ph;
    895 
    896 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    897 		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
    898 	else
    899 		ph = pool_get(pp->pr_phpool, flags);
    900 
    901 	return (ph);
    902 }
    903 
    904 /*
    905  * Grab an item from the pool; must be called at appropriate spl level
    906  */
    907 void *
    908 #ifdef POOL_DIAGNOSTIC
    909 _pool_get(struct pool *pp, int flags, const char *file, long line)
    910 #else
    911 pool_get(struct pool *pp, int flags)
    912 #endif
    913 {
    914 	struct pool_item *pi;
    915 	struct pool_item_header *ph;
    916 	void *v;
    917 
    918 #ifdef DIAGNOSTIC
    919 	if (__predict_false(pp->pr_itemsperpage == 0))
    920 		panic("pool_get: pool %p: pr_itemsperpage is zero, "
    921 		    "pool not initialized?", pp);
    922 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
    923 			    (flags & PR_WAITOK) != 0))
    924 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
    925 
    926 #endif /* DIAGNOSTIC */
    927 #ifdef LOCKDEBUG
    928 	if (flags & PR_WAITOK)
    929 		ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
    930 #endif
    931 
    932 	mutex_enter(&pp->pr_lock);
    933 	pr_enter(pp, file, line);
    934 
    935  startover:
    936 	/*
    937 	 * Check to see if we've reached the hard limit.  If we have,
    938 	 * and we can wait, then wait until an item has been returned to
    939 	 * the pool.
    940 	 */
    941 #ifdef DIAGNOSTIC
    942 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
    943 		pr_leave(pp);
    944 		mutex_exit(&pp->pr_lock);
    945 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
    946 	}
    947 #endif
    948 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    949 		if (pp->pr_drain_hook != NULL) {
    950 			/*
    951 			 * Since the drain hook is going to free things
    952 			 * back to the pool, unlock, call the hook, re-lock,
    953 			 * and check the hardlimit condition again.
    954 			 */
    955 			pr_leave(pp);
    956 			mutex_exit(&pp->pr_lock);
    957 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
    958 			mutex_enter(&pp->pr_lock);
    959 			pr_enter(pp, file, line);
    960 			if (pp->pr_nout < pp->pr_hardlimit)
    961 				goto startover;
    962 		}
    963 
    964 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    965 			/*
    966 			 * XXX: A warning isn't logged in this case.  Should
    967 			 * it be?
    968 			 */
    969 			pp->pr_flags |= PR_WANTED;
    970 			pr_leave(pp);
    971 			cv_wait(&pp->pr_cv, &pp->pr_lock);
    972 			pr_enter(pp, file, line);
    973 			goto startover;
    974 		}
    975 
    976 		/*
    977 		 * Log a message that the hard limit has been hit.
    978 		 */
    979 		if (pp->pr_hardlimit_warning != NULL &&
    980 		    ratecheck(&pp->pr_hardlimit_warning_last,
    981 			      &pp->pr_hardlimit_ratecap))
    982 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    983 
    984 		pp->pr_nfail++;
    985 
    986 		pr_leave(pp);
    987 		mutex_exit(&pp->pr_lock);
    988 		return (NULL);
    989 	}
    990 
    991 	/*
    992 	 * The convention we use is that if `curpage' is not NULL, then
    993 	 * it points at a non-empty bucket. In particular, `curpage'
    994 	 * never points at a page header which has PR_PHINPAGE set and
    995 	 * has no items in its bucket.
    996 	 */
    997 	if ((ph = pp->pr_curpage) == NULL) {
    998 		int error;
    999 
   1000 #ifdef DIAGNOSTIC
   1001 		if (pp->pr_nitems != 0) {
   1002 			mutex_exit(&pp->pr_lock);
   1003 			printf("pool_get: %s: curpage NULL, nitems %u\n",
   1004 			    pp->pr_wchan, pp->pr_nitems);
   1005 			panic("pool_get: nitems inconsistent");
   1006 		}
   1007 #endif
   1008 
   1009 		/*
   1010 		 * Call the back-end page allocator for more memory.
   1011 		 * Release the pool lock, as the back-end page allocator
   1012 		 * may block.
   1013 		 */
   1014 		pr_leave(pp);
   1015 		error = pool_grow(pp, flags);
   1016 		pr_enter(pp, file, line);
   1017 		if (error != 0) {
   1018 			/*
   1019 			 * We were unable to allocate a page or item
   1020 			 * header, but we released the lock during
   1021 			 * allocation, so perhaps items were freed
   1022 			 * back to the pool.  Check for this case.
   1023 			 */
   1024 			if (pp->pr_curpage != NULL)
   1025 				goto startover;
   1026 
   1027 			pp->pr_nfail++;
   1028 			pr_leave(pp);
   1029 			mutex_exit(&pp->pr_lock);
   1030 			return (NULL);
   1031 		}
   1032 
   1033 		/* Start the allocation process over. */
   1034 		goto startover;
   1035 	}
   1036 	if (pp->pr_roflags & PR_NOTOUCH) {
   1037 #ifdef DIAGNOSTIC
   1038 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
   1039 			pr_leave(pp);
   1040 			mutex_exit(&pp->pr_lock);
   1041 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1042 		}
   1043 #endif
   1044 		v = pr_item_notouch_get(pp, ph);
   1045 #ifdef POOL_DIAGNOSTIC
   1046 		pr_log(pp, v, PRLOG_GET, file, line);
   1047 #endif
   1048 	} else {
   1049 		v = pi = LIST_FIRST(&ph->ph_itemlist);
   1050 		if (__predict_false(v == NULL)) {
   1051 			pr_leave(pp);
   1052 			mutex_exit(&pp->pr_lock);
   1053 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1054 		}
   1055 #ifdef DIAGNOSTIC
   1056 		if (__predict_false(pp->pr_nitems == 0)) {
   1057 			pr_leave(pp);
   1058 			mutex_exit(&pp->pr_lock);
   1059 			printf("pool_get: %s: items on itemlist, nitems %u\n",
   1060 			    pp->pr_wchan, pp->pr_nitems);
   1061 			panic("pool_get: nitems inconsistent");
   1062 		}
   1063 #endif
   1064 
   1065 #ifdef POOL_DIAGNOSTIC
   1066 		pr_log(pp, v, PRLOG_GET, file, line);
   1067 #endif
   1068 
   1069 #ifdef DIAGNOSTIC
   1070 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
   1071 			pr_printlog(pp, pi, printf);
   1072 			panic("pool_get(%s): free list modified: "
   1073 			    "magic=%x; page %p; item addr %p\n",
   1074 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
   1075 		}
   1076 #endif
   1077 
   1078 		/*
   1079 		 * Remove from item list.
   1080 		 */
   1081 		LIST_REMOVE(pi, pi_list);
   1082 	}
   1083 	pp->pr_nitems--;
   1084 	pp->pr_nout++;
   1085 	if (ph->ph_nmissing == 0) {
   1086 #ifdef DIAGNOSTIC
   1087 		if (__predict_false(pp->pr_nidle == 0))
   1088 			panic("pool_get: nidle inconsistent");
   1089 #endif
   1090 		pp->pr_nidle--;
   1091 
   1092 		/*
   1093 		 * This page was previously empty.  Move it to the list of
   1094 		 * partially-full pages.  This page is already curpage.
   1095 		 */
   1096 		LIST_REMOVE(ph, ph_pagelist);
   1097 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1098 	}
   1099 	ph->ph_nmissing++;
   1100 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
   1101 #ifdef DIAGNOSTIC
   1102 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
   1103 		    !LIST_EMPTY(&ph->ph_itemlist))) {
   1104 			pr_leave(pp);
   1105 			mutex_exit(&pp->pr_lock);
   1106 			panic("pool_get: %s: nmissing inconsistent",
   1107 			    pp->pr_wchan);
   1108 		}
   1109 #endif
   1110 		/*
   1111 		 * This page is now full.  Move it to the full list
   1112 		 * and select a new current page.
   1113 		 */
   1114 		LIST_REMOVE(ph, ph_pagelist);
   1115 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
   1116 		pool_update_curpage(pp);
   1117 	}
   1118 
   1119 	pp->pr_nget++;
   1120 	pr_leave(pp);
   1121 
   1122 	/*
   1123 	 * If we have a low water mark and we are now below that low
   1124 	 * water mark, add more items to the pool.
   1125 	 */
   1126 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1127 		/*
   1128 		 * XXX: Should we log a warning?  Should we set up a timeout
   1129 		 * to try again in a second or so?  The latter could break
   1130 		 * a caller's assumptions about interrupt protection, etc.
   1131 		 */
   1132 	}
   1133 
   1134 	mutex_exit(&pp->pr_lock);
   1135 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
   1136 	FREECHECK_OUT(&pp->pr_freecheck, v);
   1137 	return (v);
   1138 }
   1139 
   1140 /*
   1141  * Internal version of pool_put().  Pool is already locked/entered.
   1142  */
   1143 static void
   1144 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
   1145 {
   1146 	struct pool_item *pi = v;
   1147 	struct pool_item_header *ph;
   1148 
   1149 	KASSERT(mutex_owned(&pp->pr_lock));
   1150 	FREECHECK_IN(&pp->pr_freecheck, v);
   1151 
   1152 #ifdef DIAGNOSTIC
   1153 	if (__predict_false(pp->pr_nout == 0)) {
   1154 		printf("pool %s: putting with none out\n",
   1155 		    pp->pr_wchan);
   1156 		panic("pool_put");
   1157 	}
   1158 #endif
   1159 
   1160 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
   1161 		pr_printlog(pp, NULL, printf);
   1162 		panic("pool_put: %s: page header missing", pp->pr_wchan);
   1163 	}
   1164 
   1165 	/*
   1166 	 * Return to item list.
   1167 	 */
   1168 	if (pp->pr_roflags & PR_NOTOUCH) {
   1169 		pr_item_notouch_put(pp, ph, v);
   1170 	} else {
   1171 #ifdef DIAGNOSTIC
   1172 		pi->pi_magic = PI_MAGIC;
   1173 #endif
   1174 #ifdef DEBUG
   1175 		{
   1176 			int i, *ip = v;
   1177 
   1178 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
   1179 				*ip++ = PI_MAGIC;
   1180 			}
   1181 		}
   1182 #endif
   1183 
   1184 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1185 	}
   1186 	KDASSERT(ph->ph_nmissing != 0);
   1187 	ph->ph_nmissing--;
   1188 	pp->pr_nput++;
   1189 	pp->pr_nitems++;
   1190 	pp->pr_nout--;
   1191 
   1192 	/* Cancel "pool empty" condition if it exists */
   1193 	if (pp->pr_curpage == NULL)
   1194 		pp->pr_curpage = ph;
   1195 
   1196 	if (pp->pr_flags & PR_WANTED) {
   1197 		pp->pr_flags &= ~PR_WANTED;
   1198 		if (ph->ph_nmissing == 0)
   1199 			pp->pr_nidle++;
   1200 		cv_broadcast(&pp->pr_cv);
   1201 		return;
   1202 	}
   1203 
   1204 	/*
   1205 	 * If this page is now empty, do one of two things:
   1206 	 *
   1207 	 *	(1) If we have more pages than the page high water mark,
   1208 	 *	    free the page back to the system.  ONLY CONSIDER
   1209 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
   1210 	 *	    CLAIM.
   1211 	 *
   1212 	 *	(2) Otherwise, move the page to the empty page list.
   1213 	 *
   1214 	 * Either way, select a new current page (so we use a partially-full
   1215 	 * page if one is available).
   1216 	 */
   1217 	if (ph->ph_nmissing == 0) {
   1218 		pp->pr_nidle++;
   1219 		if (pp->pr_npages > pp->pr_minpages &&
   1220 		    (pp->pr_npages > pp->pr_maxpages ||
   1221 		     pa_starved_p(pp->pr_alloc))) {
   1222 			pr_rmpage(pp, ph, pq);
   1223 		} else {
   1224 			LIST_REMOVE(ph, ph_pagelist);
   1225 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1226 
   1227 			/*
   1228 			 * Update the timestamp on the page.  A page must
   1229 			 * be idle for some period of time before it can
   1230 			 * be reclaimed by the pagedaemon.  This minimizes
   1231 			 * ping-pong'ing for memory.
   1232 			 */
   1233 			getmicrotime(&ph->ph_time);
   1234 		}
   1235 		pool_update_curpage(pp);
   1236 	}
   1237 
   1238 	/*
   1239 	 * If the page was previously completely full, move it to the
   1240 	 * partially-full list and make it the current page.  The next
   1241 	 * allocation will get the item from this page, instead of
   1242 	 * further fragmenting the pool.
   1243 	 */
   1244 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1245 		LIST_REMOVE(ph, ph_pagelist);
   1246 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1247 		pp->pr_curpage = ph;
   1248 	}
   1249 }
   1250 
   1251 /*
   1252  * Return resource to the pool; must be called at appropriate spl level
   1253  */
   1254 #ifdef POOL_DIAGNOSTIC
   1255 void
   1256 _pool_put(struct pool *pp, void *v, const char *file, long line)
   1257 {
   1258 	struct pool_pagelist pq;
   1259 
   1260 	LIST_INIT(&pq);
   1261 
   1262 	mutex_enter(&pp->pr_lock);
   1263 	pr_enter(pp, file, line);
   1264 
   1265 	pr_log(pp, v, PRLOG_PUT, file, line);
   1266 
   1267 	pool_do_put(pp, v, &pq);
   1268 
   1269 	pr_leave(pp);
   1270 	mutex_exit(&pp->pr_lock);
   1271 
   1272 	pr_pagelist_free(pp, &pq);
   1273 }
   1274 #undef pool_put
   1275 #endif /* POOL_DIAGNOSTIC */
   1276 
   1277 void
   1278 pool_put(struct pool *pp, void *v)
   1279 {
   1280 	struct pool_pagelist pq;
   1281 
   1282 	LIST_INIT(&pq);
   1283 
   1284 	mutex_enter(&pp->pr_lock);
   1285 	pool_do_put(pp, v, &pq);
   1286 	mutex_exit(&pp->pr_lock);
   1287 
   1288 	pr_pagelist_free(pp, &pq);
   1289 }
   1290 
   1291 #ifdef POOL_DIAGNOSTIC
   1292 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1293 #endif
   1294 
   1295 /*
   1296  * pool_grow: grow a pool by a page.
   1297  *
   1298  * => called with pool locked.
   1299  * => unlock and relock the pool.
   1300  * => return with pool locked.
   1301  */
   1302 
   1303 static int
   1304 pool_grow(struct pool *pp, int flags)
   1305 {
   1306 	struct pool_item_header *ph = NULL;
   1307 	char *cp;
   1308 
   1309 	mutex_exit(&pp->pr_lock);
   1310 	cp = pool_allocator_alloc(pp, flags);
   1311 	if (__predict_true(cp != NULL)) {
   1312 		ph = pool_alloc_item_header(pp, cp, flags);
   1313 	}
   1314 	if (__predict_false(cp == NULL || ph == NULL)) {
   1315 		if (cp != NULL) {
   1316 			pool_allocator_free(pp, cp);
   1317 		}
   1318 		mutex_enter(&pp->pr_lock);
   1319 		return ENOMEM;
   1320 	}
   1321 
   1322 	mutex_enter(&pp->pr_lock);
   1323 	pool_prime_page(pp, cp, ph);
   1324 	pp->pr_npagealloc++;
   1325 	return 0;
   1326 }
   1327 
   1328 /*
   1329  * Add N items to the pool.
   1330  */
   1331 int
   1332 pool_prime(struct pool *pp, int n)
   1333 {
   1334 	int newpages;
   1335 	int error = 0;
   1336 
   1337 	mutex_enter(&pp->pr_lock);
   1338 
   1339 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1340 
   1341 	while (newpages-- > 0) {
   1342 		error = pool_grow(pp, PR_NOWAIT);
   1343 		if (error) {
   1344 			break;
   1345 		}
   1346 		pp->pr_minpages++;
   1347 	}
   1348 
   1349 	if (pp->pr_minpages >= pp->pr_maxpages)
   1350 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1351 
   1352 	mutex_exit(&pp->pr_lock);
   1353 	return error;
   1354 }
   1355 
   1356 /*
   1357  * Add a page worth of items to the pool.
   1358  *
   1359  * Note, we must be called with the pool descriptor LOCKED.
   1360  */
   1361 static void
   1362 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1363 {
   1364 	struct pool_item *pi;
   1365 	void *cp = storage;
   1366 	const unsigned int align = pp->pr_align;
   1367 	const unsigned int ioff = pp->pr_itemoffset;
   1368 	int n;
   1369 
   1370 	KASSERT(mutex_owned(&pp->pr_lock));
   1371 
   1372 #ifdef DIAGNOSTIC
   1373 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
   1374 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1375 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1376 #endif
   1377 
   1378 	/*
   1379 	 * Insert page header.
   1380 	 */
   1381 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1382 	LIST_INIT(&ph->ph_itemlist);
   1383 	ph->ph_page = storage;
   1384 	ph->ph_nmissing = 0;
   1385 	getmicrotime(&ph->ph_time);
   1386 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1387 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1388 
   1389 	pp->pr_nidle++;
   1390 
   1391 	/*
   1392 	 * Color this page.
   1393 	 */
   1394 	cp = (char *)cp + pp->pr_curcolor;
   1395 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1396 		pp->pr_curcolor = 0;
   1397 
   1398 	/*
    1399 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1400 	 */
   1401 	if (ioff != 0)
   1402 		cp = (char *)cp + align - ioff;
   1403 
   1404 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1405 
   1406 	/*
   1407 	 * Insert remaining chunks on the bucket list.
   1408 	 */
   1409 	n = pp->pr_itemsperpage;
   1410 	pp->pr_nitems += n;
   1411 
   1412 	if (pp->pr_roflags & PR_NOTOUCH) {
   1413 		pool_item_freelist_t *freelist = PR_FREELIST(ph);
   1414 		int i;
   1415 
   1416 		ph->ph_off = (char *)cp - (char *)storage;
   1417 		ph->ph_firstfree = 0;
   1418 		for (i = 0; i < n - 1; i++)
   1419 			freelist[i] = i + 1;
   1420 		freelist[n - 1] = PR_INDEX_EOL;
   1421 	} else {
   1422 		while (n--) {
   1423 			pi = (struct pool_item *)cp;
   1424 
   1425 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1426 
   1427 			/* Insert on page list */
   1428 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1429 #ifdef DIAGNOSTIC
   1430 			pi->pi_magic = PI_MAGIC;
   1431 #endif
   1432 			cp = (char *)cp + pp->pr_size;
   1433 
   1434 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1435 		}
   1436 	}
   1437 
   1438 	/*
   1439 	 * If the pool was depleted, point at the new page.
   1440 	 */
   1441 	if (pp->pr_curpage == NULL)
   1442 		pp->pr_curpage = ph;
   1443 
   1444 	if (++pp->pr_npages > pp->pr_hiwat)
   1445 		pp->pr_hiwat = pp->pr_npages;
   1446 }
   1447 
   1448 /*
   1449  * Used by pool_get() when nitems drops below the low water mark.  This
   1450  * is used to catch up pr_nitems with the low water mark.
   1451  *
   1452  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1453  *
   1454  * Note 2, we must be called with the pool already locked, and we return
   1455  * with it locked.
   1456  */
   1457 static int
   1458 pool_catchup(struct pool *pp)
   1459 {
   1460 	int error = 0;
   1461 
   1462 	while (POOL_NEEDS_CATCHUP(pp)) {
   1463 		error = pool_grow(pp, PR_NOWAIT);
   1464 		if (error) {
   1465 			break;
   1466 		}
   1467 	}
   1468 	return error;
   1469 }
   1470 
   1471 static void
   1472 pool_update_curpage(struct pool *pp)
   1473 {
   1474 
   1475 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1476 	if (pp->pr_curpage == NULL) {
   1477 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1478 	}
   1479 }
   1480 
   1481 void
   1482 pool_setlowat(struct pool *pp, int n)
   1483 {
   1484 
   1485 	mutex_enter(&pp->pr_lock);
   1486 
   1487 	pp->pr_minitems = n;
   1488 	pp->pr_minpages = (n == 0)
   1489 		? 0
   1490 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1491 
   1492 	/* Make sure we're caught up with the newly-set low water mark. */
   1493 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1494 		/*
   1495 		 * XXX: Should we log a warning?  Should we set up a timeout
   1496 		 * to try again in a second or so?  The latter could break
   1497 		 * a caller's assumptions about interrupt protection, etc.
   1498 		 */
   1499 	}
   1500 
   1501 	mutex_exit(&pp->pr_lock);
   1502 }
   1503 
   1504 void
   1505 pool_sethiwat(struct pool *pp, int n)
   1506 {
   1507 
   1508 	mutex_enter(&pp->pr_lock);
   1509 
   1510 	pp->pr_maxpages = (n == 0)
   1511 		? 0
   1512 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1513 
   1514 	mutex_exit(&pp->pr_lock);
   1515 }
   1516 
   1517 void
   1518 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1519 {
   1520 
   1521 	mutex_enter(&pp->pr_lock);
   1522 
   1523 	pp->pr_hardlimit = n;
   1524 	pp->pr_hardlimit_warning = warnmess;
   1525 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1526 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1527 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1528 
   1529 	/*
   1530 	 * In-line version of pool_sethiwat(), because we don't want to
   1531 	 * release the lock.
   1532 	 */
   1533 	pp->pr_maxpages = (n == 0)
   1534 		? 0
   1535 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1536 
   1537 	mutex_exit(&pp->pr_lock);
   1538 }
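         /*
          * Illustrative sketch (annotation, not from this file): the
          * water-mark and hard-limit knobs as a client might apply them
          * right after pool_init(); `foo_pool' and the numbers are
          * hypothetical.
          *
          *	pool_setlowat(&foo_pool, 16);
          *	pool_sethiwat(&foo_pool, 1024);
          *	pool_sethardlimit(&foo_pool, 2048, "foo_pool: hard limit", 60);
          */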
   1539 
   1540 /*
   1541  * Release all complete pages that have not been used recently.
   1542  */
   1543 int
   1544 #ifdef POOL_DIAGNOSTIC
   1545 _pool_reclaim(struct pool *pp, const char *file, long line)
   1546 #else
   1547 pool_reclaim(struct pool *pp)
   1548 #endif
   1549 {
   1550 	struct pool_item_header *ph, *phnext;
   1551 	struct pool_cache *pc;
   1552 	struct pool_pagelist pq;
   1553 	struct pool_cache_grouplist pcgl;
   1554 	struct timeval curtime, diff;
   1555 
   1556 	if (pp->pr_drain_hook != NULL) {
   1557 		/*
   1558 		 * The drain hook must be called with the pool unlocked.
   1559 		 */
   1560 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1561 	}
   1562 
   1563 	if (mutex_tryenter(&pp->pr_lock) == 0)
   1564 		return (0);
   1565 	pr_enter(pp, file, line);
   1566 
   1567 	LIST_INIT(&pq);
   1568 	LIST_INIT(&pcgl);
   1569 
   1570 	/*
   1571 	 * Reclaim items from the pool's caches.
   1572 	 */
   1573 	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1574 		pool_cache_reclaim(pc, &pq, &pcgl);
   1575 
   1576 	getmicrotime(&curtime);
   1577 
   1578 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1579 		phnext = LIST_NEXT(ph, ph_pagelist);
   1580 
   1581 		/* Check our minimum page claim */
   1582 		if (pp->pr_npages <= pp->pr_minpages)
   1583 			break;
   1584 
   1585 		KASSERT(ph->ph_nmissing == 0);
   1586 		timersub(&curtime, &ph->ph_time, &diff);
   1587 		if (diff.tv_sec < pool_inactive_time
   1588 		    && !pa_starved_p(pp->pr_alloc))
   1589 			continue;
   1590 
   1591 		/*
   1592 		 * If freeing this page would put us below
   1593 		 * the low water mark, stop now.
   1594 		 */
   1595 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1596 		    pp->pr_minitems)
   1597 			break;
   1598 
   1599 		pr_rmpage(pp, ph, &pq);
   1600 	}
   1601 
   1602 	pr_leave(pp);
   1603 	mutex_exit(&pp->pr_lock);
   1604 	if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
   1605 		return 0;
   1606 
   1607 	pr_pagelist_free(pp, &pq);
   1608 	pcg_grouplist_free(&pcgl);
   1609 	return (1);
   1610 }
   1611 
   1612 /*
   1613  * Drain pools, one at a time.
   1614  *
   1615  * Note, we must never be called from an interrupt context.
   1616  */
   1617 void
   1618 pool_drain(void *arg)
   1619 {
   1620 	struct pool *pp;
   1621 	int s;
   1622 
   1623 	pp = NULL;
   1624 	s = splvm();	/* XXX why? */
   1625 	mutex_enter(&pool_head_lock);
   1626 	if (drainpp == NULL) {
   1627 		drainpp = LIST_FIRST(&pool_head);
   1628 	}
   1629 	if (drainpp) {
   1630 		pp = drainpp;
   1631 		drainpp = LIST_NEXT(pp, pr_poollist);
   1632 	}
   1633 	mutex_exit(&pool_head_lock);
   1634 	if (pp)
   1635 		pool_reclaim(pp);
   1636 	splx(s);
   1637 }
   1638 
   1639 /*
   1640  * Diagnostic helpers.
   1641  */
   1642 void
   1643 pool_print(struct pool *pp, const char *modif)
   1644 {
   1645 
   1646 	if (mutex_tryenter(&pp->pr_lock) == 0) {
   1647 		printf("pool %s is locked; try again later\n",
   1648 		    pp->pr_wchan);
   1649 		return;
   1650 	}
   1651 	pool_print1(pp, modif, printf);
   1652 	mutex_exit(&pp->pr_lock);
   1653 }
   1654 
   1655 void
   1656 pool_printall(const char *modif, void (*pr)(const char *, ...))
   1657 {
   1658 	struct pool *pp;
   1659 
   1660 	if (mutex_tryenter(&pool_head_lock) == 0) {
    1661 		(*pr)("WARNING: pool_head_lock is locked\n");
   1662 	} else {
   1663 		mutex_exit(&pool_head_lock);
   1664 	}
   1665 
   1666 	LIST_FOREACH(pp, &pool_head, pr_poollist) {
   1667 		pool_printit(pp, modif, pr);
   1668 	}
   1669 }
   1670 
   1671 void
   1672 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1673 {
   1674 
   1675 	if (pp == NULL) {
   1676 		(*pr)("Must specify a pool to print.\n");
   1677 		return;
   1678 	}
   1679 
   1680 	/*
   1681 	 * Called from DDB; interrupts should be blocked, and all
   1682 	 * other processors should be paused.  We can skip locking
   1683 	 * the pool in this case.
   1684 	 *
   1685 	 * We do a mutex_tryenter() just to print the lock
   1686 	 * status, however.
   1687 	 */
   1688 
   1689 	if (mutex_tryenter(&pp->pr_lock) == 0)
   1690 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1691 	else
   1692 		mutex_exit(&pp->pr_lock);
   1693 
   1694 	pool_print1(pp, modif, pr);
   1695 }
   1696 
   1697 static void
   1698 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1699     void (*pr)(const char *, ...))
   1700 {
   1701 	struct pool_item_header *ph;
   1702 #ifdef DIAGNOSTIC
   1703 	struct pool_item *pi;
   1704 #endif
   1705 
   1706 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1707 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1708 		    ph->ph_page, ph->ph_nmissing,
   1709 		    (u_long)ph->ph_time.tv_sec,
   1710 		    (u_long)ph->ph_time.tv_usec);
   1711 #ifdef DIAGNOSTIC
   1712 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1713 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1714 				if (pi->pi_magic != PI_MAGIC) {
   1715 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1716 					    pi, pi->pi_magic);
   1717 				}
   1718 			}
   1719 		}
   1720 #endif
   1721 	}
   1722 }
   1723 
   1724 static void
   1725 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1726 {
   1727 	struct pool_item_header *ph;
   1728 	struct pool_cache *pc;
   1729 	struct pool_cache_group *pcg;
   1730 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1731 	char c;
   1732 
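         	/*
         	 * Parse the modifier string: 'l' selects the request log,
         	 * 'p' the page lists and current page, 'c' the cache groups.
         	 */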
   1733 	while ((c = *modif++) != '\0') {
   1734 		if (c == 'l')
   1735 			print_log = 1;
   1736 		if (c == 'p')
   1737 			print_pagelist = 1;
   1738 		if (c == 'c')
   1739 			print_cache = 1;
   1740 	}
   1741 
   1742 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1743 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1744 	    pp->pr_roflags);
   1745 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1746 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1747 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1748 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1749 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1750 
   1751 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1752 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1753 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1754 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1755 
   1756 	if (print_pagelist == 0)
   1757 		goto skip_pagelist;
   1758 
   1759 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1760 		(*pr)("\n\tempty page list:\n");
   1761 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1762 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1763 		(*pr)("\n\tfull page list:\n");
   1764 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1765 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1766 		(*pr)("\n\tpartial-page list:\n");
   1767 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1768 
   1769 	if (pp->pr_curpage == NULL)
   1770 		(*pr)("\tno current page\n");
   1771 	else
   1772 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1773 
   1774  skip_pagelist:
   1775 	if (print_log == 0)
   1776 		goto skip_log;
   1777 
   1778 	(*pr)("\n");
   1779 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1780 		(*pr)("\tno log\n");
   1781 	else {
   1782 		pr_printlog(pp, NULL, pr);
   1783 	}
   1784 
   1785  skip_log:
   1786 	if (print_cache == 0)
   1787 		goto skip_cache;
   1788 
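         /*
          * Multi-statement helper macro, not wrapped in do { } while (0);
          * each use below is enclosed in its own braces.
          */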
   1789 #define PR_GROUPLIST(pcg)						\
   1790 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1791 	for (i = 0; i < PCG_NOBJECTS; i++) {				\
   1792 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1793 		    POOL_PADDR_INVALID) {				\
   1794 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1795 			    pcg->pcg_objects[i].pcgo_va,		\
   1796 			    (unsigned long long)			\
   1797 			    pcg->pcg_objects[i].pcgo_pa);		\
   1798 		} else {						\
   1799 			(*pr)("\t\t\t%p\n",				\
   1800 			    pcg->pcg_objects[i].pcgo_va);		\
   1801 		}							\
   1802 	}
   1803 
   1804 	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1805 		(*pr)("\tcache %p\n", pc);
   1806 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1807 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1808 		(*pr)("\t    full groups:\n");
   1809 		LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
   1810 			PR_GROUPLIST(pcg);
   1811 		}
   1812 		(*pr)("\t    partial groups:\n");
   1813 		LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
   1814 			PR_GROUPLIST(pcg);
   1815 		}
   1816 		(*pr)("\t    empty groups:\n");
   1817 		LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
   1818 			PR_GROUPLIST(pcg);
   1819 		}
   1820 	}
   1821 #undef PR_GROUPLIST
   1822 
   1823  skip_cache:
   1824 	pr_enter_check(pp, pr);
   1825 }
   1826 
   1827 static int
   1828 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1829 {
   1830 	struct pool_item *pi;
   1831 	void *page;
   1832 	int n;
   1833 
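         	/*
         	 * pa_pagemask clears the low-order offset bits of an address
         	 * (e.g. ~0xfff with 4 KiB pages; illustrative), so masking the
         	 * header address recovers the base of its backing page.
         	 */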
   1834 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1835 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1836 		if (page != ph->ph_page &&
   1837 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1838 			if (label != NULL)
   1839 				printf("%s: ", label);
   1840 			printf("pool(%p:%s): page inconsistency: page %p;"
   1841 			       " at page head addr %p (p %p)\n", pp,
   1842 				pp->pr_wchan, ph->ph_page,
   1843 				ph, page);
   1844 			return 1;
   1845 		}
   1846 	}
   1847 
   1848 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1849 		return 0;
   1850 
   1851 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1852 	     pi != NULL;
   1853 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1854 
   1855 #ifdef DIAGNOSTIC
   1856 		if (pi->pi_magic != PI_MAGIC) {
   1857 			if (label != NULL)
   1858 				printf("%s: ", label);
   1859 			printf("pool(%s): free list modified: magic=%x;"
   1860 			       " page %p; item ordinal %d; addr %p\n",
   1861 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1862 				n, pi);
   1863 			panic("pool");
   1864 		}
   1865 #endif
   1866 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1867 			continue;
   1868 		}
   1869 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1870 		if (page == ph->ph_page)
   1871 			continue;
   1872 
   1873 		if (label != NULL)
   1874 			printf("%s: ", label);
   1875 		printf("pool(%p:%s): page inconsistency: page %p;"
   1876 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1877 			pp->pr_wchan, ph->ph_page,
   1878 			n, pi, page);
   1879 		return 1;
   1880 	}
   1881 	return 0;
   1882 }
   1883 
   1884 
   1885 int
   1886 pool_chk(struct pool *pp, const char *label)
   1887 {
   1888 	struct pool_item_header *ph;
   1889 	int r = 0;
   1890 
   1891 	mutex_enter(&pp->pr_lock);
   1892 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1893 		r = pool_chk_page(pp, label, ph);
   1894 		if (r) {
   1895 			goto out;
   1896 		}
   1897 	}
   1898 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1899 		r = pool_chk_page(pp, label, ph);
   1900 		if (r) {
   1901 			goto out;
   1902 		}
   1903 	}
   1904 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1905 		r = pool_chk_page(pp, label, ph);
   1906 		if (r) {
   1907 			goto out;
   1908 		}
   1909 	}
   1910 
   1911 out:
   1912 	mutex_exit(&pp->pr_lock);
   1913 	return (r);
   1914 }
   1915 
   1916 /*
   1917  * pool_cache_init:
   1918  *
   1919  *	Initialize a pool cache.
   1920  *
   1921  *	NOTE: If the pool must be protected from interrupts, we expect
   1922  *	to be called at the appropriate interrupt priority level.
   1923  */
   1924 void
   1925 pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1926     int (*ctor)(void *, void *, int),
   1927     void (*dtor)(void *, void *),
   1928     void *arg)
   1929 {
   1930 
   1931 	LIST_INIT(&pc->pc_emptygroups);
   1932 	LIST_INIT(&pc->pc_fullgroups);
   1933 	LIST_INIT(&pc->pc_partgroups);
   1934 	mutex_init(&pc->pc_lock, MUTEX_DRIVER, pp->pr_ipl);
   1935 
   1936 	pc->pc_pool = pp;
   1937 
   1938 	pc->pc_ctor = ctor;
   1939 	pc->pc_dtor = dtor;
   1940 	pc->pc_arg  = arg;
   1941 
   1942 	pc->pc_hits   = 0;
   1943 	pc->pc_misses = 0;
   1944 
   1945 	pc->pc_ngroups = 0;
   1946 
   1947 	pc->pc_nitems = 0;
   1948 
   1949 	if (__predict_true(!cold)) {
   1950 		mutex_enter(&pp->pr_lock);
   1951 		LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
   1952 		mutex_exit(&pp->pr_lock);
   1953 	} else
   1954 		LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
   1955 }
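         /*
          * Example usage (illustrative; the foo_* names are hypothetical):
          *
          *	static int
          *	foo_ctor(void *arg, void *obj, int flags)
          *	{
          *		... initialize *obj; return 0 on success, nonzero ...
          *		... to make pool_cache_get() fail and return NULL ...
          *	}
          *
          *	static void
          *	foo_dtor(void *arg, void *obj)
          *	{
          *		... release any resources held by *obj ...
          *	}
          *
          *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
          */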
   1956 
   1957 /*
   1958  * pool_cache_destroy:
   1959  *
   1960  *	Destroy a pool cache.
   1961  */
   1962 void
   1963 pool_cache_destroy(struct pool_cache *pc)
   1964 {
   1965 	struct pool *pp = pc->pc_pool;
   1966 
   1967 	/* First, invalidate the entire cache. */
   1968 	pool_cache_invalidate(pc);
   1969 
   1970 	/* ...and remove it from the pool's cache list. */
   1971 	mutex_enter(&pp->pr_lock);
   1972 	LIST_REMOVE(pc, pc_poollist);
   1973 	mutex_exit(&pp->pr_lock);
   1974 
   1975 	mutex_destroy(&pc->pc_lock);
   1976 }
   1977 
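         /*
          * pcg_get: take the most recently cached object out of a group
          * (LIFO order), optionally returning its physical address.
          */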
   1978 static inline void *
   1979 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
   1980 {
   1981 	void *object;
   1982 	u_int idx;
   1983 
   1984 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1985 	KASSERT(pcg->pcg_avail != 0);
   1986 	idx = --pcg->pcg_avail;
   1987 
   1988 	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
   1989 	object = pcg->pcg_objects[idx].pcgo_va;
   1990 	if (pap != NULL)
   1991 		*pap = pcg->pcg_objects[idx].pcgo_pa;
   1992 	pcg->pcg_objects[idx].pcgo_va = NULL;
   1993 
   1994 	return (object);
   1995 }
   1996 
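         /*
          * pcg_put: stash an object, and its physical address, in the next
          * free slot of a cache group.
          */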
   1997 static inline void
   1998 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
   1999 {
   2000 	u_int idx;
   2001 
   2002 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   2003 	idx = pcg->pcg_avail++;
   2004 
   2005 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
   2006 	pcg->pcg_objects[idx].pcgo_va = object;
   2007 	pcg->pcg_objects[idx].pcgo_pa = pa;
   2008 }
   2009 
   2010 static void
   2011 pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
   2012 {
   2013 	struct pool_cache_group *pcg;
   2014 
   2015 	while ((pcg = LIST_FIRST(pcgl)) != NULL) {
   2016 		LIST_REMOVE(pcg, pcg_list);
   2017 		pool_put(&pcgpool, pcg);
   2018 	}
   2019 }
   2020 
   2021 /*
   2022  * pool_cache_get{,_paddr}:
   2023  *
   2024  *	Get an object from a pool cache (optionally returning
   2025  *	the physical address of the object).
   2026  */
   2027 void *
   2028 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
   2029 {
   2030 	struct pool_cache_group *pcg;
   2031 	void *object;
   2032 
   2033 #ifdef LOCKDEBUG
   2034 	if (flags & PR_WAITOK)
   2035 		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
   2036 #endif
   2037 
   2038 	mutex_enter(&pc->pc_lock);
   2039 
   2040 	pcg = LIST_FIRST(&pc->pc_partgroups);
   2041 	if (pcg == NULL) {
   2042 		pcg = LIST_FIRST(&pc->pc_fullgroups);
   2043 		if (pcg != NULL) {
   2044 			LIST_REMOVE(pcg, pcg_list);
   2045 			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
   2046 		}
   2047 	}
   2048 	if (pcg == NULL) {
   2049 
   2050 		/*
   2051 		 * No groups with any available objects.  Allocate
   2052 		 * a new object, construct it, and return it to
   2053 		 * the caller.  We will allocate a group, if necessary,
   2054 		 * when the object is freed back to the cache.
   2055 		 */
   2056 		pc->pc_misses++;
   2057 		mutex_exit(&pc->pc_lock);
   2058 		object = pool_get(pc->pc_pool, flags);
   2059 		if (object != NULL && pc->pc_ctor != NULL) {
   2060 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   2061 				pool_put(pc->pc_pool, object);
   2062 				return (NULL);
   2063 			}
   2064 		}
   2065 		KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
   2066 		    (pc->pc_pool->pr_align - 1)) == 0);
   2067 		if (object != NULL && pap != NULL) {
   2068 #ifdef POOL_VTOPHYS
   2069 			*pap = POOL_VTOPHYS(object);
   2070 #else
   2071 			*pap = POOL_PADDR_INVALID;
   2072 #endif
   2073 		}
   2074 
   2075 		FREECHECK_OUT(&pc->pc_freecheck, object);
   2076 		return (object);
   2077 	}
   2078 
   2079 	pc->pc_hits++;
   2080 	pc->pc_nitems--;
   2081 	object = pcg_get(pcg, pap);
   2082 
   2083 	if (pcg->pcg_avail == 0) {
   2084 		LIST_REMOVE(pcg, pcg_list);
   2085 		LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
   2086 	}
   2087 	mutex_exit(&pc->pc_lock);
   2088 
   2089 	KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
   2090 	    (pc->pc_pool->pr_align - 1)) == 0);
   2091 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2092 	return (object);
   2093 }
   2094 
   2095 /*
   2096  * pool_cache_put{,_paddr}:
   2097  *
   2098  *	Put an object back to the pool cache (optionally caching the
   2099  *	physical address of the object).
   2100  */
   2101 void
   2102 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
   2103 {
   2104 	struct pool_cache_group *pcg;
   2105 
   2106 	FREECHECK_IN(&pc->pc_freecheck, object);
   2107 
   2108 	if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
   2109 		goto destruct;
   2110 	}
   2111 
   2112 	mutex_enter(&pc->pc_lock);
   2113 
   2114 	pcg = LIST_FIRST(&pc->pc_partgroups);
   2115 	if (pcg == NULL) {
   2116 		pcg = LIST_FIRST(&pc->pc_emptygroups);
   2117 		if (pcg != NULL) {
   2118 			LIST_REMOVE(pcg, pcg_list);
   2119 			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
   2120 		}
   2121 	}
   2122 	if (pcg == NULL) {
   2123 
   2124 		/*
    2125 		 * No group with room left for the object.  Attempt to
    2126 		 * allocate a new group.
   2127 		 */
   2128 		mutex_exit(&pc->pc_lock);
   2129 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   2130 		if (pcg == NULL) {
   2131 destruct:
   2132 
    2133 			/*
    2134 			 * Couldn't allocate a cache group, or the pool has
    2135 			 * waiters: destruct the object and free it to the pool.
    2136 			 */
   2137 			pool_cache_destruct_object(pc, object);
   2138 			return;
   2139 		}
   2140 		memset(pcg, 0, sizeof(*pcg));
   2141 		mutex_enter(&pc->pc_lock);
   2142 		pc->pc_ngroups++;
   2143 		LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
   2144 	}
   2145 
   2146 	pc->pc_nitems++;
   2147 	pcg_put(pcg, object, pa);
   2148 
   2149 	if (pcg->pcg_avail == PCG_NOBJECTS) {
   2150 		LIST_REMOVE(pcg, pcg_list);
   2151 		LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
   2152 	}
   2153 	mutex_exit(&pc->pc_lock);
   2154 }
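         /*
          * Typical usage is through the pool_cache_get()/pool_cache_put()
          * wrappers, e.g. (illustrative):
          *
          *	obj = pool_cache_get(&foo_cache, PR_WAITOK);
          *	if (obj != NULL) {
          *		... use the constructed object ...
          *		pool_cache_put(&foo_cache, obj);
          *	}
          */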
   2155 
   2156 /*
   2157  * pool_cache_destruct_object:
   2158  *
   2159  *	Force destruction of an object and its release back into
   2160  *	the pool.
   2161  */
   2162 void
   2163 pool_cache_destruct_object(struct pool_cache *pc, void *object)
   2164 {
   2165 
   2166 	if (pc->pc_dtor != NULL)
   2167 		(*pc->pc_dtor)(pc->pc_arg, object);
   2168 	pool_put(pc->pc_pool, object);
   2169 }
   2170 
   2171 static void
   2172 pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
   2173     struct pool_cache *pc, struct pool_pagelist *pq,
   2174     struct pool_cache_grouplist *pcgdl)
   2175 {
   2176 	struct pool_cache_group *pcg, *npcg;
   2177 	void *object;
   2178 
   2179 	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
   2180 		npcg = LIST_NEXT(pcg, pcg_list);
   2181 		while (pcg->pcg_avail != 0) {
   2182 			pc->pc_nitems--;
   2183 			object = pcg_get(pcg, NULL);
   2184 			if (pc->pc_dtor != NULL)
   2185 				(*pc->pc_dtor)(pc->pc_arg, object);
   2186 			pool_do_put(pc->pc_pool, object, pq);
   2187 		}
   2188 		pc->pc_ngroups--;
   2189 		LIST_REMOVE(pcg, pcg_list);
   2190 		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
   2191 	}
   2192 }
   2193 
   2194 static void
   2195 pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
   2196     struct pool_cache_grouplist *pcgl)
   2197 {
   2198 
   2199 	KASSERT(mutex_owned(&pc->pc_lock));
   2200 	KASSERT(mutex_owned(&pc->pc_pool->pr_lock));
   2201 
   2202 	pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
   2203 	pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
   2204 
   2205 	KASSERT(LIST_EMPTY(&pc->pc_partgroups));
   2206 	KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
   2207 	KASSERT(pc->pc_nitems == 0);
   2208 }
   2209 
   2210 /*
   2211  * pool_cache_invalidate:
   2212  *
   2213  *	Invalidate a pool cache (destruct and release all of the
   2214  *	cached objects).
   2215  */
   2216 void
   2217 pool_cache_invalidate(struct pool_cache *pc)
   2218 {
   2219 	struct pool_pagelist pq;
   2220 	struct pool_cache_grouplist pcgl;
   2221 
   2222 	LIST_INIT(&pq);
   2223 	LIST_INIT(&pcgl);
   2224 
   2225 	mutex_enter(&pc->pc_lock);
   2226 	mutex_enter(&pc->pc_pool->pr_lock);
   2227 
   2228 	pool_do_cache_invalidate(pc, &pq, &pcgl);
   2229 
   2230 	mutex_exit(&pc->pc_pool->pr_lock);
   2231 	mutex_exit(&pc->pc_lock);
   2232 
   2233 	pr_pagelist_free(pc->pc_pool, &pq);
   2234 	pcg_grouplist_free(&pcgl);
   2235 }
   2236 
   2237 /*
   2238  * pool_cache_reclaim:
   2239  *
   2240  *	Reclaim a pool cache for pool_reclaim().
   2241  */
   2242 static void
   2243 pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
   2244     struct pool_cache_grouplist *pcgl)
   2245 {
   2246 
   2247 	/*
   2248 	 * We're locking in the wrong order (normally pool_cache -> pool,
   2249 	 * but the pool is already locked when we get here), so we have
   2250 	 * to use trylock.  If we can't lock the pool_cache, it's not really
   2251 	 * a big deal here.
   2252 	 */
   2253 	if (mutex_tryenter(&pc->pc_lock) == 0)
   2254 		return;
   2255 
   2256 	pool_do_cache_invalidate(pc, pq, pcgl);
   2257 
   2258 	mutex_exit(&pc->pc_lock);
   2259 }
   2260 
   2261 /*
   2262  * Pool backend allocators.
   2263  *
   2264  * Each pool has a backend allocator that handles allocation, deallocation,
   2265  * and any additional draining that might be needed.
   2266  *
   2267  * We provide two standard allocators:
   2268  *
   2269  *	pool_allocator_kmem - the default when no allocator is specified
   2270  *
   2271  *	pool_allocator_nointr - used for pools that will not be accessed
   2272  *	in interrupt context.
   2273  */
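         /*
          * A minimal custom allocator would look like this (illustrative;
          * the foo_* names are hypothetical):
          *
          *	struct pool_allocator foo_allocator = {
          *		foo_page_alloc, foo_page_free, 0,
          *	};
          *
          * where a third field (pa_pagesz) of 0 selects the default page
          * size, as in the standard allocators below.
          */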
   2274 void	*pool_page_alloc(struct pool *, int);
   2275 void	pool_page_free(struct pool *, void *);
   2276 
   2277 #ifdef POOL_SUBPAGE
   2278 struct pool_allocator pool_allocator_kmem_fullpage = {
   2279 	pool_page_alloc, pool_page_free, 0,
   2280 	.pa_backingmapptr = &kmem_map,
   2281 };
   2282 #else
   2283 struct pool_allocator pool_allocator_kmem = {
   2284 	pool_page_alloc, pool_page_free, 0,
   2285 	.pa_backingmapptr = &kmem_map,
   2286 };
   2287 #endif
   2288 
   2289 void	*pool_page_alloc_nointr(struct pool *, int);
   2290 void	pool_page_free_nointr(struct pool *, void *);
   2291 
   2292 #ifdef POOL_SUBPAGE
   2293 struct pool_allocator pool_allocator_nointr_fullpage = {
   2294 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2295 	.pa_backingmapptr = &kernel_map,
   2296 };
   2297 #else
   2298 struct pool_allocator pool_allocator_nointr = {
   2299 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2300 	.pa_backingmapptr = &kernel_map,
   2301 };
   2302 #endif
   2303 
   2304 #ifdef POOL_SUBPAGE
   2305 void	*pool_subpage_alloc(struct pool *, int);
   2306 void	pool_subpage_free(struct pool *, void *);
   2307 
   2308 struct pool_allocator pool_allocator_kmem = {
   2309 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2310 	.pa_backingmapptr = &kmem_map,
   2311 };
   2312 
   2313 void	*pool_subpage_alloc_nointr(struct pool *, int);
   2314 void	pool_subpage_free_nointr(struct pool *, void *);
   2315 
   2316 struct pool_allocator pool_allocator_nointr = {
   2317 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2318 	.pa_backingmapptr = &kmem_map,
   2319 };
   2320 #endif /* POOL_SUBPAGE */
   2321 
   2322 static void *
   2323 pool_allocator_alloc(struct pool *pp, int flags)
   2324 {
   2325 	struct pool_allocator *pa = pp->pr_alloc;
   2326 	void *res;
   2327 
   2328 	res = (*pa->pa_alloc)(pp, flags);
   2329 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2330 		/*
   2331 		 * We only run the drain hook here if PR_NOWAIT.
   2332 		 * In other cases, the hook will be run in
   2333 		 * pool_reclaim().
   2334 		 */
   2335 		if (pp->pr_drain_hook != NULL) {
   2336 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2337 			res = (*pa->pa_alloc)(pp, flags);
   2338 		}
   2339 	}
   2340 	return res;
   2341 }
   2342 
   2343 static void
   2344 pool_allocator_free(struct pool *pp, void *v)
   2345 {
   2346 	struct pool_allocator *pa = pp->pr_alloc;
   2347 
   2348 	(*pa->pa_free)(pp, v);
   2349 }
   2350 
   2351 void *
   2352 pool_page_alloc(struct pool *pp, int flags)
   2353 {
   2354 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2355 
   2356 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
   2357 }
   2358 
   2359 void
   2360 pool_page_free(struct pool *pp, void *v)
   2361 {
   2362 
   2363 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2364 }
   2365 
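         /*
          * The _meta variants below allocate pages for the pool subsystem's
          * own metadata and go through the non-caching uvm_km interface.
          */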
   2366 static void *
   2367 pool_page_alloc_meta(struct pool *pp, int flags)
   2368 {
   2369 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2370 
   2371 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
   2372 }
   2373 
   2374 static void
   2375 pool_page_free_meta(struct pool *pp, void *v)
   2376 {
   2377 
   2378 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
   2379 }
   2380 
   2381 #ifdef POOL_SUBPAGE
   2382 /* Sub-page allocator, for machines with large hardware pages. */
   2383 void *
   2384 pool_subpage_alloc(struct pool *pp, int flags)
   2385 {
   2386 	return pool_get(&psppool, flags);
   2387 }
   2388 
   2389 void
   2390 pool_subpage_free(struct pool *pp, void *v)
   2391 {
   2392 	pool_put(&psppool, v);
   2393 }
   2394 
   2395 /* We don't provide a real nointr allocator.  Maybe later. */
   2396 void *
   2397 pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2398 {
   2399 
   2400 	return (pool_subpage_alloc(pp, flags));
   2401 }
   2402 
   2403 void
   2404 pool_subpage_free_nointr(struct pool *pp, void *v)
   2405 {
   2406 
   2407 	pool_subpage_free(pp, v);
   2408 }
    2409 #endif /* POOL_SUBPAGE */

    2410 void *
   2411 pool_page_alloc_nointr(struct pool *pp, int flags)
   2412 {
   2413 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2414 
   2415 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
   2416 }
   2417 
   2418 void
   2419 pool_page_free_nointr(struct pool *pp, void *v)
   2420 {
   2421 
   2422 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2423 }
   2424