/*	$NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
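
/*
 * Illustrative usage sketch (not part of this file; "struct foo",
 * foo_pool and the "foopl" wait channel are hypothetical):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */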

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
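/*
 * PHPOOL_FREELIST_NELEM(0) is 0 (a plain header pool with no
 * freelist); for idx > 0 it yields 1 << idx entries, i.e. 2, 4,
 * 8, ... up to 128 for idx == PHPOOL_MAX - 1.
 */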

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
	union {
		/* !PR_NOTOUCH */
		struct {
			TAILQ_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			uint16_t
				phu_off;	/* start offset in page */
			uint16_t
				phu_firstfree;	/* first free item */
		} phu_notouch;
	} ph_u;
	uint16_t		ph_nmissing;	/* # of chunks in use */
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_off		ph_u.phu_notouch.phu_off
#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements, since
 * needless object construction/destruction is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
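
/*
 * Illustrative sketch of layering a cache on a pool (foo_ctor and
 * foo_dtor are hypothetical; see pool_cache_init() below for the
 * exact signature):
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 * foo_ctor() then runs only when an object is truly allocated from
 * the underlying pool, and foo_dtor() only when the object is finally
 * released back to it, not on every trip through the cache.
 */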

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static __inline int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

#define	PR_FREELIST_ALIGN(p)	roundup((uintptr_t)(p), sizeof(uint16_t))
#define	PR_FREELIST(ph)	((uint16_t *)PR_FREELIST_ALIGN((ph) + 1))
#define	PR_INDEX_USED	((uint16_t)-1)
#define	PR_INDEX_EOL	((uint16_t)-2)

static __inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	int idx = pr_item_notouch_index(pp, ph, obj);
	uint16_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] == PR_INDEX_USED);
	freelist[idx] = ph->ph_firstfree;
	ph->ph_firstfree = idx;
}

static __inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	int idx = ph->ph_firstfree;
	uint16_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] != PR_INDEX_USED);
	ph->ph_firstfree = freelist[idx];
	freelist[idx] = PR_INDEX_USED;

	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
}
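
/*
 * Worked example of the PR_NOTOUCH freelist, assuming four items per
 * page: pool_prime_page() initializes the array to {1, 2, 3, EOL}
 * with ph_firstfree = 0.  pr_item_notouch_get() hands out index 0 and
 * leaves {USED, 2, 3, EOL} with ph_firstfree = 1; putting index 0
 * back pushes it on the head again: {1, 2, 3, EOL}, ph_firstfree = 0.
 */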

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and release it (or queue it for
	 * release).
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	if (pq) {
		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(pp->pr_phpool, ph);
			splx(s);
		}
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;
	size_t trysize, phsize;
	int s;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

#ifdef POOL_SUBPAGE
	/*
	 * XXX We don't provide a real `nointr' back-end
	 * yet; all sub-pages come from a kmem back-end.
	 * maybe some day...
	 */
	if (palloc == NULL) {
		extern struct pool_allocator pool_allocator_kmem_subpage;
		palloc = &pool_allocator_kmem_subpage;
	}
	/*
	 * We'll assume any user-specified back-end allocator
	 * will deal with sub-pages, or simply doesn't care.
	 */
#else
	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0) {
#ifdef POOL_SUBPAGE
			if (palloc == &pool_allocator_kmem)
				palloc->pa_pagesz = PAGE_SIZE;
			else
				palloc->pa_pagesz = POOL_SUBPAGE;
#else
			palloc->pa_pagesz = PAGE_SIZE;
#endif /* POOL_SUBPAGE */
		}

		TAILQ_INIT(&palloc->pa_list);

		simple_lock_init(&palloc->pa_slock);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		      (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid
	 * wasting too large a part of the page on the header, or when
	 * the items are too big.  Off-page page headers go into a splay
	 * tree, so we can match a returned item with its header based
	 * on the page address.  We use 1/16 of the page size and about
	 * 8 times the item size as the threshold (XXX: tune).
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
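	/*
	 * Worked example, assuming a 4096-byte page and a page header
	 * of (say) 48 bytes: 40-byte items fall below
	 * MIN(4096 / 16, 48 * 8) == 256, so their header stays in the
	 * page; 512-byte items fit 8 to a page but only 7 alongside
	 * the header ((4096 - 48) / 512 == 7), so their header is
	 * kept off-page.
	 */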
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
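	/*
	 * E.g. with 96 bytes of slack and 16-byte alignment,
	 * pr_maxcolor is 96 and successive pages start their items at
	 * offsets 0, 16, 32, ..., 96 before wrapping, spreading the
	 * items over different cache lines.
	 */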
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		struct pool_allocator *pa;
		int idx;
#ifdef POOL_SUBPAGE
		pa = &pool_allocator_kmem;
#else
		pa = NULL;
#endif
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = PR_FREELIST_ALIGN(sz)
				    + nelem * sizeof(uint16_t);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], pa);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", NULL);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	s = splvm();
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
	splx(s);
}

/*
 * Decommission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	int s;

	/* Locking order: pool_allocator -> pool */
	s = splvm();
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);
	splx(s);

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, NULL);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp) {
		drainpp = NULL;
	}
	simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(pp->pr_phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = TAILQ_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			pr_printlog(pp, pi, printf);
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
		    !TAILQ_EMPTY(&ph->ph_itemlist))) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

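	/*
	 * Round the item's address down to the start of its page;
	 * pa_pagemask was set up in pool_init() as ~(pa_pagesz - 1).
	 */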
	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
			simple_unlock(&pp->pr_slock);
			pr_rmpage(pp, ph, NULL);
			simple_lock(&pp->pr_slock);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

	simple_lock(&pp->pr_slock);

	pool_do_put(pp, v);

	simple_unlock(&pp->pr_slock);
}

#ifdef POOL_DIAGNOSTIC
#define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int newpages;

	simple_lock(&pp->pr_slock);

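	/* E.g. n == 10 items at 4 per page rounds up to 3 new pages. */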
	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			simple_lock(&pp->pr_slock);
			break;
		}

		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	s = splclock();
	ph->ph_time = mono_time;
	splx(s);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1271 	 */
   1272 	if (ioff != 0)
   1273 		cp = (caddr_t)(cp + (align - ioff));
   1274 
   1275 	/*
   1276 	 * Insert remaining chunks on the bucket list.
   1277 	 */
   1278 	n = pp->pr_itemsperpage;
   1279 	pp->pr_nitems += n;
   1280 
   1281 	ph->ph_off = cp - storage;
   1282 
   1283 	if (pp->pr_roflags & PR_NOTOUCH) {
   1284 		uint16_t *freelist = PR_FREELIST(ph);
   1285 		int i;
   1286 
   1287 		ph->ph_firstfree = 0;
   1288 		for (i = 0; i < n - 1; i++)
   1289 			freelist[i] = i + 1;
   1290 		freelist[n - 1] = PR_INDEX_EOL;
   1291 	} else {
   1292 		while (n--) {
   1293 			pi = (struct pool_item *)cp;
   1294 
   1295 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1296 
   1297 			/* Insert on page list */
   1298 			TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
   1299 #ifdef DIAGNOSTIC
   1300 			pi->pi_magic = PI_MAGIC;
   1301 #endif
   1302 			cp = (caddr_t)(cp + pp->pr_size);
   1303 		}
   1304 	}
   1305 
   1306 	/*
   1307 	 * If the pool was depleted, point at the new page.
   1308 	 */
   1309 	if (pp->pr_curpage == NULL)
   1310 		pp->pr_curpage = ph;
   1311 
   1312 	if (++pp->pr_npages > pp->pr_hiwat)
   1313 		pp->pr_hiwat = pp->pr_npages;
   1314 }
   1315 
   1316 /*
   1317  * Used by pool_get() when nitems drops below the low water mark.  This
   1318  * is used to catch up pr_nitems with the low water mark.
   1319  *
   1320  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1321  *
   1322  * Note 2, we must be called with the pool already locked, and we return
   1323  * with it locked.
   1324  */
   1325 static int
   1326 pool_catchup(struct pool *pp)
   1327 {
   1328 	struct pool_item_header *ph = NULL;
   1329 	caddr_t cp;
   1330 	int error = 0;
   1331 
   1332 	while (POOL_NEEDS_CATCHUP(pp)) {
   1333 		/*
   1334 		 * Call the page back-end allocator for more memory.
   1335 		 *
   1336 		 * XXX: We never wait, so should we bother unlocking
   1337 		 * the pool descriptor?
   1338 		 */
   1339 		simple_unlock(&pp->pr_slock);
   1340 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
   1341 		if (__predict_true(cp != NULL))
   1342 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1343 		if (__predict_false(cp == NULL || ph == NULL)) {
   1344 			if (cp != NULL)
   1345 				pool_allocator_free(pp, cp);
   1346 			error = ENOMEM;
   1347 			simple_lock(&pp->pr_slock);
   1348 			break;
   1349 		}
   1350 		simple_lock(&pp->pr_slock);
   1351 		pool_prime_page(pp, cp, ph);
   1352 		pp->pr_npagealloc++;
   1353 	}
   1354 
   1355 	return (error);
   1356 }
   1357 
   1358 static void
   1359 pool_update_curpage(struct pool *pp)
   1360 {
   1361 
   1362 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1363 	if (pp->pr_curpage == NULL) {
   1364 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1365 	}
   1366 }
   1367 
   1368 void
   1369 pool_setlowat(struct pool *pp, int n)
   1370 {
   1371 
   1372 	simple_lock(&pp->pr_slock);
   1373 
   1374 	pp->pr_minitems = n;
   1375 	pp->pr_minpages = (n == 0)
   1376 		? 0
   1377 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1378 
   1379 	/* Make sure we're caught up with the newly-set low water mark. */
   1380 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1381 		/*
   1382 		 * XXX: Should we log a warning?  Should we set up a timeout
   1383 		 * to try again in a second or so?  The latter could break
   1384 		 * a caller's assumptions about interrupt protection, etc.
   1385 		 */
   1386 	}
   1387 
   1388 	simple_unlock(&pp->pr_slock);
   1389 }
   1390 
   1391 void
   1392 pool_sethiwat(struct pool *pp, int n)
   1393 {
   1394 
   1395 	simple_lock(&pp->pr_slock);
   1396 
   1397 	pp->pr_maxpages = (n == 0)
   1398 		? 0
   1399 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1400 
   1401 	simple_unlock(&pp->pr_slock);
   1402 }
   1403 
   1404 void
   1405 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1406 {
   1407 
   1408 	simple_lock(&pp->pr_slock);
   1409 
   1410 	pp->pr_hardlimit = n;
   1411 	pp->pr_hardlimit_warning = warnmess;
   1412 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1413 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1414 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1415 
   1416 	/*
   1417 	 * In-line version of pool_sethiwat(), because we don't want to
   1418 	 * release the lock.
   1419 	 */
   1420 	pp->pr_maxpages = (n == 0)
   1421 		? 0
   1422 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1423 
   1424 	simple_unlock(&pp->pr_slock);
   1425 }
   1426 
   1427 /*
   1428  * Release all complete pages that have not been used recently.
   1429  */
   1430 int
   1431 #ifdef POOL_DIAGNOSTIC
   1432 _pool_reclaim(struct pool *pp, const char *file, long line)
   1433 #else
   1434 pool_reclaim(struct pool *pp)
   1435 #endif
   1436 {
   1437 	struct pool_item_header *ph, *phnext;
   1438 	struct pool_cache *pc;
   1439 	struct timeval curtime;
   1440 	struct pool_pagelist pq;
   1441 	struct timeval diff;
   1442 	int s;
   1443 
   1444 	if (pp->pr_drain_hook != NULL) {
   1445 		/*
   1446 		 * The drain hook must be called with the pool unlocked.
   1447 		 */
   1448 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1449 	}
   1450 
   1451 	if (simple_lock_try(&pp->pr_slock) == 0)
   1452 		return (0);
   1453 	pr_enter(pp, file, line);
   1454 
   1455 	LIST_INIT(&pq);
   1456 
   1457 	/*
   1458 	 * Reclaim items from the pool's caches.
   1459 	 */
   1460 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1461 		pool_cache_reclaim(pc);
   1462 
   1463 	s = splclock();
   1464 	curtime = mono_time;
   1465 	splx(s);
   1466 
   1467 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1468 		phnext = LIST_NEXT(ph, ph_pagelist);
   1469 
   1470 		/* Check our minimum page claim */
   1471 		if (pp->pr_npages <= pp->pr_minpages)
   1472 			break;
   1473 
   1474 		KASSERT(ph->ph_nmissing == 0);
   1475 		timersub(&curtime, &ph->ph_time, &diff);
   1476 		if (diff.tv_sec < pool_inactive_time)
   1477 			continue;
   1478 
   1479 		/*
   1480 		 * If freeing this page would put us below
   1481 		 * the low water mark, stop now.
   1482 		 */
   1483 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1484 		    pp->pr_minitems)
   1485 			break;
   1486 
   1487 		pr_rmpage(pp, ph, &pq);
   1488 	}
   1489 
   1490 	pr_leave(pp);
   1491 	simple_unlock(&pp->pr_slock);
   1492 	if (LIST_EMPTY(&pq))
   1493 		return (0);
   1494 
   1495 	while ((ph = LIST_FIRST(&pq)) != NULL) {
   1496 		LIST_REMOVE(ph, ph_pagelist);
   1497 		pool_allocator_free(pp, ph->ph_page);
   1498 		if (pp->pr_roflags & PR_PHINPAGE) {
   1499 			continue;
   1500 		}
   1501 		s = splvm();
   1502 		pool_put(pp->pr_phpool, ph);
   1503 		splx(s);
   1504 	}
   1505 
   1506 	return (1);
   1507 }
   1508 
   1509 /*
   1510  * Drain pools, one at a time.
   1511  *
   1512  * Note, we must never be called from an interrupt context.
   1513  */
   1514 void
   1515 pool_drain(void *arg)
   1516 {
   1517 	struct pool *pp;
   1518 	int s;
   1519 
   1520 	pp = NULL;
   1521 	s = splvm();
   1522 	simple_lock(&pool_head_slock);
   1523 	if (drainpp == NULL) {
   1524 		drainpp = TAILQ_FIRST(&pool_head);
   1525 	}
   1526 	if (drainpp) {
   1527 		pp = drainpp;
   1528 		drainpp = TAILQ_NEXT(pp, pr_poollist);
   1529 	}
   1530 	simple_unlock(&pool_head_slock);
   1531 	pool_reclaim(pp);
   1532 	splx(s);
   1533 }
   1534 
   1535 /*
   1536  * Diagnostic helpers.
   1537  */
   1538 void
   1539 pool_print(struct pool *pp, const char *modif)
   1540 {
   1541 	int s;
   1542 
   1543 	s = splvm();
   1544 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1545 		printf("pool %s is locked; try again later\n",
   1546 		    pp->pr_wchan);
   1547 		splx(s);
   1548 		return;
   1549 	}
   1550 	pool_print1(pp, modif, printf);
   1551 	simple_unlock(&pp->pr_slock);
   1552 	splx(s);
   1553 }
   1554 
   1555 void
   1556 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1557 {
   1558 	int didlock = 0;
   1559 
   1560 	if (pp == NULL) {
   1561 		(*pr)("Must specify a pool to print.\n");
   1562 		return;
   1563 	}
   1564 
   1565 	/*
   1566 	 * Called from DDB; interrupts should be blocked, and all
   1567 	 * other processors should be paused.  We can skip locking
   1568 	 * the pool in this case.
   1569 	 *
   1570 	 * We do a simple_lock_try() just to print the lock
   1571 	 * status, however.
   1572 	 */
   1573 
   1574 	if (simple_lock_try(&pp->pr_slock) == 0)
   1575 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1576 	else
   1577 		didlock = 1;
   1578 
   1579 	pool_print1(pp, modif, pr);
   1580 
   1581 	if (didlock)
   1582 		simple_unlock(&pp->pr_slock);
   1583 }
   1584 
   1585 static void
   1586 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1587     void (*pr)(const char *, ...))
   1588 {
   1589 	struct pool_item_header *ph;
   1590 #ifdef DIAGNOSTIC
   1591 	struct pool_item *pi;
   1592 #endif
   1593 
   1594 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1595 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1596 		    ph->ph_page, ph->ph_nmissing,
   1597 		    (u_long)ph->ph_time.tv_sec,
   1598 		    (u_long)ph->ph_time.tv_usec);
   1599 #ifdef DIAGNOSTIC
   1600 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1601 			TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1602 				if (pi->pi_magic != PI_MAGIC) {
   1603 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1604 					    pi, pi->pi_magic);
   1605 				}
   1606 			}
   1607 		}
   1608 #endif
   1609 	}
   1610 }
   1611 
   1612 static void
   1613 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1614 {
   1615 	struct pool_item_header *ph;
   1616 	struct pool_cache *pc;
   1617 	struct pool_cache_group *pcg;
   1618 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1619 	char c;
   1620 
   1621 	while ((c = *modif++) != '\0') {
   1622 		if (c == 'l')
   1623 			print_log = 1;
   1624 		if (c == 'p')
   1625 			print_pagelist = 1;
   1626 		if (c == 'c')
   1627 			print_cache = 1;
   1628 	}
   1629 
   1630 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1631 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1632 	    pp->pr_roflags);
   1633 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1634 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1635 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1636 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1637 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1638 
   1639 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1640 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1641 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1642 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1643 
   1644 	if (print_pagelist == 0)
   1645 		goto skip_pagelist;
   1646 
   1647 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1648 		(*pr)("\n\tempty page list:\n");
   1649 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1650 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1651 		(*pr)("\n\tfull page list:\n");
   1652 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1653 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1654 		(*pr)("\n\tpartial-page list:\n");
   1655 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1656 
   1657 	if (pp->pr_curpage == NULL)
   1658 		(*pr)("\tno current page\n");
   1659 	else
   1660 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1661 
   1662  skip_pagelist:
   1663 	if (print_log == 0)
   1664 		goto skip_log;
   1665 
   1666 	(*pr)("\n");
   1667 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1668 		(*pr)("\tno log\n");
   1669 	else
   1670 		pr_printlog(pp, NULL, pr);
   1671 
   1672  skip_log:
   1673 	if (print_cache == 0)
   1674 		goto skip_cache;
   1675 
   1676 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1677 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
   1678 		    pc->pc_allocfrom, pc->pc_freeto);
   1679 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1680 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1681 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1682 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
   1683 			for (i = 0; i < PCG_NOBJECTS; i++) {
   1684 				if (pcg->pcg_objects[i].pcgo_pa !=
   1685 				    POOL_PADDR_INVALID) {
   1686 					(*pr)("\t\t\t%p, 0x%llx\n",
   1687 					    pcg->pcg_objects[i].pcgo_va,
   1688 					    (unsigned long long)
   1689 					    pcg->pcg_objects[i].pcgo_pa);
   1690 				} else {
   1691 					(*pr)("\t\t\t%p\n",
   1692 					    pcg->pcg_objects[i].pcgo_va);
   1693 				}
   1694 			}
   1695 		}
   1696 	}
   1697 
   1698  skip_cache:
   1699 	pr_enter_check(pp, pr);
   1700 }
   1701 
   1702 static int
   1703 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1704 {
   1705 	struct pool_item *pi;
   1706 	caddr_t page;
   1707 	int n;
   1708 
   1709 	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
   1710 	if (page != ph->ph_page &&
   1711 	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1712 		if (label != NULL)
   1713 			printf("%s: ", label);
   1714 		printf("pool(%p:%s): page inconsistency: page %p;"
   1715 		       " at page head addr %p (p %p)\n", pp,
   1716 			pp->pr_wchan, ph->ph_page,
   1717 			ph, page);
   1718 		return 1;
   1719 	}
   1720 
   1721 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1722 		return 0;
   1723 
   1724 	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
   1725 	     pi != NULL;
   1726 	     pi = TAILQ_NEXT(pi,pi_list), n++) {
   1727 
   1728 #ifdef DIAGNOSTIC
   1729 		if (pi->pi_magic != PI_MAGIC) {
   1730 			if (label != NULL)
   1731 				printf("%s: ", label);
   1732 			printf("pool(%s): free list modified: magic=%x;"
   1733 			       " page %p; item ordinal %d;"
   1734 			       " addr %p (p %p)\n",
   1735 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1736 				n, pi, page);
   1737 			panic("pool");
   1738 		}
   1739 #endif
   1740 		page =
   1741 		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
   1742 		if (page == ph->ph_page)
   1743 			continue;
   1744 
   1745 		if (label != NULL)
   1746 			printf("%s: ", label);
   1747 		printf("pool(%p:%s): page inconsistency: page %p;"
   1748 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1749 			pp->pr_wchan, ph->ph_page,
   1750 			n, pi, page);
   1751 		return 1;
   1752 	}
   1753 	return 0;
   1754 }
   1755 
   1757 int
   1758 pool_chk(struct pool *pp, const char *label)
   1759 {
   1760 	struct pool_item_header *ph;
   1761 	int r = 0;
   1762 
   1763 	simple_lock(&pp->pr_slock);
   1764 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1765 		r = pool_chk_page(pp, label, ph);
   1766 		if (r) {
   1767 			goto out;
   1768 		}
   1769 	}
   1770 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1771 		r = pool_chk_page(pp, label, ph);
   1772 		if (r) {
   1773 			goto out;
   1774 		}
   1775 	}
   1776 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1777 		r = pool_chk_page(pp, label, ph);
   1778 		if (r) {
   1779 			goto out;
   1780 		}
   1781 	}
   1782 
   1783 out:
   1784 	simple_unlock(&pp->pr_slock);
   1785 	return (r);
   1786 }
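
/*
 * Illustrative use of pool_chk() (a sketch; "foo_pool" and the panic
 * message are hypothetical):
 *
 *	if (pool_chk(&foo_pool, __func__) != 0)
 *		panic("foo_pool corrupted");
 *
 * A zero return means every page header on the empty, full and partial
 * page lists passed pool_chk_page().
 */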
   1787 
   1788 /*
   1789  * pool_cache_init:
   1790  *
   1791  *	Initialize a pool cache.
   1792  *
   1793  *	NOTE: If the pool must be protected from interrupts, we expect
   1794  *	to be called at the appropriate interrupt priority level.
   1795  */
   1796 void
   1797 pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1798     int (*ctor)(void *, void *, int),
   1799     void (*dtor)(void *, void *),
   1800     void *arg)
   1801 {
   1802 
   1803 	TAILQ_INIT(&pc->pc_grouplist);
   1804 	simple_lock_init(&pc->pc_slock);
   1805 
   1806 	pc->pc_allocfrom = NULL;
   1807 	pc->pc_freeto = NULL;
   1808 	pc->pc_pool = pp;
   1809 
   1810 	pc->pc_ctor = ctor;
   1811 	pc->pc_dtor = dtor;
   1812 	pc->pc_arg  = arg;
   1813 
   1814 	pc->pc_hits   = 0;
   1815 	pc->pc_misses = 0;
   1816 
   1817 	pc->pc_ngroups = 0;
   1818 
   1819 	pc->pc_nitems = 0;
   1820 
   1821 	simple_lock(&pp->pr_slock);
   1822 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
   1823 	simple_unlock(&pp->pr_slock);
   1824 }
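
/*
 * Illustrative setup (a sketch; "struct foo", foo_ctor() and foo_dtor()
 * are hypothetical).  A cache of constructed objects is layered on top
 * of an ordinary pool, typically once at subsystem initialization:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct foo *f = obj;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	static void
 *	foo_dtor(void *arg, void *obj)
 *	{
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 * The destructor would release any per-object resources.  A nonzero
 * return from the constructor causes pool_cache_get() to fail and to
 * release the raw object back to the pool.
 */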
   1825 
   1826 /*
   1827  * pool_cache_destroy:
   1828  *
   1829  *	Destroy a pool cache.
   1830  */
   1831 void
   1832 pool_cache_destroy(struct pool_cache *pc)
   1833 {
   1834 	struct pool *pp = pc->pc_pool;
   1835 
   1836 	/* First, invalidate the entire cache. */
   1837 	pool_cache_invalidate(pc);
   1838 
   1839 	/* ...and remove it from the pool's cache list. */
   1840 	simple_lock(&pp->pr_slock);
   1841 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
   1842 	simple_unlock(&pp->pr_slock);
   1843 }
   1844 
   1845 static __inline void *
   1846 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
   1847 {
   1848 	void *object;
   1849 	u_int idx;
   1850 
   1851 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1852 	KASSERT(pcg->pcg_avail != 0);
   1853 	idx = --pcg->pcg_avail;
   1854 
   1855 	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
   1856 	object = pcg->pcg_objects[idx].pcgo_va;
   1857 	if (pap != NULL)
   1858 		*pap = pcg->pcg_objects[idx].pcgo_pa;
   1859 	pcg->pcg_objects[idx].pcgo_va = NULL;
   1860 
   1861 	return (object);
   1862 }
   1863 
   1864 static __inline void
   1865 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
   1866 {
   1867 	u_int idx;
   1868 
   1869 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   1870 	idx = pcg->pcg_avail++;
   1871 
   1872 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
   1873 	pcg->pcg_objects[idx].pcgo_va = object;
   1874 	pcg->pcg_objects[idx].pcgo_pa = pa;
   1875 }
   1876 
   1877 /*
   1878  * pool_cache_get{,_paddr}:
   1879  *
   1880  *	Get an object from a pool cache (optionally returning
   1881  *	the physical address of the object).
   1882  */
   1883 void *
   1884 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
   1885 {
   1886 	struct pool_cache_group *pcg;
   1887 	void *object;
   1888 
   1889 #ifdef LOCKDEBUG
   1890 	if (flags & PR_WAITOK)
   1891 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
   1892 #endif
   1893 
   1894 	simple_lock(&pc->pc_slock);
   1895 
   1896 	if ((pcg = pc->pc_allocfrom) == NULL) {
   1897 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1898 			if (pcg->pcg_avail != 0) {
   1899 				pc->pc_allocfrom = pcg;
   1900 				goto have_group;
   1901 			}
   1902 		}
   1903 
   1904 		/*
   1905 		 * No groups with any available objects.  Allocate
   1906 		 * a new object, construct it, and return it to
   1907 		 * the caller.  We will allocate a group, if necessary,
   1908 		 * when the object is freed back to the cache.
   1909 		 */
   1910 		pc->pc_misses++;
   1911 		simple_unlock(&pc->pc_slock);
   1912 		object = pool_get(pc->pc_pool, flags);
   1913 		if (object != NULL && pc->pc_ctor != NULL) {
   1914 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   1915 				pool_put(pc->pc_pool, object);
   1916 				return (NULL);
   1917 			}
   1918 		}
   1919 		if (object != NULL && pap != NULL) {
   1920 #ifdef POOL_VTOPHYS
   1921 			*pap = POOL_VTOPHYS(object);
   1922 #else
   1923 			*pap = POOL_PADDR_INVALID;
   1924 #endif
   1925 		}
   1926 		return (object);
   1927 	}
   1928 
   1929  have_group:
   1930 	pc->pc_hits++;
   1931 	pc->pc_nitems--;
   1932 	object = pcg_get(pcg, pap);
   1933 
   1934 	if (pcg->pcg_avail == 0)
   1935 		pc->pc_allocfrom = NULL;
   1936 
   1937 	simple_unlock(&pc->pc_slock);
   1938 
   1939 	return (object);
   1940 }
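
/*
 * Illustrative caller (a sketch, continuing the hypothetical foo_cache
 * example above).  Most consumers use the pool_cache_get() form, which
 * is pool_cache_get_paddr() with a NULL physical-address argument:
 *
 *	struct foo *f;
 *
 *	if ((f = pool_cache_get(&foo_cache, PR_NOWAIT)) == NULL)
 *		return (ENOMEM);
 */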
   1941 
   1942 /*
   1943  * pool_cache_put{,_paddr}:
   1944  *
   1945  *	Put an object back to the pool cache (optionally caching the
   1946  *	physical address of the object).
   1947  */
   1948 void
   1949 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
   1950 {
   1951 	struct pool_cache_group *pcg;
   1952 	int s;
   1953 
   1954 	simple_lock(&pc->pc_slock);
   1955 
   1956 	if ((pcg = pc->pc_freeto) == NULL) {
   1957 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1958 			if (pcg->pcg_avail != PCG_NOBJECTS) {
   1959 				pc->pc_freeto = pcg;
   1960 				goto have_group;
   1961 			}
   1962 		}
   1963 
    1964 		/*
    1965 		 * No group has room for the object.  Attempt to
    1966 		 * allocate a new group.
    1967 		 */
   1968 		simple_unlock(&pc->pc_slock);
   1969 		s = splvm();
   1970 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1971 		splx(s);
   1972 		if (pcg != NULL) {
   1973 			memset(pcg, 0, sizeof(*pcg));
   1974 			simple_lock(&pc->pc_slock);
   1975 			pc->pc_ngroups++;
   1976 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1977 			if (pc->pc_freeto == NULL)
   1978 				pc->pc_freeto = pcg;
   1979 			goto have_group;
   1980 		}
   1981 
   1982 		/*
   1983 		 * Unable to allocate a cache group; destruct the object
   1984 		 * and free it back to the pool.
   1985 		 */
   1986 		pool_cache_destruct_object(pc, object);
   1987 		return;
   1988 	}
   1989 
   1990  have_group:
   1991 	pc->pc_nitems++;
   1992 	pcg_put(pcg, object, pa);
   1993 
   1994 	if (pcg->pcg_avail == PCG_NOBJECTS)
   1995 		pc->pc_freeto = NULL;
   1996 
   1997 	simple_unlock(&pc->pc_slock);
   1998 }
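
/*
 * Illustrative caller (a sketch; see the hypothetical foo_cache example
 * above).  Objects whose physical address is uninteresting go back via
 * the pool_cache_put() form, which passes POOL_PADDR_INVALID:
 *
 *	pool_cache_put(&foo_cache, f);
 */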
   1999 
   2000 /*
   2001  * pool_cache_destruct_object:
   2002  *
   2003  *	Force destruction of an object and its release back into
   2004  *	the pool.
   2005  */
   2006 void
   2007 pool_cache_destruct_object(struct pool_cache *pc, void *object)
   2008 {
   2009 
   2010 	if (pc->pc_dtor != NULL)
   2011 		(*pc->pc_dtor)(pc->pc_arg, object);
   2012 	pool_put(pc->pc_pool, object);
   2013 }
   2014 
   2015 /*
   2016  * pool_cache_do_invalidate:
   2017  *
   2018  *	This internal function implements pool_cache_invalidate() and
   2019  *	pool_cache_reclaim().
   2020  */
   2021 static void
   2022 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
   2023     void (*putit)(struct pool *, void *))
   2024 {
   2025 	struct pool_cache_group *pcg, *npcg;
   2026 	void *object;
   2027 	int s;
   2028 
   2029 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   2030 	     pcg = npcg) {
   2031 		npcg = TAILQ_NEXT(pcg, pcg_list);
   2032 		while (pcg->pcg_avail != 0) {
   2033 			pc->pc_nitems--;
   2034 			object = pcg_get(pcg, NULL);
   2035 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   2036 				pc->pc_allocfrom = NULL;
   2037 			if (pc->pc_dtor != NULL)
   2038 				(*pc->pc_dtor)(pc->pc_arg, object);
   2039 			(*putit)(pc->pc_pool, object);
   2040 		}
   2041 		if (free_groups) {
   2042 			pc->pc_ngroups--;
   2043 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   2044 			if (pc->pc_freeto == pcg)
   2045 				pc->pc_freeto = NULL;
   2046 			s = splvm();
   2047 			pool_put(&pcgpool, pcg);
   2048 			splx(s);
   2049 		}
   2050 	}
   2051 }
   2052 
   2053 /*
   2054  * pool_cache_invalidate:
   2055  *
   2056  *	Invalidate a pool cache (destruct and release all of the
   2057  *	cached objects).
   2058  */
   2059 void
   2060 pool_cache_invalidate(struct pool_cache *pc)
   2061 {
   2062 
   2063 	simple_lock(&pc->pc_slock);
   2064 	pool_cache_do_invalidate(pc, 0, pool_put);
   2065 	simple_unlock(&pc->pc_slock);
   2066 }
   2067 
   2068 /*
   2069  * pool_cache_reclaim:
   2070  *
   2071  *	Reclaim a pool cache for pool_reclaim().
   2072  */
   2073 static void
   2074 pool_cache_reclaim(struct pool_cache *pc)
   2075 {
   2076 
   2077 	simple_lock(&pc->pc_slock);
   2078 	pool_cache_do_invalidate(pc, 1, pool_do_put);
   2079 	simple_unlock(&pc->pc_slock);
   2080 }
   2081 
   2082 /*
   2083  * Pool backend allocators.
   2084  *
   2085  * Each pool has a backend allocator that handles allocation, deallocation,
   2086  * and any additional draining that might be needed.
   2087  *
   2088  * We provide two standard allocators:
   2089  *
   2090  *	pool_allocator_kmem - the default when no allocator is specified
   2091  *
   2092  *	pool_allocator_nointr - used for pools that will not be accessed
   2093  *	in interrupt context.
   2094  */
   2095 void	*pool_page_alloc(struct pool *, int);
   2096 void	pool_page_free(struct pool *, void *);
   2097 
   2098 struct pool_allocator pool_allocator_kmem = {
   2099 	pool_page_alloc, pool_page_free, 0,
   2100 };
   2101 
   2102 void	*pool_page_alloc_nointr(struct pool *, int);
   2103 void	pool_page_free_nointr(struct pool *, void *);
   2104 
   2105 struct pool_allocator pool_allocator_nointr = {
   2106 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2107 };
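
/*
 * A pool can also supply its own backend allocator.  Minimal sketch
 * (hypothetical: foo_page_alloc() and foo_page_free() wrap whatever
 * resource the pool should draw from; the third member, pa_pagesz = 0,
 * selects the default page size):
 *
 *	static void	*foo_page_alloc(struct pool *, int);
 *	static void	foo_page_free(struct pool *, void *);
 *
 *	static struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 * and the pool is then created with:
 *
 *	pool_init(&foo_pool, size, 0, 0, 0, "foopl", &foo_allocator);
 */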
   2108 
   2109 #ifdef POOL_SUBPAGE
   2110 void	*pool_subpage_alloc(struct pool *, int);
   2111 void	pool_subpage_free(struct pool *, void *);
   2112 
   2113 struct pool_allocator pool_allocator_kmem_subpage = {
   2114 	pool_subpage_alloc, pool_subpage_free, 0,
   2115 };
   2116 #endif /* POOL_SUBPAGE */
   2117 
   2118 /*
   2119  * We have at least three different resources for the same allocation and
   2120  * each resource can be depleted.  First, we have the ready elements in the
   2121  * pool.  Then we have the resource (typically a vm_map) for this allocator.
   2122  * Finally, we have physical memory.  Waiting for any of these can be
   2123  * unnecessary when any other is freed, but the kernel doesn't support
   2124  * sleeping on multiple wait channels, so we have to employ another strategy.
   2125  *
   2126  * The caller sleeps on the pool (so that it can be awakened when an item
   2127  * is returned to the pool), but we set PA_WANT on the allocator.  When a
   2128  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
   2129  * will wake up all sleeping pools belonging to this allocator.
   2130  *
   2131  * XXX Thundering herd.
   2132  */
   2133 void *
   2134 pool_allocator_alloc(struct pool *org, int flags)
   2135 {
   2136 	struct pool_allocator *pa = org->pr_alloc;
   2137 	struct pool *pp, *start;
   2138 	int s, freed;
   2139 	void *res;
   2140 
   2141 	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
   2142 
   2143 	do {
   2144 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   2145 			return (res);
   2146 		if ((flags & PR_WAITOK) == 0) {
   2147 			/*
    2148 			 * We only run the drain hook here if PR_NOWAIT.
   2149 			 * In other cases, the hook will be run in
   2150 			 * pool_reclaim().
   2151 			 */
   2152 			if (org->pr_drain_hook != NULL) {
   2153 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
   2154 				    flags);
   2155 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   2156 					return (res);
   2157 			}
   2158 			break;
   2159 		}
   2160 
   2161 		/*
   2162 		 * Drain all pools, except "org", that use this
   2163 		 * allocator.  We do this to reclaim VA space.
   2164 		 * pa_alloc is responsible for waiting for
   2165 		 * physical memory.
   2166 		 *
    2167 		 * XXX We risk looping forever if someone calls
    2168 		 * pool_destroy on "start".  But there is no other
    2169 		 * way to have a potentially sleeping pool_reclaim,
    2170 		 * non-sleeping locks on the pool_allocator, and some
    2171 		 * rotation of the drained pools within the allocator.
   2172 		 *
   2173 		 * XXX Maybe we should use pool_head_slock for locking
   2174 		 * the allocators?
   2175 		 */
   2176 		freed = 0;
   2177 
   2178 		s = splvm();
   2179 		simple_lock(&pa->pa_slock);
   2180 		pp = start = TAILQ_FIRST(&pa->pa_list);
   2181 		do {
   2182 			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
   2183 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
   2184 			if (pp == org)
   2185 				continue;
   2186 			simple_unlock(&pa->pa_slock);
   2187 			freed = pool_reclaim(pp);
   2188 			simple_lock(&pa->pa_slock);
   2189 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
   2190 			 freed == 0);
   2191 
   2192 		if (freed == 0) {
   2193 			/*
   2194 			 * We set PA_WANT here, the caller will most likely
   2195 			 * sleep waiting for pages (if not, this won't hurt
   2196 			 * that much), and there is no way to set this in
   2197 			 * the caller without violating locking order.
   2198 			 */
   2199 			pa->pa_flags |= PA_WANT;
   2200 		}
   2201 		simple_unlock(&pa->pa_slock);
   2202 		splx(s);
   2203 	} while (freed);
   2204 	return (NULL);
   2205 }
   2206 
   2207 void
   2208 pool_allocator_free(struct pool *pp, void *v)
   2209 {
   2210 	struct pool_allocator *pa = pp->pr_alloc;
   2211 	int s;
   2212 
   2213 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
   2214 
   2215 	(*pa->pa_free)(pp, v);
   2216 
   2217 	s = splvm();
   2218 	simple_lock(&pa->pa_slock);
   2219 	if ((pa->pa_flags & PA_WANT) == 0) {
   2220 		simple_unlock(&pa->pa_slock);
   2221 		splx(s);
   2222 		return;
   2223 	}
   2224 
   2225 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
   2226 		simple_lock(&pp->pr_slock);
   2227 		if ((pp->pr_flags & PR_WANTED) != 0) {
   2228 			pp->pr_flags &= ~PR_WANTED;
   2229 			wakeup(pp);
   2230 		}
   2231 		simple_unlock(&pp->pr_slock);
   2232 	}
   2233 	pa->pa_flags &= ~PA_WANT;
   2234 	simple_unlock(&pa->pa_slock);
   2235 	splx(s);
   2236 }
   2237 
   2238 void *
   2239 pool_page_alloc(struct pool *pp, int flags)
   2240 {
   2241 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2242 
   2243 	return ((void *) uvm_km_alloc_poolpage(waitok));
   2244 }
   2245 
   2246 void
   2247 pool_page_free(struct pool *pp, void *v)
   2248 {
   2249 
   2250 	uvm_km_free_poolpage((vaddr_t) v);
   2251 }
   2252 
   2253 #ifdef POOL_SUBPAGE
   2254 /* Sub-page allocator, for machines with large hardware pages. */
   2255 void *
   2256 pool_subpage_alloc(struct pool *pp, int flags)
   2257 {
   2258 	void *v;
   2259 	int s;
   2260 	s = splvm();
   2261 	v = pool_get(&psppool, flags);
   2262 	splx(s);
   2263 	return v;
   2264 }
   2265 
   2266 void
   2267 pool_subpage_free(struct pool *pp, void *v)
   2268 {
   2269 	int s;
   2270 	s = splvm();
   2271 	pool_put(&psppool, v);
   2272 	splx(s);
   2273 }
   2274 
   2275 /* We don't provide a real nointr allocator.  Maybe later. */
   2276 void *
   2277 pool_page_alloc_nointr(struct pool *pp, int flags)
   2278 {
   2279 
   2280 	return (pool_subpage_alloc(pp, flags));
   2281 }
   2282 
   2283 void
   2284 pool_page_free_nointr(struct pool *pp, void *v)
   2285 {
   2286 
   2287 	pool_subpage_free(pp, v);
   2288 }
   2289 #else
   2290 void *
   2291 pool_page_alloc_nointr(struct pool *pp, int flags)
   2292 {
   2293 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2294 
   2295 	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
   2296 	    uvm.kernel_object, waitok));
   2297 }
   2298 
   2299 void
   2300 pool_page_free_nointr(struct pool *pp, void *v)
   2301 {
   2302 
   2303 	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
   2304 }
   2305 #endif /* POOL_SUBPAGE */
   2306