/*	$NetBSD: subr_pool.c,v 1.91 2004/01/16 12:47:37 yamt Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.91 2004/01/16 12:47:37 yamt Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
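
/*
 * A minimal usage sketch (illustrative only; "struct foo" and
 * "foo_pool" are hypothetical caller-side names):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Passing a NULL allocator selects the default kmem back-end; the
 * flags argument to pool_get() is PR_WAITOK or PR_NOWAIT.
 */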

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	unsigned int		ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; construction and destruction
 * are deferred until absolutely necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
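
/*
 * A sketch of how a cache is typically used (illustrative only;
 * "foo_cache", "foo_ctor" and "foo_dtor" are hypothetical caller-side
 * names; see pool_cache_init() below for the expected signatures):
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 */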

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it (or queue it for release).
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	if (pq) {
		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
	} else {
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(&phpool, ph);
			splx(s);
		}
	}
	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

#ifdef POOL_SUBPAGE
	/*
	 * XXX We don't provide a real `nointr' back-end
	 * yet; all sub-pages come from a kmem back-end.
	 * maybe some day...
	 */
	if (palloc == NULL) {
		extern struct pool_allocator pool_allocator_kmem_subpage;
		palloc = &pool_allocator_kmem_subpage;
	}
	/*
	 * We'll assume any user-specified back-end allocator
	 * will deal with sub-pages, or simply won't care.
	 */
#else
	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0) {
#ifdef POOL_SUBPAGE
			if (palloc == &pool_allocator_kmem)
				palloc->pa_pagesz = PAGE_SIZE;
			else
				palloc->pa_pagesz = POOL_SUBPAGE;
#else
			palloc->pa_pagesz = PAGE_SIZE;
#endif /* POOL_SUBPAGE */
		}

		TAILQ_INIT(&palloc->pa_list);

		simple_lock_init(&palloc->pa_slock);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		      (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go into a splay tree, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune).
	 */
	if (pp->pr_size < palloc->pa_pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz -
		    ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
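	/*
	 * For example (illustrative numbers): with align = 16 and
	 * ioff = 4, (align - ioff) % align == 12 bytes are left unused
	 * at the start of the item space, so every item begins 12 bytes
	 * past a 16-byte boundary and (item address + ioff) is 16-byte
	 * aligned, which pool_prime_page() asserts below.
	 */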
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
#ifdef POOL_SUBPAGE
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
		    "phpool", &pool_allocator_kmem);
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
#else
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", NULL);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", NULL);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Locking order: pool_allocator -> pool */
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, NULL);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp) {
		drainpp = NULL;
	}
	simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
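
/*
 * Example drain hook (illustrative only; "foo_drain_hook" is a
 * hypothetical caller-supplied function).  The hook is invoked with
 * the argument registered here plus the allocation flags, and is
 * expected to release items back to the pool:
 *
 *	void
 *	foo_drain_hook(void *arg, int flags)
 *	{
 *		...free cached foo structures back to foo_pool...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain_hook, NULL);
 */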

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(&phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}
	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent");
	}
#endif

#ifdef POOL_DIAGNOSTIC
	pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		       " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    or if we are flagged as immediately freeing back idle
	 *	    pages, free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		     (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
			simple_unlock(&pp->pr_slock);
			pr_rmpage(pp, ph, NULL);
			simple_lock(&pp->pr_slock);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

	simple_lock(&pp->pr_slock);

	pool_do_put(pp, v);

	simple_unlock(&pp->pr_slock);
}

#ifdef POOL_DIAGNOSTIC
#define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int newpages;

	simple_lock(&pp->pr_slock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			simple_lock(&pp->pr_slock);
			break;
		}

		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	s = splclock();
	ph->ph_time = mono_time;
	splx(s);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			simple_lock(&pp->pr_slock);
			break;
		}
		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}

void
pool_setlowat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

/*
 * Release all complete pages that have not been used recently.
 */
int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	struct pool_pagelist pq;
	struct timeval diff;
	int s;

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	if (simple_lock_try(&pp->pr_slock) == 0)
		return (0);
	pr_enter(pp, file, line);

	LIST_INIT(&pq);

	/*
	 * Reclaim items from the pool's caches.
	 */
	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		timersub(&curtime, &ph->ph_time, &diff);
		if (diff.tv_sec < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	if (LIST_EMPTY(&pq))
		return (0);

	while ((ph = LIST_FIRST(&pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if (pp->pr_roflags & PR_PHINPAGE) {
			continue;
		}
		s = splvm();
		pool_put(&phpool, ph);
		splx(s);
	}

	return (1);
}

/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	pp = NULL;
	s = splvm();
	simple_lock(&pool_head_slock);
	if (drainpp == NULL) {
		drainpp = TAILQ_FIRST(&pool_head);
	}
	if (drainpp) {
		pp = drainpp;
		drainpp = TAILQ_NEXT(pp, pr_poollist);
	}
	simple_unlock(&pool_head_slock);
	pool_reclaim(pp);
	splx(s);
}

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(&pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(&pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(&pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:
	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:
	if (print_cache == 0)
		goto skip_cache;

	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
		    pc->pc_allocfrom, pc->pc_freeto);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
			for (i = 0; i < PCG_NOBJECTS; i++) {
				if (pcg->pcg_objects[i].pcgo_pa !=
				    POOL_PADDR_INVALID) {
					(*pr)("\t\t\t%p, 0x%llx\n",
					    pcg->pcg_objects[i].pcgo_va,
					    (unsigned long long)
					    pcg->pcg_objects[i].pcgo_pa);
				} else {
					(*pr)("\t\t\t%p\n",
					    pcg->pcg_objects[i].pcgo_va);
				}
			}
		}
	}

 skip_cache:
	pr_enter_check(pp, pr);
}

static int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t page;
	int n;

	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
	if (page != ph->ph_page &&
	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		       " at page head addr %p (p %p)\n", pp,
			pp->pr_wchan, ph->ph_page,
			ph, page);
		return 1;
	}

	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: magic=%x;"
			       " page %p; item ordinal %d;"
			       " addr %p (p %p)\n",
				pp->pr_wchan, pi->pi_magic, ph->ph_page,
				n, pi, page);
			panic("pool");
		}
#endif
		page =
		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		       " item ordinal %d; addr %p (p %p)\n", pp,
			pp->pr_wchan, ph->ph_page,
			n, pi, page);
		return 1;
	}
	return 0;
}


int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);
	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

out:
	simple_unlock(&pp->pr_slock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 *
 *	NOTE: If the pool must be protected from interrupts, we expect
 *	to be called at the appropriate interrupt priority level.
 */
void
pool_cache_init(struct pool_cache *pc, struct pool *pp,
    int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *),
    void *arg)
{

	TAILQ_INIT(&pc->pc_grouplist);
	simple_lock_init(&pc->pc_slock);

	pc->pc_allocfrom = NULL;
	pc->pc_freeto = NULL;
	pc->pc_pool = pp;

	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;

	pc->pc_hits   = 0;
	pc->pc_misses = 0;

	pc->pc_ngroups = 0;

	pc->pc_nitems = 0;

	simple_lock(&pp->pr_slock);
	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}
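
/*
 * A sketch of a constructor/destructor pair for such a cache
 * (illustrative only; the "foo" names are hypothetical).  The
 * constructor returns 0 on success or non-zero on failure, in which
 * case pool_cache_get() hands the raw object back to the pool and
 * returns NULL:
 *
 *	int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		struct foo *f = object;
 *
 *		...set up the expensive, reusable parts of *f...
 *		return (0);
 *	}
 *
 *	void
 *	foo_dtor(void *arg, void *object)
 *	{
 *		...tear down what foo_ctor() set up...
 *	}
 */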

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(struct pool_cache *pc)
{
	struct pool *pp = pc->pc_pool;

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* ...and remove it from the pool's cache list. */
	simple_lock(&pp->pr_slock);
	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

static __inline void *
pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
	object = pcg->pcg_objects[idx].pcgo_va;
	if (pap != NULL)
		*pap = pcg->pcg_objects[idx].pcgo_pa;
	pcg->pcg_objects[idx].pcgo_va = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
	pcg->pcg_objects[idx].pcgo_va = object;
	pcg->pcg_objects[idx].pcgo_pa = pa;
}

/*
 * pool_cache_get{,_paddr}:
 *
 *	Get an object from a pool cache (optionally returning
 *	the physical address of the object).
 */
void *
pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
{
	struct pool_cache_group *pcg;
	void *object;

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
#endif

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		if (object != NULL && pap != NULL) {
#ifdef POOL_VTOPHYS
			*pap = POOL_VTOPHYS(object);
#else
			*pap = POOL_PADDR_INVALID;
#endif
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg, pap);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}

/*
 * pool_cache_put{,_paddr}:
 *
 *	Put an object back to the pool cache (optionally caching the
 *	physical address of the object).
 */
void
pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
{
	struct pool_cache_group *pcg;
	int s;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

   1792 		/*
   1793 		 * No group has room to receive the object.  Attempt to
   1794 		 * allocate a new group.
   1795 		 */
   1796 		simple_unlock(&pc->pc_slock);
   1797 		s = splvm();
   1798 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1799 		splx(s);
   1800 		if (pcg != NULL) {
   1801 			memset(pcg, 0, sizeof(*pcg));
   1802 			simple_lock(&pc->pc_slock);
   1803 			pc->pc_ngroups++;
   1804 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1805 			if (pc->pc_freeto == NULL)
   1806 				pc->pc_freeto = pcg;
   1807 			goto have_group;
   1808 		}
   1809 
   1810 		/*
   1811 		 * Unable to allocate a cache group; destruct the object
   1812 		 * and free it back to the pool.
   1813 		 */
   1814 		pool_cache_destruct_object(pc, object);
   1815 		return;
   1816 	}
   1817 
   1818  have_group:
   1819 	pc->pc_nitems++;
   1820 	pcg_put(pcg, object, pa);
   1821 
   1822 	if (pcg->pcg_avail == PCG_NOBJECTS)
   1823 		pc->pc_freeto = NULL;
   1824 
   1825 	simple_unlock(&pc->pc_slock);
   1826 }
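
        /*
         * Minimal usage sketch (illustrative only): assumes the
         * pool_cache_get()/pool_cache_put() convenience wrappers from
         * <sys/pool.h>, which pass a NULL / invalid physical address,
         * plus the hypothetical "foo_cache" from the earlier sketch:
         *
         *	struct foo *f;
         *
         *	f = pool_cache_get(&foo_cache, PR_WAITOK);
         *	if (f == NULL)
         *		return (ENOMEM);
         *	...
         *	pool_cache_put(&foo_cache, f);
         *
         * NULL is possible even with PR_WAITOK if the constructor fails.
         * On a cache hit the object comes back still constructed; on a
         * miss it is taken from the pool and run through the constructor
         * first.
         */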
   1827 
   1828 /*
   1829  * pool_cache_destruct_object:
   1830  *
   1831  *	Force destruction of an object and its release back into
   1832  *	the pool.
   1833  */
   1834 void
   1835 pool_cache_destruct_object(struct pool_cache *pc, void *object)
   1836 {
   1837 
   1838 	if (pc->pc_dtor != NULL)
   1839 		(*pc->pc_dtor)(pc->pc_arg, object);
   1840 	pool_put(pc->pc_pool, object);
   1841 }
   1842 
   1843 /*
   1844  * pool_cache_do_invalidate:
   1845  *
   1846  *	This internal function implements pool_cache_invalidate() and
   1847  *	pool_cache_reclaim().
   1848  */
   1849 static void
   1850 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
   1851     void (*putit)(struct pool *, void *))
   1852 {
   1853 	struct pool_cache_group *pcg, *npcg;
   1854 	void *object;
   1855 	int s;
   1856 
   1857 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   1858 	     pcg = npcg) {
   1859 		npcg = TAILQ_NEXT(pcg, pcg_list);
   1860 		while (pcg->pcg_avail != 0) {
   1861 			pc->pc_nitems--;
   1862 			object = pcg_get(pcg, NULL);
   1863 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   1864 				pc->pc_allocfrom = NULL;
   1865 			if (pc->pc_dtor != NULL)
   1866 				(*pc->pc_dtor)(pc->pc_arg, object);
   1867 			(*putit)(pc->pc_pool, object);
   1868 		}
   1869 		if (free_groups) {
   1870 			pc->pc_ngroups--;
   1871 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   1872 			if (pc->pc_freeto == pcg)
   1873 				pc->pc_freeto = NULL;
   1874 			s = splvm();
   1875 			pool_put(&pcgpool, pcg);
   1876 			splx(s);
   1877 		}
   1878 	}
   1879 }
   1880 
   1881 /*
   1882  * pool_cache_invalidate:
   1883  *
   1884  *	Invalidate a pool cache (destruct and release all of the
   1885  *	cached objects).
   1886  */
   1887 void
   1888 pool_cache_invalidate(struct pool_cache *pc)
   1889 {
   1890 
   1891 	simple_lock(&pc->pc_slock);
   1892 	pool_cache_do_invalidate(pc, 0, pool_put);
   1893 	simple_unlock(&pc->pc_slock);
   1894 }
   1895 
   1896 /*
   1897  * pool_cache_reclaim:
   1898  *
   1899  *	Reclaim a pool cache for pool_reclaim().  Unlike
         *	pool_cache_invalidate(), this also frees the emptied cache
         *	groups, and it releases objects with pool_do_put(), as
         *	pool_reclaim() calls us with the pool's lock already held.
   1900  */
   1901 static void
   1902 pool_cache_reclaim(struct pool_cache *pc)
   1903 {
   1904 
   1905 	simple_lock(&pc->pc_slock);
   1906 	pool_cache_do_invalidate(pc, 1, pool_do_put);
   1907 	simple_unlock(&pc->pc_slock);
   1908 }
   1909 
   1910 /*
   1911  * Pool backend allocators.
   1912  *
   1913  * Each pool has a backend allocator that handles allocation, deallocation,
   1914  * and any additional draining that might be needed.
   1915  *
   1916  * We provide two standard allocators:
   1917  *
   1918  *	pool_allocator_kmem - the default when no allocator is specified
   1919  *
   1920  *	pool_allocator_nointr - used for pools that will not be accessed
   1921  *	in interrupt context.
   1922  */
   1923 void	*pool_page_alloc(struct pool *, int);
   1924 void	pool_page_free(struct pool *, void *);
   1925 
   1926 struct pool_allocator pool_allocator_kmem = {
   1927 	pool_page_alloc, pool_page_free, 0,
   1928 };
   1929 
   1930 void	*pool_page_alloc_nointr(struct pool *, int);
   1931 void	pool_page_free_nointr(struct pool *, void *);
   1932 
   1933 struct pool_allocator pool_allocator_nointr = {
   1934 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   1935 };
   1936 
   1937 #ifdef POOL_SUBPAGE
   1938 void	*pool_subpage_alloc(struct pool *, int);
   1939 void	pool_subpage_free(struct pool *, void *);
   1940 
   1941 struct pool_allocator pool_allocator_kmem_subpage = {
   1942 	pool_subpage_alloc, pool_subpage_free, 0,
   1943 };
   1944 #endif /* POOL_SUBPAGE */
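
        /*
         * Illustrative sketch only ("struct bar"/"struct baz" and their
         * pools are hypothetical; the seven-argument pool_init() of this
         * vintage is assumed): a pool never used from interrupt context
         * can name the nointr allocator explicitly, while passing NULL
         * selects the default, pool_allocator_kmem:
         *
         *	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
         *	    &pool_allocator_nointr);
         *	pool_init(&baz_pool, sizeof(struct baz), 0, 0, 0, "bazpl",
         *	    NULL);
         */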
   1945 
   1946 /*
   1947  * We have at least three different resources for the same allocation and
   1948  * each resource can be depleted.  First, we have the ready elements in the
   1949  * pool.  Then we have the resource (typically a vm_map) for this allocator.
   1950  * Finally, we have physical memory.  A wait for any one of these can
   1951  * become unnecessary when any other is freed, but the kernel doesn't
   1952  * support sleeping on multiple wait channels, so we employ another strategy.
   1953  *
   1954  * The caller sleeps on the pool (so that it can be awakened when an item
   1955  * is returned to the pool), but we set PA_WANT on the allocator.  When a
   1956  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
   1957  * will wake up all sleeping pools belonging to this allocator.
   1958  *
   1959  * XXX Thundering herd.
   1960  */
   1961 void *
   1962 pool_allocator_alloc(struct pool *org, int flags)
   1963 {
   1964 	struct pool_allocator *pa = org->pr_alloc;
   1965 	struct pool *pp, *start;
   1966 	int s, freed;
   1967 	void *res;
   1968 
   1969 	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
   1970 
   1971 	do {
   1972 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   1973 			return (res);
   1974 		if ((flags & PR_WAITOK) == 0) {
   1975 			/*
   1976 			 * We only run the drain hook here if PR_NOWAIT.
   1977 			 * In other cases, the hook will be run in
   1978 			 * pool_reclaim().
   1979 			 */
   1980 			if (org->pr_drain_hook != NULL) {
   1981 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
   1982 				    flags);
   1983 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   1984 					return (res);
   1985 			}
   1986 			break;
   1987 		}
   1988 
   1989 		/*
   1990 		 * Drain all pools, except "org", that use this
   1991 		 * allocator.  We do this to reclaim VA space.
   1992 		 * pa_alloc is responsible for waiting for
   1993 		 * physical memory.
   1994 		 *
   1995 		 * XXX We risk looping forever if someone calls
   1996 		 * pool_destroy() on "start".  But there is no other
   1997 		 * way to have a potentially sleeping pool_reclaim(),
   1998 		 * non-sleeping locks on the pool_allocator, and some
   1999 		 * stirring of drained pools in the allocator.
   2000 		 *
   2001 		 * XXX Maybe we should use pool_head_slock for locking
   2002 		 * the allocators?
   2003 		 */
   2004 		freed = 0;
   2005 
   2006 		s = splvm();
   2007 		simple_lock(&pa->pa_slock);
   2008 		pp = start = TAILQ_FIRST(&pa->pa_list);
   2009 		do {
   2010 			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
   2011 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
   2012 			if (pp == org)
   2013 				continue;
   2014 			simple_unlock(&pa->pa_slock);
   2015 			freed = pool_reclaim(pp);
   2016 			simple_lock(&pa->pa_slock);
   2017 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
   2018 			 freed == 0);
   2019 
   2020 		if (freed == 0) {
   2021 			/*
   2022 			 * We set PA_WANT here, the caller will most likely
   2023 			 * sleep waiting for pages (if not, this won't hurt
   2024 			 * that much), and there is no way to set this in
   2025 			 * the caller without violating locking order.
   2026 			 */
   2027 			pa->pa_flags |= PA_WANT;
   2028 		}
   2029 		simple_unlock(&pa->pa_slock);
   2030 		splx(s);
   2031 	} while (freed);
   2032 	return (NULL);
   2033 }
   2034 
   2035 void
   2036 pool_allocator_free(struct pool *pp, void *v)
   2037 {
   2038 	struct pool_allocator *pa = pp->pr_alloc;
   2039 	int s;
   2040 
   2041 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
   2042 
   2043 	(*pa->pa_free)(pp, v);
   2044 
   2045 	s = splvm();
   2046 	simple_lock(&pa->pa_slock);
   2047 	if ((pa->pa_flags & PA_WANT) == 0) {
   2048 		simple_unlock(&pa->pa_slock);
   2049 		splx(s);
   2050 		return;
   2051 	}
   2052 
   2053 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
   2054 		simple_lock(&pp->pr_slock);
   2055 		if ((pp->pr_flags & PR_WANTED) != 0) {
   2056 			pp->pr_flags &= ~PR_WANTED;
   2057 			wakeup(pp);
   2058 		}
   2059 		simple_unlock(&pp->pr_slock);
   2060 	}
   2061 	pa->pa_flags &= ~PA_WANT;
   2062 	simple_unlock(&pa->pa_slock);
   2063 	splx(s);
   2064 }
   2065 
   2066 void *
   2067 pool_page_alloc(struct pool *pp, int flags)
   2068 {
   2069 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2070 
   2071 	return ((void *) uvm_km_alloc_poolpage(waitok));
   2072 }
   2073 
   2074 void
   2075 pool_page_free(struct pool *pp, void *v)
   2076 {
   2077 
   2078 	uvm_km_free_poolpage((vaddr_t) v);
   2079 }
   2080 
   2081 #ifdef POOL_SUBPAGE
   2082 /* Sub-page allocator, for machines with large hardware pages. */
   2083 void *
   2084 pool_subpage_alloc(struct pool *pp, int flags)
   2085 {
   2086 
   2087 	return (pool_get(&psppool, flags));
   2088 }
   2089 
   2090 void
   2091 pool_subpage_free(struct pool *pp, void *v)
   2092 {
   2093 
   2094 	pool_put(&psppool, v);
   2095 }
   2096 
   2097 /* We don't provide a real nointr allocator.  Maybe later. */
   2098 void *
   2099 pool_page_alloc_nointr(struct pool *pp, int flags)
   2100 {
   2101 
   2102 	return (pool_subpage_alloc(pp, flags));
   2103 }
   2104 
   2105 void
   2106 pool_page_free_nointr(struct pool *pp, void *v)
   2107 {
   2108 
   2109 	pool_subpage_free(pp, v);
   2110 }
   2111 #else
   2112 void *
   2113 pool_page_alloc_nointr(struct pool *pp, int flags)
   2114 {
   2115 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2116 
   2117 	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
   2118 	    uvm.kernel_object, waitok));
   2119 }
   2120 
   2121 void
   2122 pool_page_free_nointr(struct pool *pp, void *v)
   2123 {
   2124 
   2125 	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
   2126 }
   2127 #endif /* POOL_SUBPAGE */
   2128