Home | History | Annotate | Line # | Download | only in kern
subr_pool.c revision 1.72
      1 /*	$NetBSD: subr_pool.c,v 1.72 2002/03/09 01:56:27 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include <sys/cdefs.h>
     41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.72 2002/03/09 01:56:27 thorpej Exp $");
     42 
     43 #include "opt_pool.h"
     44 #include "opt_poollog.h"
     45 #include "opt_lockdebug.h"
     46 
     47 #include <sys/param.h>
     48 #include <sys/systm.h>
     49 #include <sys/proc.h>
     50 #include <sys/errno.h>
     51 #include <sys/kernel.h>
     52 #include <sys/malloc.h>
     53 #include <sys/lock.h>
     54 #include <sys/pool.h>
     55 #include <sys/syslog.h>
     56 
     57 #include <uvm/uvm.h>
     58 
     59 /*
     60  * Pool resource management utility.
     61  *
     62  * Memory is allocated in pages which are split into pieces according
     63  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
     64  * in the pool structure and the individual pool items are on a linked list
     65  * headed by `ph_itemlist' in each page header. The memory for building
     66  * the page list is either taken from the allocated pages themselves (for
     67  * small pool items) or taken from an internal pool of page headers (`phpool').
     68  */
     69 
     70 /* List of all pools */
     71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     72 
     73 /* Private pool for page header structures */
     74 static struct pool phpool;
     75 
     76 #ifdef POOL_SUBPAGE
     77 /* Pool of subpages for use by normal pools. */
     78 static struct pool psppool;
     79 #endif
     80 
     81 /* # of seconds to retain page after last use */
     82 int pool_inactive_time = 10;
     83 
     84 /* Next candidate for drainage (see pool_drain()) */
     85 static struct pool	*drainpp;
     86 
     87 /* This spin lock protects both pool_head and drainpp. */
     88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
     89 
     90 struct pool_item_header {
     91 	/* Page headers */
     92 	TAILQ_ENTRY(pool_item_header)
     93 				ph_pagelist;	/* pool page list */
     94 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
     95 	LIST_ENTRY(pool_item_header)
     96 				ph_hashlist;	/* Off-page page headers */
     97 	int			ph_nmissing;	/* # of chunks in use */
     98 	caddr_t			ph_page;	/* this page's address */
     99 	struct timeval		ph_time;	/* last referenced */
    100 };
    101 TAILQ_HEAD(pool_pagelist,pool_item_header);
    102 
    103 struct pool_item {
    104 #ifdef DIAGNOSTIC
    105 	int pi_magic;
    106 #endif
    107 #define	PI_MAGIC 0xdeadbeef
    108 	/* Other entries use only this list entry */
    109 	TAILQ_ENTRY(pool_item)	pi_list;
    110 };
    111 
    112 #define	PR_HASH_INDEX(pp,addr) \
    113 	(((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
    114 	 (PR_HASHTABSIZE - 1))
    115 
    116 #define	POOL_NEEDS_CATCHUP(pp)						\
    117 	((pp)->pr_nitems < (pp)->pr_minitems)
    118 
    119 /*
    120  * Pool cache management.
    121  *
    122  * Pool caches provide a way for constructed objects to be cached by the
    123  * pool subsystem.  This can lead to performance improvements by avoiding
    124  * needless object construction/destruction; it is deferred until absolutely
    125  * necessary.
    126  *
    127  * Caches are grouped into cache groups.  Each cache group references
    128  * up to 16 constructed objects.  When a cache allocates an object
    129  * from the pool, it calls the object's constructor and places it into
    130  * a cache group.  When a cache group frees an object back to the pool,
    131  * it first calls the object's destructor.  This allows the object to
    132  * persist in constructed form while freed to the cache.
    133  *
    134  * Multiple caches may exist for each pool.  This allows a single
    135  * object type to have multiple constructed forms.  The pool references
    136  * each cache, so that when a pool is drained by the pagedaemon, it can
    137  * drain each individual cache as well.  Each time a cache is drained,
    138  * the most idle cache group is freed to the pool in its entirety.
    139  *
    140  * Pool caches are layed on top of pools.  By layering them, we can avoid
    141  * the complexity of cache management for pools which would not benefit
    142  * from it.
    143  */
    144 
    145 /* The cache group pool. */
    146 static struct pool pcgpool;
    147 
    148 /* The pool cache group. */
    149 #define	PCG_NOBJECTS		16
    150 struct pool_cache_group {
    151 	TAILQ_ENTRY(pool_cache_group)
    152 		pcg_list;	/* link in the pool cache's group list */
    153 	u_int	pcg_avail;	/* # available objects */
    154 				/* pointers to the objects */
    155 	void	*pcg_objects[PCG_NOBJECTS];
    156 };
    157 
    158 static void	pool_cache_reclaim(struct pool_cache *);
    159 
    160 static int	pool_catchup(struct pool *);
    161 static void	pool_prime_page(struct pool *, caddr_t,
    162 		    struct pool_item_header *);
    163 
    164 void		*pool_allocator_alloc(struct pool *, int);
    165 void		pool_allocator_free(struct pool *, void *);
    166 
    167 static void pool_print1(struct pool *, const char *,
    168 	void (*)(const char *, ...));
    169 
    170 /*
    171  * Pool log entry. An array of these is allocated in pool_init().
    172  */
    173 struct pool_log {
    174 	const char	*pl_file;
    175 	long		pl_line;
    176 	int		pl_action;
    177 #define	PRLOG_GET	1
    178 #define	PRLOG_PUT	2
    179 	void		*pl_addr;
    180 };
    181 
    182 /* Number of entries in pool log buffers */
    183 #ifndef POOL_LOGSIZE
    184 #define	POOL_LOGSIZE	10
    185 #endif
    186 
    187 int pool_logsize = POOL_LOGSIZE;
    188 
    189 #ifdef POOL_DIAGNOSTIC
    190 static __inline void
    191 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    192 {
    193 	int n = pp->pr_curlogentry;
    194 	struct pool_log *pl;
    195 
    196 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    197 		return;
    198 
    199 	/*
    200 	 * Fill in the current entry. Wrap around and overwrite
    201 	 * the oldest entry if necessary.
    202 	 */
    203 	pl = &pp->pr_log[n];
    204 	pl->pl_file = file;
    205 	pl->pl_line = line;
    206 	pl->pl_action = action;
    207 	pl->pl_addr = v;
    208 	if (++n >= pp->pr_logsize)
    209 		n = 0;
    210 	pp->pr_curlogentry = n;
    211 }
    212 
    213 static void
    214 pr_printlog(struct pool *pp, struct pool_item *pi,
    215     void (*pr)(const char *, ...))
    216 {
    217 	int i = pp->pr_logsize;
    218 	int n = pp->pr_curlogentry;
    219 
    220 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    221 		return;
    222 
    223 	/*
    224 	 * Print all entries in this pool's log.
    225 	 */
    226 	while (i-- > 0) {
    227 		struct pool_log *pl = &pp->pr_log[n];
    228 		if (pl->pl_action != 0) {
    229 			if (pi == NULL || pi == pl->pl_addr) {
    230 				(*pr)("\tlog entry %d:\n", i);
    231 				(*pr)("\t\taction = %s, addr = %p\n",
    232 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    233 				    pl->pl_addr);
    234 				(*pr)("\t\tfile: %s at line %lu\n",
    235 				    pl->pl_file, pl->pl_line);
    236 			}
    237 		}
    238 		if (++n >= pp->pr_logsize)
    239 			n = 0;
    240 	}
    241 }
    242 
    243 static __inline void
    244 pr_enter(struct pool *pp, const char *file, long line)
    245 {
    246 
    247 	if (__predict_false(pp->pr_entered_file != NULL)) {
    248 		printf("pool %s: reentrancy at file %s line %ld\n",
    249 		    pp->pr_wchan, file, line);
    250 		printf("         previous entry at file %s line %ld\n",
    251 		    pp->pr_entered_file, pp->pr_entered_line);
    252 		panic("pr_enter");
    253 	}
    254 
    255 	pp->pr_entered_file = file;
    256 	pp->pr_entered_line = line;
    257 }
    258 
    259 static __inline void
    260 pr_leave(struct pool *pp)
    261 {
    262 
    263 	if (__predict_false(pp->pr_entered_file == NULL)) {
    264 		printf("pool %s not entered?\n", pp->pr_wchan);
    265 		panic("pr_leave");
    266 	}
    267 
    268 	pp->pr_entered_file = NULL;
    269 	pp->pr_entered_line = 0;
    270 }
    271 
    272 static __inline void
    273 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    274 {
    275 
    276 	if (pp->pr_entered_file != NULL)
    277 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    278 		    pp->pr_entered_file, pp->pr_entered_line);
    279 }
    280 #else
    281 #define	pr_log(pp, v, action, file, line)
    282 #define	pr_printlog(pp, pi, pr)
    283 #define	pr_enter(pp, file, line)
    284 #define	pr_leave(pp)
    285 #define	pr_enter_check(pp, pr)
    286 #endif /* POOL_DIAGNOSTIC */
    287 
    288 /*
    289  * Return the pool page header based on page address.
    290  */
    291 static __inline struct pool_item_header *
    292 pr_find_pagehead(struct pool *pp, caddr_t page)
    293 {
    294 	struct pool_item_header *ph;
    295 
    296 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    297 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
    298 
    299 	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
    300 	     ph != NULL;
    301 	     ph = LIST_NEXT(ph, ph_hashlist)) {
    302 		if (ph->ph_page == page)
    303 			return (ph);
    304 	}
    305 	return (NULL);
    306 }
    307 
    308 /*
    309  * Remove a page from the pool.
    310  */
    311 static __inline void
    312 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    313      struct pool_pagelist *pq)
    314 {
    315 	int s;
    316 
    317 	/*
    318 	 * If the page was idle, decrement the idle page count.
    319 	 */
    320 	if (ph->ph_nmissing == 0) {
    321 #ifdef DIAGNOSTIC
    322 		if (pp->pr_nidle == 0)
    323 			panic("pr_rmpage: nidle inconsistent");
    324 		if (pp->pr_nitems < pp->pr_itemsperpage)
    325 			panic("pr_rmpage: nitems inconsistent");
    326 #endif
    327 		pp->pr_nidle--;
    328 	}
    329 
    330 	pp->pr_nitems -= pp->pr_itemsperpage;
    331 
    332 	/*
    333 	 * Unlink a page from the pool and release it (or queue it for release).
    334 	 */
    335 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    336 	if (pq) {
    337 		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
    338 	} else {
    339 		pool_allocator_free(pp, ph->ph_page);
    340 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    341 			LIST_REMOVE(ph, ph_hashlist);
    342 			s = splhigh();
    343 			pool_put(&phpool, ph);
    344 			splx(s);
    345 		}
    346 	}
    347 	pp->pr_npages--;
    348 	pp->pr_npagefree++;
    349 
    350 	if (pp->pr_curpage == ph) {
    351 		/*
    352 		 * Find a new non-empty page header, if any.
    353 		 * Start search from the page head, to increase the
    354 		 * chance for "high water" pages to be freed.
    355 		 */
    356 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    357 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    358 				break;
    359 
    360 		pp->pr_curpage = ph;
    361 	}
    362 }
    363 
    364 /*
    365  * Initialize the given pool resource structure.
    366  *
    367  * We export this routine to allow other kernel parts to declare
    368  * static pools that must be initialized before malloc() is available.
    369  */
    370 void
    371 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    372     const char *wchan, struct pool_allocator *palloc)
    373 {
    374 	int off, slack, i;
    375 
    376 #ifdef POOL_DIAGNOSTIC
    377 	/*
    378 	 * Always log if POOL_DIAGNOSTIC is defined.
    379 	 */
    380 	if (pool_logsize != 0)
    381 		flags |= PR_LOGGING;
    382 #endif
    383 
    384 #ifdef POOL_SUBPAGE
    385 	/*
    386 	 * XXX We don't provide a real `nointr' back-end
    387 	 * yet; all sub-pages come from a kmem back-end.
    388 	 * maybe some day...
    389 	 */
    390 	if (palloc == NULL) {
    391 		extern struct pool_allocator pool_allocator_kmem_subpage;
    392 		palloc = &pool_allocator_kmem_subpage;
    393 	}
    394 	/*
    395 	 * We'll assume any user-specified back-end allocator
    396 	 * will deal with sub-pages, or simply don't care.
    397 	 */
    398 #else
    399 	if (palloc == NULL)
    400 		palloc = &pool_allocator_kmem;
    401 #endif /* POOL_SUBPAGE */
    402 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
    403 		if (palloc->pa_pagesz == 0) {
    404 #ifdef POOL_SUBPAGE
    405 			if (palloc == &pool_allocator_kmem)
    406 				palloc->pa_pagesz = PAGE_SIZE;
    407 			else
    408 				palloc->pa_pagesz = POOL_SUBPAGE;
    409 #else
    410 			palloc->pa_pagesz = PAGE_SIZE;
    411 #endif /* POOL_SUBPAGE */
    412 		}
    413 
    414 		TAILQ_INIT(&palloc->pa_list);
    415 
    416 		simple_lock_init(&palloc->pa_slock);
    417 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    418 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    419 		palloc->pa_flags |= PA_INITIALIZED;
    420 	}
    421 
    422 	if (align == 0)
    423 		align = ALIGN(1);
    424 
    425 	if (size < sizeof(struct pool_item))
    426 		size = sizeof(struct pool_item);
    427 
    428 	size = ALIGN(size);
    429 #ifdef DIAGNOSTIC
    430 	if (size > palloc->pa_pagesz)
    431 		panic("pool_init: pool item size (%lu) too large",
    432 		      (u_long)size);
    433 #endif
    434 
    435 	/*
    436 	 * Initialize the pool structure.
    437 	 */
    438 	TAILQ_INIT(&pp->pr_pagelist);
    439 	TAILQ_INIT(&pp->pr_cachelist);
    440 	pp->pr_curpage = NULL;
    441 	pp->pr_npages = 0;
    442 	pp->pr_minitems = 0;
    443 	pp->pr_minpages = 0;
    444 	pp->pr_maxpages = UINT_MAX;
    445 	pp->pr_roflags = flags;
    446 	pp->pr_flags = 0;
    447 	pp->pr_size = size;
    448 	pp->pr_align = align;
    449 	pp->pr_wchan = wchan;
    450 	pp->pr_alloc = palloc;
    451 	pp->pr_nitems = 0;
    452 	pp->pr_nout = 0;
    453 	pp->pr_hardlimit = UINT_MAX;
    454 	pp->pr_hardlimit_warning = NULL;
    455 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    456 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    457 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    458 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    459 	pp->pr_drain_hook = NULL;
    460 	pp->pr_drain_hook_arg = NULL;
    461 
    462 	/*
    463 	 * Decide whether to put the page header off page to avoid
    464 	 * wasting too large a part of the page. Off-page page headers
    465 	 * go on a hash table, so we can match a returned item
    466 	 * with its header based on the page address.
    467 	 * We use 1/16 of the page size as the threshold (XXX: tune)
    468 	 */
    469 	if (pp->pr_size < palloc->pa_pagesz/16) {
    470 		/* Use the end of the page for the page header */
    471 		pp->pr_roflags |= PR_PHINPAGE;
    472 		pp->pr_phoffset = off = palloc->pa_pagesz -
    473 		    ALIGN(sizeof(struct pool_item_header));
    474 	} else {
    475 		/* The page header will be taken from our page header pool */
    476 		pp->pr_phoffset = 0;
    477 		off = palloc->pa_pagesz;
    478 		for (i = 0; i < PR_HASHTABSIZE; i++) {
    479 			LIST_INIT(&pp->pr_hashtab[i]);
    480 		}
    481 	}
    482 
    483 	/*
    484 	 * Alignment is to take place at `ioff' within the item. This means
    485 	 * we must reserve up to `align - 1' bytes on the page to allow
    486 	 * appropriate positioning of each item.
    487 	 *
    488 	 * Silently enforce `0 <= ioff < align'.
    489 	 */
    490 	pp->pr_itemoffset = ioff = ioff % align;
    491 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    492 	KASSERT(pp->pr_itemsperpage != 0);
    493 
    494 	/*
    495 	 * Use the slack between the chunks and the page header
    496 	 * for "cache coloring".
    497 	 */
    498 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    499 	pp->pr_maxcolor = (slack / align) * align;
    500 	pp->pr_curcolor = 0;
    501 
    502 	pp->pr_nget = 0;
    503 	pp->pr_nfail = 0;
    504 	pp->pr_nput = 0;
    505 	pp->pr_npagealloc = 0;
    506 	pp->pr_npagefree = 0;
    507 	pp->pr_hiwat = 0;
    508 	pp->pr_nidle = 0;
    509 
    510 #ifdef POOL_DIAGNOSTIC
    511 	if (flags & PR_LOGGING) {
    512 		if (kmem_map == NULL ||
    513 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    514 		     M_TEMP, M_NOWAIT)) == NULL)
    515 			pp->pr_roflags &= ~PR_LOGGING;
    516 		pp->pr_curlogentry = 0;
    517 		pp->pr_logsize = pool_logsize;
    518 	}
    519 #endif
    520 
    521 	pp->pr_entered_file = NULL;
    522 	pp->pr_entered_line = 0;
    523 
    524 	simple_lock_init(&pp->pr_slock);
    525 
    526 	/*
    527 	 * Initialize private page header pool and cache magazine pool if we
    528 	 * haven't done so yet.
    529 	 * XXX LOCKING.
    530 	 */
    531 	if (phpool.pr_size == 0) {
    532 #ifdef POOL_SUBPAGE
    533 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
    534 		    "phpool", &pool_allocator_kmem);
    535 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    536 		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
    537 #else
    538 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
    539 		    0, "phpool", NULL);
    540 #endif
    541 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
    542 		    0, "pcgpool", NULL);
    543 	}
    544 
    545 	/* Insert into the list of all pools. */
    546 	simple_lock(&pool_head_slock);
    547 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    548 	simple_unlock(&pool_head_slock);
    549 
    550 	/* Insert this into the list of pools using this allocator. */
    551 	simple_lock(&palloc->pa_slock);
    552 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    553 	simple_unlock(&palloc->pa_slock);
    554 }
    555 
    556 /*
    557  * De-commision a pool resource.
    558  */
    559 void
    560 pool_destroy(struct pool *pp)
    561 {
    562 	struct pool_item_header *ph;
    563 	struct pool_cache *pc;
    564 
    565 	/* Locking order: pool_allocator -> pool */
    566 	simple_lock(&pp->pr_alloc->pa_slock);
    567 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    568 	simple_unlock(&pp->pr_alloc->pa_slock);
    569 
    570 	/* Destroy all caches for this pool. */
    571 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
    572 		pool_cache_destroy(pc);
    573 
    574 #ifdef DIAGNOSTIC
    575 	if (pp->pr_nout != 0) {
    576 		pr_printlog(pp, NULL, printf);
    577 		panic("pool_destroy: pool busy: still out: %u\n",
    578 		    pp->pr_nout);
    579 	}
    580 #endif
    581 
    582 	/* Remove all pages */
    583 	while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
    584 		pr_rmpage(pp, ph, NULL);
    585 
    586 	/* Remove from global pool list */
    587 	simple_lock(&pool_head_slock);
    588 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    589 	if (drainpp == pp) {
    590 		drainpp = NULL;
    591 	}
    592 	simple_unlock(&pool_head_slock);
    593 
    594 #ifdef POOL_DIAGNOSTIC
    595 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    596 		free(pp->pr_log, M_TEMP);
    597 #endif
    598 }
    599 
    600 void
    601 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    602 {
    603 
    604 	/* XXX no locking -- must be used just after pool_init() */
    605 #ifdef DIAGNOSTIC
    606 	if (pp->pr_drain_hook != NULL)
    607 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
    608 #endif
    609 	pp->pr_drain_hook = fn;
    610 	pp->pr_drain_hook_arg = arg;
    611 }
    612 
    613 static __inline struct pool_item_header *
    614 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
    615 {
    616 	struct pool_item_header *ph;
    617 	int s;
    618 
    619 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
    620 
    621 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    622 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
    623 	else {
    624 		s = splhigh();
    625 		ph = pool_get(&phpool, flags);
    626 		splx(s);
    627 	}
    628 
    629 	return (ph);
    630 }
    631 
    632 /*
    633  * Grab an item from the pool; must be called at appropriate spl level
    634  */
    635 void *
    636 #ifdef POOL_DIAGNOSTIC
    637 _pool_get(struct pool *pp, int flags, const char *file, long line)
    638 #else
    639 pool_get(struct pool *pp, int flags)
    640 #endif
    641 {
    642 	struct pool_item *pi;
    643 	struct pool_item_header *ph;
    644 	void *v;
    645 
    646 #ifdef DIAGNOSTIC
    647 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
    648 			    (flags & PR_WAITOK) != 0))
    649 		panic("pool_get: must have NOWAIT");
    650 
    651 #ifdef LOCKDEBUG
    652 	if (flags & PR_WAITOK)
    653 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
    654 #endif
    655 #endif /* DIAGNOSTIC */
    656 
    657 	simple_lock(&pp->pr_slock);
    658 	pr_enter(pp, file, line);
    659 
    660  startover:
    661 	/*
    662 	 * Check to see if we've reached the hard limit.  If we have,
    663 	 * and we can wait, then wait until an item has been returned to
    664 	 * the pool.
    665 	 */
    666 #ifdef DIAGNOSTIC
    667 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
    668 		pr_leave(pp);
    669 		simple_unlock(&pp->pr_slock);
    670 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
    671 	}
    672 #endif
    673 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    674 		if (pp->pr_drain_hook != NULL) {
    675 			/*
    676 			 * Since the drain hook is going to free things
    677 			 * back to the pool, unlock, call the hook, re-lock,
    678 			 * and check the hardlimit condition again.
    679 			 */
    680 			pr_leave(pp);
    681 			simple_unlock(&pp->pr_slock);
    682 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
    683 			simple_lock(&pp->pr_slock);
    684 			pr_enter(pp, file, line);
    685 			if (pp->pr_nout < pp->pr_hardlimit)
    686 				goto startover;
    687 		}
    688 
    689 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    690 			/*
    691 			 * XXX: A warning isn't logged in this case.  Should
    692 			 * it be?
    693 			 */
    694 			pp->pr_flags |= PR_WANTED;
    695 			pr_leave(pp);
    696 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    697 			pr_enter(pp, file, line);
    698 			goto startover;
    699 		}
    700 
    701 		/*
    702 		 * Log a message that the hard limit has been hit.
    703 		 */
    704 		if (pp->pr_hardlimit_warning != NULL &&
    705 		    ratecheck(&pp->pr_hardlimit_warning_last,
    706 			      &pp->pr_hardlimit_ratecap))
    707 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    708 
    709 		pp->pr_nfail++;
    710 
    711 		pr_leave(pp);
    712 		simple_unlock(&pp->pr_slock);
    713 		return (NULL);
    714 	}
    715 
    716 	/*
    717 	 * The convention we use is that if `curpage' is not NULL, then
    718 	 * it points at a non-empty bucket. In particular, `curpage'
    719 	 * never points at a page header which has PR_PHINPAGE set and
    720 	 * has no items in its bucket.
    721 	 */
    722 	if ((ph = pp->pr_curpage) == NULL) {
    723 #ifdef DIAGNOSTIC
    724 		if (pp->pr_nitems != 0) {
    725 			simple_unlock(&pp->pr_slock);
    726 			printf("pool_get: %s: curpage NULL, nitems %u\n",
    727 			    pp->pr_wchan, pp->pr_nitems);
    728 			panic("pool_get: nitems inconsistent\n");
    729 		}
    730 #endif
    731 
    732 		/*
    733 		 * Call the back-end page allocator for more memory.
    734 		 * Release the pool lock, as the back-end page allocator
    735 		 * may block.
    736 		 */
    737 		pr_leave(pp);
    738 		simple_unlock(&pp->pr_slock);
    739 		v = pool_allocator_alloc(pp, flags);
    740 		if (__predict_true(v != NULL))
    741 			ph = pool_alloc_item_header(pp, v, flags);
    742 		simple_lock(&pp->pr_slock);
    743 		pr_enter(pp, file, line);
    744 
    745 		if (__predict_false(v == NULL || ph == NULL)) {
    746 			if (v != NULL)
    747 				pool_allocator_free(pp, v);
    748 
    749 			/*
    750 			 * We were unable to allocate a page or item
    751 			 * header, but we released the lock during
    752 			 * allocation, so perhaps items were freed
    753 			 * back to the pool.  Check for this case.
    754 			 */
    755 			if (pp->pr_curpage != NULL)
    756 				goto startover;
    757 
    758 			if ((flags & PR_WAITOK) == 0) {
    759 				pp->pr_nfail++;
    760 				pr_leave(pp);
    761 				simple_unlock(&pp->pr_slock);
    762 				return (NULL);
    763 			}
    764 
    765 			/*
    766 			 * Wait for items to be returned to this pool.
    767 			 *
    768 			 * XXX: maybe we should wake up once a second and
    769 			 * try again?
    770 			 */
    771 			pp->pr_flags |= PR_WANTED;
    772 			/* PA_WANTED is already set on the allocator. */
    773 			pr_leave(pp);
    774 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    775 			pr_enter(pp, file, line);
    776 			goto startover;
    777 		}
    778 
    779 		/* We have more memory; add it to the pool */
    780 		pool_prime_page(pp, v, ph);
    781 		pp->pr_npagealloc++;
    782 
    783 		/* Start the allocation process over. */
    784 		goto startover;
    785 	}
    786 
    787 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
    788 		pr_leave(pp);
    789 		simple_unlock(&pp->pr_slock);
    790 		panic("pool_get: %s: page empty", pp->pr_wchan);
    791 	}
    792 #ifdef DIAGNOSTIC
    793 	if (__predict_false(pp->pr_nitems == 0)) {
    794 		pr_leave(pp);
    795 		simple_unlock(&pp->pr_slock);
    796 		printf("pool_get: %s: items on itemlist, nitems %u\n",
    797 		    pp->pr_wchan, pp->pr_nitems);
    798 		panic("pool_get: nitems inconsistent\n");
    799 	}
    800 #endif
    801 
    802 #ifdef POOL_DIAGNOSTIC
    803 	pr_log(pp, v, PRLOG_GET, file, line);
    804 #endif
    805 
    806 #ifdef DIAGNOSTIC
    807 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
    808 		pr_printlog(pp, pi, printf);
    809 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
    810 		       " item addr %p\n",
    811 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
    812 	}
    813 #endif
    814 
    815 	/*
    816 	 * Remove from item list.
    817 	 */
    818 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
    819 	pp->pr_nitems--;
    820 	pp->pr_nout++;
    821 	if (ph->ph_nmissing == 0) {
    822 #ifdef DIAGNOSTIC
    823 		if (__predict_false(pp->pr_nidle == 0))
    824 			panic("pool_get: nidle inconsistent");
    825 #endif
    826 		pp->pr_nidle--;
    827 	}
    828 	ph->ph_nmissing++;
    829 	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
    830 #ifdef DIAGNOSTIC
    831 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
    832 			pr_leave(pp);
    833 			simple_unlock(&pp->pr_slock);
    834 			panic("pool_get: %s: nmissing inconsistent",
    835 			    pp->pr_wchan);
    836 		}
    837 #endif
    838 		/*
    839 		 * Find a new non-empty page header, if any.
    840 		 * Start search from the page head, to increase
    841 		 * the chance for "high water" pages to be freed.
    842 		 *
    843 		 * Migrate empty pages to the end of the list.  This
    844 		 * will speed the update of curpage as pages become
    845 		 * idle.  Empty pages intermingled with idle pages
    846 		 * is no big deal.  As soon as a page becomes un-empty,
    847 		 * it will move back to the head of the list.
    848 		 */
    849 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    850 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
    851 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    852 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    853 				break;
    854 
    855 		pp->pr_curpage = ph;
    856 	}
    857 
    858 	pp->pr_nget++;
    859 
    860 	/*
    861 	 * If we have a low water mark and we are now below that low
    862 	 * water mark, add more items to the pool.
    863 	 */
    864 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
    865 		/*
    866 		 * XXX: Should we log a warning?  Should we set up a timeout
    867 		 * to try again in a second or so?  The latter could break
    868 		 * a caller's assumptions about interrupt protection, etc.
    869 		 */
    870 	}
    871 
    872 	pr_leave(pp);
    873 	simple_unlock(&pp->pr_slock);
    874 	return (v);
    875 }
    876 
    877 /*
    878  * Internal version of pool_put().  Pool is already locked/entered.
    879  */
    880 static void
    881 pool_do_put(struct pool *pp, void *v)
    882 {
    883 	struct pool_item *pi = v;
    884 	struct pool_item_header *ph;
    885 	caddr_t page;
    886 	int s;
    887 
    888 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
    889 
    890 	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
    891 
    892 #ifdef DIAGNOSTIC
    893 	if (__predict_false(pp->pr_nout == 0)) {
    894 		printf("pool %s: putting with none out\n",
    895 		    pp->pr_wchan);
    896 		panic("pool_put");
    897 	}
    898 #endif
    899 
    900 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
    901 		pr_printlog(pp, NULL, printf);
    902 		panic("pool_put: %s: page header missing", pp->pr_wchan);
    903 	}
    904 
    905 #ifdef LOCKDEBUG
    906 	/*
    907 	 * Check if we're freeing a locked simple lock.
    908 	 */
    909 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
    910 #endif
    911 
    912 	/*
    913 	 * Return to item list.
    914 	 */
    915 #ifdef DIAGNOSTIC
    916 	pi->pi_magic = PI_MAGIC;
    917 #endif
    918 #ifdef DEBUG
    919 	{
    920 		int i, *ip = v;
    921 
    922 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
    923 			*ip++ = PI_MAGIC;
    924 		}
    925 	}
    926 #endif
    927 
    928 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
    929 	ph->ph_nmissing--;
    930 	pp->pr_nput++;
    931 	pp->pr_nitems++;
    932 	pp->pr_nout--;
    933 
    934 	/* Cancel "pool empty" condition if it exists */
    935 	if (pp->pr_curpage == NULL)
    936 		pp->pr_curpage = ph;
    937 
    938 	if (pp->pr_flags & PR_WANTED) {
    939 		pp->pr_flags &= ~PR_WANTED;
    940 		if (ph->ph_nmissing == 0)
    941 			pp->pr_nidle++;
    942 		wakeup((caddr_t)pp);
    943 		return;
    944 	}
    945 
    946 	/*
    947 	 * If this page is now complete, do one of two things:
    948 	 *
    949 	 *	(1) If we have more pages than the page high water
    950 	 *	    mark, free the page back to the system.
    951 	 *
    952 	 *	(2) Move it to the end of the page list, so that
    953 	 *	    we minimize our chances of fragmenting the
    954 	 *	    pool.  Idle pages migrate to the end (along with
    955 	 *	    completely empty pages, so that we find un-empty
    956 	 *	    pages more quickly when we update curpage) of the
    957 	 *	    list so they can be more easily swept up by
    958 	 *	    the pagedaemon when pages are scarce.
    959 	 */
    960 	if (ph->ph_nmissing == 0) {
    961 		pp->pr_nidle++;
    962 		if (pp->pr_npages > pp->pr_maxpages ||
    963 		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
    964 			pr_rmpage(pp, ph, NULL);
    965 		} else {
    966 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    967 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
    968 
    969 			/*
    970 			 * Update the timestamp on the page.  A page must
    971 			 * be idle for some period of time before it can
    972 			 * be reclaimed by the pagedaemon.  This minimizes
    973 			 * ping-pong'ing for memory.
    974 			 */
    975 			s = splclock();
    976 			ph->ph_time = mono_time;
    977 			splx(s);
    978 
    979 			/*
    980 			 * Update the current page pointer.  Just look for
    981 			 * the first page with any free items.
    982 			 *
    983 			 * XXX: Maybe we want an option to look for the
    984 			 * page with the fewest available items, to minimize
    985 			 * fragmentation?
    986 			 */
    987 			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    988 				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    989 					break;
    990 
    991 			pp->pr_curpage = ph;
    992 		}
    993 	}
    994 	/*
    995 	 * If the page has just become un-empty, move it to the head of
    996 	 * the list, and make it the current page.  The next allocation
    997 	 * will get the item from this page, instead of further fragmenting
    998 	 * the pool.
    999 	 */
   1000 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1001 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
   1002 		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
   1003 		pp->pr_curpage = ph;
   1004 	}
   1005 }
   1006 
   1007 /*
   1008  * Return resource to the pool; must be called at appropriate spl level
   1009  */
   1010 #ifdef POOL_DIAGNOSTIC
   1011 void
   1012 _pool_put(struct pool *pp, void *v, const char *file, long line)
   1013 {
   1014 
   1015 	simple_lock(&pp->pr_slock);
   1016 	pr_enter(pp, file, line);
   1017 
   1018 	pr_log(pp, v, PRLOG_PUT, file, line);
   1019 
   1020 	pool_do_put(pp, v);
   1021 
   1022 	pr_leave(pp);
   1023 	simple_unlock(&pp->pr_slock);
   1024 }
   1025 #undef pool_put
   1026 #endif /* POOL_DIAGNOSTIC */
   1027 
   1028 void
   1029 pool_put(struct pool *pp, void *v)
   1030 {
   1031 
   1032 	simple_lock(&pp->pr_slock);
   1033 
   1034 	pool_do_put(pp, v);
   1035 
   1036 	simple_unlock(&pp->pr_slock);
   1037 }
   1038 
   1039 #ifdef POOL_DIAGNOSTIC
   1040 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1041 #endif
   1042 
   1043 /*
   1044  * Add a page worth of items to the pool.
   1045  *
   1046  * Note, we must be called with the pool descriptor LOCKED.
   1047  */
   1048 static void
   1049 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
   1050 {
   1051 	struct pool_item *pi;
   1052 	caddr_t cp = storage;
   1053 	unsigned int align = pp->pr_align;
   1054 	unsigned int ioff = pp->pr_itemoffset;
   1055 	int n;
   1056 
   1057 #ifdef DIAGNOSTIC
   1058 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1059 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1060 #endif
   1061 
   1062 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1063 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
   1064 		    ph, ph_hashlist);
   1065 
   1066 	/*
   1067 	 * Insert page header.
   1068 	 */
   1069 	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
   1070 	TAILQ_INIT(&ph->ph_itemlist);
   1071 	ph->ph_page = storage;
   1072 	ph->ph_nmissing = 0;
   1073 	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
   1074 
   1075 	pp->pr_nidle++;
   1076 
   1077 	/*
   1078 	 * Color this page.
   1079 	 */
   1080 	cp = (caddr_t)(cp + pp->pr_curcolor);
   1081 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1082 		pp->pr_curcolor = 0;
   1083 
   1084 	/*
   1085 	 * Adjust storage to apply aligment to `pr_itemoffset' in each item.
   1086 	 */
   1087 	if (ioff != 0)
   1088 		cp = (caddr_t)(cp + (align - ioff));
   1089 
   1090 	/*
   1091 	 * Insert remaining chunks on the bucket list.
   1092 	 */
   1093 	n = pp->pr_itemsperpage;
   1094 	pp->pr_nitems += n;
   1095 
   1096 	while (n--) {
   1097 		pi = (struct pool_item *)cp;
   1098 
   1099 		/* Insert on page list */
   1100 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
   1101 #ifdef DIAGNOSTIC
   1102 		pi->pi_magic = PI_MAGIC;
   1103 #endif
   1104 		cp = (caddr_t)(cp + pp->pr_size);
   1105 	}
   1106 
   1107 	/*
   1108 	 * If the pool was depleted, point at the new page.
   1109 	 */
   1110 	if (pp->pr_curpage == NULL)
   1111 		pp->pr_curpage = ph;
   1112 
   1113 	if (++pp->pr_npages > pp->pr_hiwat)
   1114 		pp->pr_hiwat = pp->pr_npages;
   1115 }
   1116 
   1117 /*
   1118  * Used by pool_get() when nitems drops below the low water mark.  This
   1119  * is used to catch up nitmes with the low water mark.
   1120  *
   1121  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1122  *
   1123  * Note 2, this doesn't work with static pools.
   1124  *
   1125  * Note 3, we must be called with the pool already locked, and we return
   1126  * with it locked.
   1127  */
   1128 static int
   1129 pool_catchup(struct pool *pp)
   1130 {
   1131 	struct pool_item_header *ph;
   1132 	caddr_t cp;
   1133 	int error = 0;
   1134 
   1135 	while (POOL_NEEDS_CATCHUP(pp)) {
   1136 		/*
   1137 		 * Call the page back-end allocator for more memory.
   1138 		 *
   1139 		 * XXX: We never wait, so should we bother unlocking
   1140 		 * the pool descriptor?
   1141 		 */
   1142 		simple_unlock(&pp->pr_slock);
   1143 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
   1144 		if (__predict_true(cp != NULL))
   1145 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1146 		simple_lock(&pp->pr_slock);
   1147 		if (__predict_false(cp == NULL || ph == NULL)) {
   1148 			if (cp != NULL)
   1149 				pool_allocator_free(pp, cp);
   1150 			error = ENOMEM;
   1151 			break;
   1152 		}
   1153 		pool_prime_page(pp, cp, ph);
   1154 		pp->pr_npagealloc++;
   1155 	}
   1156 
   1157 	return (error);
   1158 }
   1159 
   1160 void
   1161 pool_setlowat(struct pool *pp, int n)
   1162 {
   1163 	int error;
   1164 
   1165 	simple_lock(&pp->pr_slock);
   1166 
   1167 	pp->pr_minitems = n;
   1168 	pp->pr_minpages = (n == 0)
   1169 		? 0
   1170 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1171 
   1172 	/* Make sure we're caught up with the newly-set low water mark. */
   1173 	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
   1174 		/*
   1175 		 * XXX: Should we log a warning?  Should we set up a timeout
   1176 		 * to try again in a second or so?  The latter could break
   1177 		 * a caller's assumptions about interrupt protection, etc.
   1178 		 */
   1179 	}
   1180 
   1181 	simple_unlock(&pp->pr_slock);
   1182 }
   1183 
   1184 void
   1185 pool_sethiwat(struct pool *pp, int n)
   1186 {
   1187 
   1188 	simple_lock(&pp->pr_slock);
   1189 
   1190 	pp->pr_maxpages = (n == 0)
   1191 		? 0
   1192 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1193 
   1194 	simple_unlock(&pp->pr_slock);
   1195 }
   1196 
   1197 void
   1198 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1199 {
   1200 
   1201 	simple_lock(&pp->pr_slock);
   1202 
   1203 	pp->pr_hardlimit = n;
   1204 	pp->pr_hardlimit_warning = warnmess;
   1205 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1206 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1207 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1208 
   1209 	/*
   1210 	 * In-line version of pool_sethiwat(), because we don't want to
   1211 	 * release the lock.
   1212 	 */
   1213 	pp->pr_maxpages = (n == 0)
   1214 		? 0
   1215 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1216 
   1217 	simple_unlock(&pp->pr_slock);
   1218 }
   1219 
   1220 /*
   1221  * Release all complete pages that have not been used recently.
   1222  */
   1223 int
   1224 #ifdef POOL_DIAGNOSTIC
   1225 _pool_reclaim(struct pool *pp, const char *file, long line)
   1226 #else
   1227 pool_reclaim(struct pool *pp)
   1228 #endif
   1229 {
   1230 	struct pool_item_header *ph, *phnext;
   1231 	struct pool_cache *pc;
   1232 	struct timeval curtime;
   1233 	struct pool_pagelist pq;
   1234 	int s;
   1235 
   1236 	if (pp->pr_drain_hook != NULL) {
   1237 		/*
   1238 		 * The drain hook must be called with the pool unlocked.
   1239 		 */
   1240 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1241 	}
   1242 
   1243 	if (simple_lock_try(&pp->pr_slock) == 0)
   1244 		return (0);
   1245 	pr_enter(pp, file, line);
   1246 
   1247 	TAILQ_INIT(&pq);
   1248 
   1249 	/*
   1250 	 * Reclaim items from the pool's caches.
   1251 	 */
   1252 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1253 		pool_cache_reclaim(pc);
   1254 
   1255 	s = splclock();
   1256 	curtime = mono_time;
   1257 	splx(s);
   1258 
   1259 	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
   1260 		phnext = TAILQ_NEXT(ph, ph_pagelist);
   1261 
   1262 		/* Check our minimum page claim */
   1263 		if (pp->pr_npages <= pp->pr_minpages)
   1264 			break;
   1265 
   1266 		if (ph->ph_nmissing == 0) {
   1267 			struct timeval diff;
   1268 			timersub(&curtime, &ph->ph_time, &diff);
   1269 			if (diff.tv_sec < pool_inactive_time)
   1270 				continue;
   1271 
   1272 			/*
   1273 			 * If freeing this page would put us below
   1274 			 * the low water mark, stop now.
   1275 			 */
   1276 			if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1277 			    pp->pr_minitems)
   1278 				break;
   1279 
   1280 			pr_rmpage(pp, ph, &pq);
   1281 		}
   1282 	}
   1283 
   1284 	pr_leave(pp);
   1285 	simple_unlock(&pp->pr_slock);
   1286 	if (TAILQ_EMPTY(&pq))
   1287 		return (0);
   1288 
   1289 	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
   1290 		TAILQ_REMOVE(&pq, ph, ph_pagelist);
   1291 		pool_allocator_free(pp, ph->ph_page);
   1292 		if (pp->pr_roflags & PR_PHINPAGE) {
   1293 			continue;
   1294 		}
   1295 		LIST_REMOVE(ph, ph_hashlist);
   1296 		s = splhigh();
   1297 		pool_put(&phpool, ph);
   1298 		splx(s);
   1299 	}
   1300 
   1301 	return (1);
   1302 }
   1303 
   1304 /*
   1305  * Drain pools, one at a time.
   1306  *
   1307  * Note, we must never be called from an interrupt context.
   1308  */
   1309 void
   1310 pool_drain(void *arg)
   1311 {
   1312 	struct pool *pp;
   1313 	int s;
   1314 
   1315 	pp = NULL;
   1316 	s = splvm();
   1317 	simple_lock(&pool_head_slock);
   1318 	if (drainpp == NULL) {
   1319 		drainpp = TAILQ_FIRST(&pool_head);
   1320 	}
   1321 	if (drainpp) {
   1322 		pp = drainpp;
   1323 		drainpp = TAILQ_NEXT(pp, pr_poollist);
   1324 	}
   1325 	simple_unlock(&pool_head_slock);
   1326 	pool_reclaim(pp);
   1327 	splx(s);
   1328 }
   1329 
   1330 /*
   1331  * Diagnostic helpers.
   1332  */
   1333 void
   1334 pool_print(struct pool *pp, const char *modif)
   1335 {
   1336 	int s;
   1337 
   1338 	s = splvm();
   1339 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1340 		printf("pool %s is locked; try again later\n",
   1341 		    pp->pr_wchan);
   1342 		splx(s);
   1343 		return;
   1344 	}
   1345 	pool_print1(pp, modif, printf);
   1346 	simple_unlock(&pp->pr_slock);
   1347 	splx(s);
   1348 }
   1349 
   1350 void
   1351 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1352 {
   1353 	int didlock = 0;
   1354 
   1355 	if (pp == NULL) {
   1356 		(*pr)("Must specify a pool to print.\n");
   1357 		return;
   1358 	}
   1359 
   1360 	/*
   1361 	 * Called from DDB; interrupts should be blocked, and all
   1362 	 * other processors should be paused.  We can skip locking
   1363 	 * the pool in this case.
   1364 	 *
   1365 	 * We do a simple_lock_try() just to print the lock
   1366 	 * status, however.
   1367 	 */
   1368 
   1369 	if (simple_lock_try(&pp->pr_slock) == 0)
   1370 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1371 	else
   1372 		didlock = 1;
   1373 
   1374 	pool_print1(pp, modif, pr);
   1375 
   1376 	if (didlock)
   1377 		simple_unlock(&pp->pr_slock);
   1378 }
   1379 
   1380 static void
   1381 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1382 {
   1383 	struct pool_item_header *ph;
   1384 	struct pool_cache *pc;
   1385 	struct pool_cache_group *pcg;
   1386 #ifdef DIAGNOSTIC
   1387 	struct pool_item *pi;
   1388 #endif
   1389 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1390 	char c;
   1391 
   1392 	while ((c = *modif++) != '\0') {
   1393 		if (c == 'l')
   1394 			print_log = 1;
   1395 		if (c == 'p')
   1396 			print_pagelist = 1;
   1397 		if (c == 'c')
   1398 			print_cache = 1;
   1399 		modif++;
   1400 	}
   1401 
   1402 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1403 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1404 	    pp->pr_roflags);
   1405 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1406 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1407 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1408 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1409 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1410 
   1411 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1412 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1413 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1414 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1415 
   1416 	if (print_pagelist == 0)
   1417 		goto skip_pagelist;
   1418 
   1419 	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
   1420 		(*pr)("\n\tpage list:\n");
   1421 	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
   1422 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1423 		    ph->ph_page, ph->ph_nmissing,
   1424 		    (u_long)ph->ph_time.tv_sec,
   1425 		    (u_long)ph->ph_time.tv_usec);
   1426 #ifdef DIAGNOSTIC
   1427 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1428 			if (pi->pi_magic != PI_MAGIC) {
   1429 				(*pr)("\t\t\titem %p, magic 0x%x\n",
   1430 				    pi, pi->pi_magic);
   1431 			}
   1432 		}
   1433 #endif
   1434 	}
   1435 	if (pp->pr_curpage == NULL)
   1436 		(*pr)("\tno current page\n");
   1437 	else
   1438 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1439 
   1440  skip_pagelist:
   1441 
   1442 	if (print_log == 0)
   1443 		goto skip_log;
   1444 
   1445 	(*pr)("\n");
   1446 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1447 		(*pr)("\tno log\n");
   1448 	else
   1449 		pr_printlog(pp, NULL, pr);
   1450 
   1451  skip_log:
   1452 
   1453 	if (print_cache == 0)
   1454 		goto skip_cache;
   1455 
   1456 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1457 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
   1458 		    pc->pc_allocfrom, pc->pc_freeto);
   1459 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1460 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1461 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1462 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
   1463 			for (i = 0; i < PCG_NOBJECTS; i++)
   1464 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
   1465 		}
   1466 	}
   1467 
   1468  skip_cache:
   1469 
   1470 	pr_enter_check(pp, pr);
   1471 }
   1472 
   1473 int
   1474 pool_chk(struct pool *pp, const char *label)
   1475 {
   1476 	struct pool_item_header *ph;
   1477 	int r = 0;
   1478 
   1479 	simple_lock(&pp->pr_slock);
   1480 
   1481 	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
   1482 		struct pool_item *pi;
   1483 		int n;
   1484 		caddr_t page;
   1485 
   1486 		page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
   1487 		if (page != ph->ph_page &&
   1488 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1489 			if (label != NULL)
   1490 				printf("%s: ", label);
   1491 			printf("pool(%p:%s): page inconsistency: page %p;"
   1492 			       " at page head addr %p (p %p)\n", pp,
   1493 				pp->pr_wchan, ph->ph_page,
   1494 				ph, page);
   1495 			r++;
   1496 			goto out;
   1497 		}
   1498 
   1499 		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
   1500 		     pi != NULL;
   1501 		     pi = TAILQ_NEXT(pi,pi_list), n++) {
   1502 
   1503 #ifdef DIAGNOSTIC
   1504 			if (pi->pi_magic != PI_MAGIC) {
   1505 				if (label != NULL)
   1506 					printf("%s: ", label);
   1507 				printf("pool(%s): free list modified: magic=%x;"
   1508 				       " page %p; item ordinal %d;"
   1509 				       " addr %p (p %p)\n",
   1510 					pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1511 					n, pi, page);
   1512 				panic("pool");
   1513 			}
   1514 #endif
   1515 			page =
   1516 			    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
   1517 			if (page == ph->ph_page)
   1518 				continue;
   1519 
   1520 			if (label != NULL)
   1521 				printf("%s: ", label);
   1522 			printf("pool(%p:%s): page inconsistency: page %p;"
   1523 			       " item ordinal %d; addr %p (p %p)\n", pp,
   1524 				pp->pr_wchan, ph->ph_page,
   1525 				n, pi, page);
   1526 			r++;
   1527 			goto out;
   1528 		}
   1529 	}
   1530 out:
   1531 	simple_unlock(&pp->pr_slock);
   1532 	return (r);
   1533 }
   1534 
   1535 /*
   1536  * pool_cache_init:
   1537  *
   1538  *	Initialize a pool cache.
   1539  *
   1540  *	NOTE: If the pool must be protected from interrupts, we expect
   1541  *	to be called at the appropriate interrupt priority level.
   1542  */
   1543 void
   1544 pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1545     int (*ctor)(void *, void *, int),
   1546     void (*dtor)(void *, void *),
   1547     void *arg)
   1548 {
   1549 
   1550 	TAILQ_INIT(&pc->pc_grouplist);
   1551 	simple_lock_init(&pc->pc_slock);
   1552 
   1553 	pc->pc_allocfrom = NULL;
   1554 	pc->pc_freeto = NULL;
   1555 	pc->pc_pool = pp;
   1556 
   1557 	pc->pc_ctor = ctor;
   1558 	pc->pc_dtor = dtor;
   1559 	pc->pc_arg  = arg;
   1560 
   1561 	pc->pc_hits   = 0;
   1562 	pc->pc_misses = 0;
   1563 
   1564 	pc->pc_ngroups = 0;
   1565 
   1566 	pc->pc_nitems = 0;
   1567 
   1568 	simple_lock(&pp->pr_slock);
   1569 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
   1570 	simple_unlock(&pp->pr_slock);
   1571 }
   1572 
   1573 /*
   1574  * pool_cache_destroy:
   1575  *
   1576  *	Destroy a pool cache.
   1577  */
   1578 void
   1579 pool_cache_destroy(struct pool_cache *pc)
   1580 {
   1581 	struct pool *pp = pc->pc_pool;
   1582 
   1583 	/* First, invalidate the entire cache. */
   1584 	pool_cache_invalidate(pc);
   1585 
   1586 	/* ...and remove it from the pool's cache list. */
   1587 	simple_lock(&pp->pr_slock);
   1588 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
   1589 	simple_unlock(&pp->pr_slock);
   1590 }
   1591 
   1592 static __inline void *
   1593 pcg_get(struct pool_cache_group *pcg)
   1594 {
   1595 	void *object;
   1596 	u_int idx;
   1597 
   1598 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1599 	KASSERT(pcg->pcg_avail != 0);
   1600 	idx = --pcg->pcg_avail;
   1601 
   1602 	KASSERT(pcg->pcg_objects[idx] != NULL);
   1603 	object = pcg->pcg_objects[idx];
   1604 	pcg->pcg_objects[idx] = NULL;
   1605 
   1606 	return (object);
   1607 }
   1608 
   1609 static __inline void
   1610 pcg_put(struct pool_cache_group *pcg, void *object)
   1611 {
   1612 	u_int idx;
   1613 
   1614 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   1615 	idx = pcg->pcg_avail++;
   1616 
   1617 	KASSERT(pcg->pcg_objects[idx] == NULL);
   1618 	pcg->pcg_objects[idx] = object;
   1619 }
   1620 
   1621 /*
   1622  * pool_cache_get:
   1623  *
   1624  *	Get an object from a pool cache.
   1625  */
   1626 void *
   1627 pool_cache_get(struct pool_cache *pc, int flags)
   1628 {
   1629 	struct pool_cache_group *pcg;
   1630 	void *object;
   1631 
   1632 #ifdef LOCKDEBUG
   1633 	if (flags & PR_WAITOK)
   1634 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
   1635 #endif
   1636 
   1637 	simple_lock(&pc->pc_slock);
   1638 
   1639 	if ((pcg = pc->pc_allocfrom) == NULL) {
   1640 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1641 			if (pcg->pcg_avail != 0) {
   1642 				pc->pc_allocfrom = pcg;
   1643 				goto have_group;
   1644 			}
   1645 		}
   1646 
   1647 		/*
   1648 		 * No groups with any available objects.  Allocate
   1649 		 * a new object, construct it, and return it to
   1650 		 * the caller.  We will allocate a group, if necessary,
   1651 		 * when the object is freed back to the cache.
   1652 		 */
   1653 		pc->pc_misses++;
   1654 		simple_unlock(&pc->pc_slock);
   1655 		object = pool_get(pc->pc_pool, flags);
   1656 		if (object != NULL && pc->pc_ctor != NULL) {
   1657 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   1658 				pool_put(pc->pc_pool, object);
   1659 				return (NULL);
   1660 			}
   1661 		}
   1662 		return (object);
   1663 	}
   1664 
   1665  have_group:
   1666 	pc->pc_hits++;
   1667 	pc->pc_nitems--;
   1668 	object = pcg_get(pcg);
   1669 
   1670 	if (pcg->pcg_avail == 0)
   1671 		pc->pc_allocfrom = NULL;
   1672 
   1673 	simple_unlock(&pc->pc_slock);
   1674 
   1675 	return (object);
   1676 }
   1677 
   1678 /*
   1679  * pool_cache_put:
   1680  *
   1681  *	Put an object back to the pool cache.
   1682  */
   1683 void
   1684 pool_cache_put(struct pool_cache *pc, void *object)
   1685 {
   1686 	struct pool_cache_group *pcg;
   1687 	int s;
   1688 
   1689 	simple_lock(&pc->pc_slock);
   1690 
   1691 	if ((pcg = pc->pc_freeto) == NULL) {
   1692 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1693 			if (pcg->pcg_avail != PCG_NOBJECTS) {
   1694 				pc->pc_freeto = pcg;
   1695 				goto have_group;
   1696 			}
   1697 		}
   1698 
   1699 		/*
   1700 		 * No empty groups to free the object to.  Attempt to
   1701 		 * allocate one.
   1702 		 */
   1703 		simple_unlock(&pc->pc_slock);
   1704 		s = splvm();
   1705 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1706 		splx(s);
   1707 		if (pcg != NULL) {
   1708 			memset(pcg, 0, sizeof(*pcg));
   1709 			simple_lock(&pc->pc_slock);
   1710 			pc->pc_ngroups++;
   1711 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1712 			if (pc->pc_freeto == NULL)
   1713 				pc->pc_freeto = pcg;
   1714 			goto have_group;
   1715 		}
   1716 
   1717 		/*
   1718 		 * Unable to allocate a cache group; destruct the object
   1719 		 * and free it back to the pool.
   1720 		 */
   1721 		pool_cache_destruct_object(pc, object);
   1722 		return;
   1723 	}
   1724 
   1725  have_group:
   1726 	pc->pc_nitems++;
   1727 	pcg_put(pcg, object);
   1728 
   1729 	if (pcg->pcg_avail == PCG_NOBJECTS)
   1730 		pc->pc_freeto = NULL;
   1731 
   1732 	simple_unlock(&pc->pc_slock);
   1733 }
   1734 
   1735 /*
   1736  * pool_cache_destruct_object:
   1737  *
   1738  *	Force destruction of an object and its release back into
   1739  *	the pool.
   1740  */
   1741 void
   1742 pool_cache_destruct_object(struct pool_cache *pc, void *object)
   1743 {
   1744 
   1745 	if (pc->pc_dtor != NULL)
   1746 		(*pc->pc_dtor)(pc->pc_arg, object);
   1747 	pool_put(pc->pc_pool, object);
   1748 }
   1749 
   1750 /*
   1751  * pool_cache_do_invalidate:
   1752  *
   1753  *	This internal function implements pool_cache_invalidate() and
   1754  *	pool_cache_reclaim().
   1755  */
   1756 static void
   1757 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
   1758     void (*putit)(struct pool *, void *))
   1759 {
   1760 	struct pool_cache_group *pcg, *npcg;
   1761 	void *object;
   1762 	int s;
   1763 
   1764 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   1765 	     pcg = npcg) {
   1766 		npcg = TAILQ_NEXT(pcg, pcg_list);
   1767 		while (pcg->pcg_avail != 0) {
   1768 			pc->pc_nitems--;
   1769 			object = pcg_get(pcg);
   1770 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   1771 				pc->pc_allocfrom = NULL;
   1772 			if (pc->pc_dtor != NULL)
   1773 				(*pc->pc_dtor)(pc->pc_arg, object);
   1774 			(*putit)(pc->pc_pool, object);
   1775 		}
   1776 		if (free_groups) {
   1777 			pc->pc_ngroups--;
   1778 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   1779 			if (pc->pc_freeto == pcg)
   1780 				pc->pc_freeto = NULL;
   1781 			s = splvm();
   1782 			pool_put(&pcgpool, pcg);
   1783 			splx(s);
   1784 		}
   1785 	}
   1786 }
   1787 
   1788 /*
   1789  * pool_cache_invalidate:
   1790  *
   1791  *	Invalidate a pool cache (destruct and release all of the
   1792  *	cached objects).
   1793  */
   1794 void
   1795 pool_cache_invalidate(struct pool_cache *pc)
   1796 {
   1797 
   1798 	simple_lock(&pc->pc_slock);
   1799 	pool_cache_do_invalidate(pc, 0, pool_put);
   1800 	simple_unlock(&pc->pc_slock);
   1801 }
   1802 
   1803 /*
   1804  * pool_cache_reclaim:
   1805  *
   1806  *	Reclaim a pool cache for pool_reclaim().
   1807  */
   1808 static void
   1809 pool_cache_reclaim(struct pool_cache *pc)
   1810 {
   1811 
   1812 	simple_lock(&pc->pc_slock);
   1813 	pool_cache_do_invalidate(pc, 1, pool_do_put);
   1814 	simple_unlock(&pc->pc_slock);
   1815 }
   1816 
   1817 /*
   1818  * Pool backend allocators.
   1819  *
   1820  * Each pool has a backend allocator that handles allocation, deallocation,
   1821  * and any additional draining that might be needed.
   1822  *
   1823  * We provide two standard allocators:
   1824  *
   1825  *	pool_allocator_kmem - the default when no allocator is specified
   1826  *
   1827  *	pool_allocator_nointr - used for pools that will not be accessed
   1828  *	in interrupt context.
   1829  */
   1830 void	*pool_page_alloc(struct pool *, int);
   1831 void	pool_page_free(struct pool *, void *);
   1832 
   1833 struct pool_allocator pool_allocator_kmem = {
   1834 	pool_page_alloc, pool_page_free, 0,
   1835 };
   1836 
   1837 void	*pool_page_alloc_nointr(struct pool *, int);
   1838 void	pool_page_free_nointr(struct pool *, void *);
   1839 
   1840 struct pool_allocator pool_allocator_nointr = {
   1841 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   1842 };
   1843 
   1844 #ifdef POOL_SUBPAGE
   1845 void	*pool_subpage_alloc(struct pool *, int);
   1846 void	pool_subpage_free(struct pool *, void *);
   1847 
   1848 struct pool_allocator pool_allocator_kmem_subpage = {
   1849 	pool_subpage_alloc, pool_subpage_free, 0,
   1850 };
   1851 #endif /* POOL_SUBPAGE */
   1852 
   1853 /*
   1854  * We have at least three different resources for the same allocation and
   1855  * each resource can be depleted.  First, we have the ready elements in the
   1856  * pool.  Then we have the resource (typically a vm_map) for this allocator.
   1857  * Finally, we have physical memory.  Waiting for any of these can be
   1858  * unnecessary when any other is freed, but the kernel doesn't support
   1859  * sleeping on multiple wait channels, so we have to employ another strategy.
   1860  *
   1861  * The caller sleeps on the pool (so that it can be awakened when an item
   1862  * is returned to the pool), but we set PA_WANT on the allocator.  When a
   1863  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
   1864  * will wake up all sleeping pools belonging to this allocator.
   1865  *
   1866  * XXX Thundering herd.
   1867  */
   1868 void *
   1869 pool_allocator_alloc(struct pool *org, int flags)
   1870 {
   1871 	struct pool_allocator *pa = org->pr_alloc;
   1872 	struct pool *pp, *start;
   1873 	int s, freed;
   1874 	void *res;
   1875 
   1876 	do {
   1877 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   1878 			return (res);
   1879 		if ((flags & PR_WAITOK) == 0) {
   1880 			/*
   1881 			 * We only run the drain hookhere if PR_NOWAIT.
   1882 			 * In other cases, the hook will be run in
   1883 			 * pool_reclaim().
   1884 			 */
   1885 			if (org->pr_drain_hook != NULL) {
   1886 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
   1887 				    flags);
   1888 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   1889 					return (res);
   1890 			}
   1891 			break;
   1892 		}
   1893 
   1894 		/*
   1895 		 * Drain all pools, except "org", that use this
   1896 		 * allocator.  We do this to reclaim VA space.
   1897 		 * pa_alloc is responsible for waiting for
   1898 		 * physical memory.
   1899 		 *
   1900 		 * XXX We risk looping forever if start if someone
   1901 		 * calls pool_destroy on "start".  But there is no
   1902 		 * other way to have potentially sleeping pool_reclaim,
   1903 		 * non-sleeping locks on pool_allocator, and some
   1904 		 * stirring of drained pools in the allocator.
   1905 		 *
   1906 		 * XXX Maybe we should use pool_head_slock for locking
   1907 		 * the allocators?
   1908 		 */
   1909 		freed = 0;
   1910 
   1911 		s = splvm();
   1912 		simple_lock(&pa->pa_slock);
   1913 		pp = start = TAILQ_FIRST(&pa->pa_list);
   1914 		do {
   1915 			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
   1916 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
   1917 			if (pp == org)
   1918 				continue;
   1919 			simple_unlock(&pa->pa_list);
   1920 			freed = pool_reclaim(pp);
   1921 			simple_lock(&pa->pa_list);
   1922 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
   1923 			 freed == 0);
   1924 
   1925 		if (freed == 0) {
   1926 			/*
   1927 			 * We set PA_WANT here, the caller will most likely
   1928 			 * sleep waiting for pages (if not, this won't hurt
   1929 			 * that much), and there is no way to set this in
   1930 			 * the caller without violating locking order.
   1931 			 */
   1932 			pa->pa_flags |= PA_WANT;
   1933 		}
   1934 		simple_unlock(&pa->pa_slock);
   1935 		splx(s);
   1936 	} while (freed);
   1937 	return (NULL);
   1938 }
   1939 
   1940 void
   1941 pool_allocator_free(struct pool *pp, void *v)
   1942 {
   1943 	struct pool_allocator *pa = pp->pr_alloc;
   1944 	int s;
   1945 
   1946 	(*pa->pa_free)(pp, v);
   1947 
   1948 	s = splvm();
   1949 	simple_lock(&pa->pa_slock);
   1950 	if ((pa->pa_flags & PA_WANT) == 0) {
   1951 		simple_unlock(&pa->pa_slock);
   1952 		splx(s);
   1953 		return;
   1954 	}
   1955 
   1956 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
   1957 		simple_lock(&pp->pr_slock);
   1958 		if ((pp->pr_flags & PR_WANTED) != 0) {
   1959 			pp->pr_flags &= ~PR_WANTED;
   1960 			wakeup(pp);
   1961 		}
   1962 		simple_unlock(&pp->pr_slock);
   1963 	}
   1964 	pa->pa_flags &= ~PA_WANT;
   1965 	simple_unlock(&pa->pa_slock);
   1966 	splx(s);
   1967 }
   1968 
   1969 void *
   1970 pool_page_alloc(struct pool *pp, int flags)
   1971 {
   1972 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   1973 
   1974 	return ((void *) uvm_km_alloc_poolpage(waitok));
   1975 }
   1976 
   1977 void
   1978 pool_page_free(struct pool *pp, void *v)
   1979 {
   1980 
   1981 	uvm_km_free_poolpage((vaddr_t) v);
   1982 }
   1983 
   1984 #ifdef POOL_SUBPAGE
   1985 /* Sub-page allocator, for machines with large hardware pages. */
   1986 void *
   1987 pool_subpage_alloc(struct pool *pp, int flags)
   1988 {
   1989 
   1990 	return (pool_get(&psppool, flags));
   1991 }
   1992 
   1993 void
   1994 pool_subpage_free(struct pool *pp, void *v)
   1995 {
   1996 
   1997 	pool_put(&psppool, v);
   1998 }
   1999 
   2000 /* We don't provide a real nointr allocator.  Maybe later. */
   2001 void *
   2002 pool_page_alloc_nointr(struct pool *pp, int flags)
   2003 {
   2004 
   2005 	return (pool_subpage_alloc(pp, flags));
   2006 }
   2007 
   2008 void
   2009 pool_page_free_nointr(struct pool *pp, void *v)
   2010 {
   2011 
   2012 	pool_subpage_free(pp, v);
   2013 }
   2014 #else
   2015 void *
   2016 pool_page_alloc_nointr(struct pool *pp, int flags)
   2017 {
   2018 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2019 
   2020 	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
   2021 	    uvm.kernel_object, waitok));
   2022 }
   2023 
   2024 void
   2025 pool_page_free_nointr(struct pool *pp, void *v)
   2026 {
   2027 
   2028 	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
   2029 }
   2030 #endif /* POOL_SUBPAGE */
   2031