      1 /*	$NetBSD: subr_pool.c,v 1.50.2.3 2001/09/26 19:55:05 nathanw Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include "opt_pool.h"
     41 #include "opt_poollog.h"
     42 #include "opt_lockdebug.h"
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/proc.h>
     47 #include <sys/errno.h>
     48 #include <sys/kernel.h>
     49 #include <sys/malloc.h>
     50 #include <sys/lock.h>
     51 #include <sys/pool.h>
     52 #include <sys/syslog.h>
     53 
     54 #include <uvm/uvm.h>
     55 
     56 /*
     57  * Pool resource management utility.
     58  *
     59  * Memory is allocated in pages which are split into pieces according
     60  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
     61  * in the pool structure and the individual pool items are on a linked list
     62  * headed by `ph_itemlist' in each page header. The memory for building
     63  * the page list is either taken from the allocated pages themselves (for
     64  * small pool items) or taken from an internal pool of page headers (`phpool').
     65  */
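/*
 * Example usage (an illustrative sketch only; `struct foo', `foopool'
 * and the M_DEVBUF malloc type are hypothetical choices):
 *
 *	static struct pool foopool;
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *
 *	f = pool_get(&foopool, PR_WAITOK);
 *	...
 *	pool_put(&foopool, f);
 *
 * Passing 0/NULL for the page size and allocator hooks selects
 * PAGE_SIZE and the default page allocator (see pool_init() below).
 */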
     66 
     67 /* List of all pools */
     68 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     69 
     70 /* Private pool for page header structures */
     71 static struct pool phpool;
     72 
     73 /* # of seconds to retain page after last use */
     74 int pool_inactive_time = 10;
     75 
     76 /* Next candidate for drainage (see pool_drain()) */
     77 static struct pool	*drainpp;
     78 
     79 /* This spin lock protects both pool_head and drainpp. */
     80 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
     81 
     82 struct pool_item_header {
     83 	/* Page headers */
     84 	TAILQ_ENTRY(pool_item_header)
     85 				ph_pagelist;	/* pool page list */
     86 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
     87 	LIST_ENTRY(pool_item_header)
     88 				ph_hashlist;	/* Off-page page headers */
     89 	int			ph_nmissing;	/* # of chunks in use */
     90 	caddr_t			ph_page;	/* this page's address */
     91 	struct timeval		ph_time;	/* last referenced */
     92 };
     93 TAILQ_HEAD(pool_pagelist,pool_item_header);
     94 
     95 struct pool_item {
     96 #ifdef DIAGNOSTIC
     97 	int pi_magic;
     98 #endif
     99 #define	PI_MAGIC 0xdeadbeef
    100 	/* Other entries use only this list entry */
    101 	TAILQ_ENTRY(pool_item)	pi_list;
    102 };
    103 
    104 #define	PR_HASH_INDEX(pp,addr) \
    105 	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
    106 
    107 #define	POOL_NEEDS_CATCHUP(pp)						\
    108 	((pp)->pr_nitems < (pp)->pr_minitems)
    109 
    110 /*
    111  * Pool cache management.
    112  *
    113  * Pool caches provide a way for constructed objects to be cached by the
     114  * pool subsystem.  This can improve performance by avoiding needless
     115  * object construction and destruction, which are deferred until
     116  * absolutely necessary.
    117  *
    118  * Caches are grouped into cache groups.  Each cache group references
    119  * up to 16 constructed objects.  When a cache allocates an object
    120  * from the pool, it calls the object's constructor and places it into
    121  * a cache group.  When a cache group frees an object back to the pool,
    122  * it first calls the object's destructor.  This allows the object to
    123  * persist in constructed form while freed to the cache.
    124  *
    125  * Multiple caches may exist for each pool.  This allows a single
    126  * object type to have multiple constructed forms.  The pool references
    127  * each cache, so that when a pool is drained by the pagedaemon, it can
    128  * drain each individual cache as well.  Each time a cache is drained,
    129  * the most idle cache group is freed to the pool in its entirety.
    130  *
     131  * Pool caches are laid on top of pools.  By layering them, we can avoid
    132  * the complexity of cache management for pools which would not benefit
    133  * from it.
    134  */
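/*
 * Example usage (an illustrative sketch only; the names and the
 * constructor/destructor are hypothetical, and `foopool' must already
 * have been set up with pool_init()):
 *
 *	int  foo_ctor(void *arg, void *object, int flags);
 *	void foo_dtor(void *arg, void *object);
 *
 *	static struct pool_cache foocache;
 *
 *	pool_cache_init(&foocache, &foopool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get(&foocache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foocache, obj);
 *
 * A constructor returns 0 on success; on a non-zero return,
 * pool_cache_get() releases the raw item and fails with NULL.
 */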
    135 
    136 /* The cache group pool. */
    137 static struct pool pcgpool;
    138 
    139 /* The pool cache group. */
    140 #define	PCG_NOBJECTS		16
    141 struct pool_cache_group {
    142 	TAILQ_ENTRY(pool_cache_group)
    143 		pcg_list;	/* link in the pool cache's group list */
    144 	u_int	pcg_avail;	/* # available objects */
    145 				/* pointers to the objects */
    146 	void	*pcg_objects[PCG_NOBJECTS];
    147 };
    148 
    149 static void	pool_cache_reclaim(struct pool_cache *);
    150 
    151 static int	pool_catchup(struct pool *);
    152 static void	pool_prime_page(struct pool *, caddr_t,
    153 		    struct pool_item_header *);
    154 static void	*pool_page_alloc(unsigned long, int, int);
    155 static void	pool_page_free(void *, unsigned long, int);
    156 
    157 static void pool_print1(struct pool *, const char *,
    158 	void (*)(const char *, ...));
    159 
    160 /*
    161  * Pool log entry. An array of these is allocated in pool_init().
    162  */
    163 struct pool_log {
    164 	const char	*pl_file;
    165 	long		pl_line;
    166 	int		pl_action;
    167 #define	PRLOG_GET	1
    168 #define	PRLOG_PUT	2
    169 	void		*pl_addr;
    170 };
    171 
    172 /* Number of entries in pool log buffers */
    173 #ifndef POOL_LOGSIZE
    174 #define	POOL_LOGSIZE	10
    175 #endif
    176 
    177 int pool_logsize = POOL_LOGSIZE;
    178 
    179 #ifdef POOL_DIAGNOSTIC
    180 static __inline void
    181 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    182 {
    183 	int n = pp->pr_curlogentry;
    184 	struct pool_log *pl;
    185 
    186 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    187 		return;
    188 
    189 	/*
    190 	 * Fill in the current entry. Wrap around and overwrite
    191 	 * the oldest entry if necessary.
    192 	 */
    193 	pl = &pp->pr_log[n];
    194 	pl->pl_file = file;
    195 	pl->pl_line = line;
    196 	pl->pl_action = action;
    197 	pl->pl_addr = v;
    198 	if (++n >= pp->pr_logsize)
    199 		n = 0;
    200 	pp->pr_curlogentry = n;
    201 }
    202 
    203 static void
    204 pr_printlog(struct pool *pp, struct pool_item *pi,
    205     void (*pr)(const char *, ...))
    206 {
    207 	int i = pp->pr_logsize;
    208 	int n = pp->pr_curlogentry;
    209 
    210 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    211 		return;
    212 
    213 	/*
    214 	 * Print all entries in this pool's log.
    215 	 */
    216 	while (i-- > 0) {
    217 		struct pool_log *pl = &pp->pr_log[n];
    218 		if (pl->pl_action != 0) {
    219 			if (pi == NULL || pi == pl->pl_addr) {
    220 				(*pr)("\tlog entry %d:\n", i);
    221 				(*pr)("\t\taction = %s, addr = %p\n",
    222 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    223 				    pl->pl_addr);
     224 				(*pr)("\t\tfile: %s at line %ld\n",
    225 				    pl->pl_file, pl->pl_line);
    226 			}
    227 		}
    228 		if (++n >= pp->pr_logsize)
    229 			n = 0;
    230 	}
    231 }
    232 
    233 static __inline void
    234 pr_enter(struct pool *pp, const char *file, long line)
    235 {
    236 
    237 	if (__predict_false(pp->pr_entered_file != NULL)) {
    238 		printf("pool %s: reentrancy at file %s line %ld\n",
    239 		    pp->pr_wchan, file, line);
    240 		printf("         previous entry at file %s line %ld\n",
    241 		    pp->pr_entered_file, pp->pr_entered_line);
    242 		panic("pr_enter");
    243 	}
    244 
    245 	pp->pr_entered_file = file;
    246 	pp->pr_entered_line = line;
    247 }
    248 
    249 static __inline void
    250 pr_leave(struct pool *pp)
    251 {
    252 
    253 	if (__predict_false(pp->pr_entered_file == NULL)) {
    254 		printf("pool %s not entered?\n", pp->pr_wchan);
    255 		panic("pr_leave");
    256 	}
    257 
    258 	pp->pr_entered_file = NULL;
    259 	pp->pr_entered_line = 0;
    260 }
    261 
    262 static __inline void
    263 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    264 {
    265 
    266 	if (pp->pr_entered_file != NULL)
    267 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    268 		    pp->pr_entered_file, pp->pr_entered_line);
    269 }
    270 #else
    271 #define	pr_log(pp, v, action, file, line)
    272 #define	pr_printlog(pp, pi, pr)
    273 #define	pr_enter(pp, file, line)
    274 #define	pr_leave(pp)
    275 #define	pr_enter_check(pp, pr)
    276 #endif /* POOL_DIAGNOSTIC */
    277 
    278 /*
    279  * Return the pool page header based on page address.
    280  */
    281 static __inline struct pool_item_header *
    282 pr_find_pagehead(struct pool *pp, caddr_t page)
    283 {
    284 	struct pool_item_header *ph;
    285 
    286 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    287 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
    288 
    289 	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
    290 	     ph != NULL;
    291 	     ph = LIST_NEXT(ph, ph_hashlist)) {
    292 		if (ph->ph_page == page)
    293 			return (ph);
    294 	}
    295 	return (NULL);
    296 }
    297 
    298 /*
    299  * Remove a page from the pool.
    300  */
    301 static __inline void
    302 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    303      struct pool_pagelist *pq)
    304 {
    305 	int s;
    306 
    307 	/*
    308 	 * If the page was idle, decrement the idle page count.
    309 	 */
    310 	if (ph->ph_nmissing == 0) {
    311 #ifdef DIAGNOSTIC
    312 		if (pp->pr_nidle == 0)
    313 			panic("pr_rmpage: nidle inconsistent");
    314 		if (pp->pr_nitems < pp->pr_itemsperpage)
    315 			panic("pr_rmpage: nitems inconsistent");
    316 #endif
    317 		pp->pr_nidle--;
    318 	}
    319 
    320 	pp->pr_nitems -= pp->pr_itemsperpage;
    321 
    322 	/*
    323 	 * Unlink a page from the pool and release it (or queue it for release).
    324 	 */
    325 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    326 	if (pq) {
    327 		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
    328 	} else {
    329 		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
    330 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    331 			LIST_REMOVE(ph, ph_hashlist);
    332 			s = splhigh();
    333 			pool_put(&phpool, ph);
    334 			splx(s);
    335 		}
    336 	}
    337 	pp->pr_npages--;
    338 	pp->pr_npagefree++;
    339 
    340 	if (pp->pr_curpage == ph) {
    341 		/*
    342 		 * Find a new non-empty page header, if any.
    343 		 * Start search from the page head, to increase the
    344 		 * chance for "high water" pages to be freed.
    345 		 */
    346 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    347 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    348 				break;
    349 
    350 		pp->pr_curpage = ph;
    351 	}
    352 }
    353 
    354 /*
    355  * Initialize the given pool resource structure.
    356  *
    357  * We export this routine to allow other kernel parts to declare
    358  * static pools that must be initialized before malloc() is available.
    359  */
    360 void
    361 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    362     const char *wchan, size_t pagesz,
    363     void *(*alloc)(unsigned long, int, int),
    364     void (*release)(void *, unsigned long, int),
    365     int mtype)
    366 {
    367 	int off, slack, i;
    368 
    369 #ifdef POOL_DIAGNOSTIC
    370 	/*
    371 	 * Always log if POOL_DIAGNOSTIC is defined.
    372 	 */
    373 	if (pool_logsize != 0)
    374 		flags |= PR_LOGGING;
    375 #endif
    376 
    377 	/*
    378 	 * Check arguments and construct default values.
    379 	 */
    380 	if (!powerof2(pagesz))
     381 		panic("pool_init: page size invalid (%lx)", (u_long)pagesz);
    382 
    383 	if (alloc == NULL && release == NULL) {
    384 		alloc = pool_page_alloc;
    385 		release = pool_page_free;
    386 		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
    387 	} else if ((alloc != NULL && release != NULL) == 0) {
     388 		/* If you specify one, you must specify both. */
    389 		panic("pool_init: must specify alloc and release together");
    390 	}
    391 
    392 	if (pagesz == 0)
    393 		pagesz = PAGE_SIZE;
    394 
    395 	if (align == 0)
    396 		align = ALIGN(1);
    397 
    398 	if (size < sizeof(struct pool_item))
    399 		size = sizeof(struct pool_item);
    400 
    401 	size = ALIGN(size);
    402 	if (size > pagesz)
    403 		panic("pool_init: pool item size (%lu) too large",
    404 		      (u_long)size);
    405 
    406 	/*
    407 	 * Initialize the pool structure.
    408 	 */
    409 	TAILQ_INIT(&pp->pr_pagelist);
    410 	TAILQ_INIT(&pp->pr_cachelist);
    411 	pp->pr_curpage = NULL;
    412 	pp->pr_npages = 0;
    413 	pp->pr_minitems = 0;
    414 	pp->pr_minpages = 0;
    415 	pp->pr_maxpages = UINT_MAX;
    416 	pp->pr_roflags = flags;
    417 	pp->pr_flags = 0;
    418 	pp->pr_size = size;
    419 	pp->pr_align = align;
    420 	pp->pr_wchan = wchan;
    421 	pp->pr_mtype = mtype;
    422 	pp->pr_alloc = alloc;
    423 	pp->pr_free = release;
    424 	pp->pr_pagesz = pagesz;
    425 	pp->pr_pagemask = ~(pagesz - 1);
    426 	pp->pr_pageshift = ffs(pagesz) - 1;
    427 	pp->pr_nitems = 0;
    428 	pp->pr_nout = 0;
    429 	pp->pr_hardlimit = UINT_MAX;
    430 	pp->pr_hardlimit_warning = NULL;
    431 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    432 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    433 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    434 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    435 
    436 	/*
    437 	 * Decide whether to put the page header off page to avoid
    438 	 * wasting too large a part of the page. Off-page page headers
    439 	 * go on a hash table, so we can match a returned item
    440 	 * with its header based on the page address.
    441 	 * We use 1/16 of the page size as the threshold (XXX: tune)
    442 	 */
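	/*
	 * For example, with a 4096-byte page the header stays in the
	 * page itself only for item sizes below 256 bytes; larger
	 * items take their header from `phpool' and find it again
	 * through the page-address hash.
	 */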
    443 	if (pp->pr_size < pagesz/16) {
    444 		/* Use the end of the page for the page header */
    445 		pp->pr_roflags |= PR_PHINPAGE;
    446 		pp->pr_phoffset = off =
    447 			pagesz - ALIGN(sizeof(struct pool_item_header));
    448 	} else {
    449 		/* The page header will be taken from our page header pool */
    450 		pp->pr_phoffset = 0;
    451 		off = pagesz;
    452 		for (i = 0; i < PR_HASHTABSIZE; i++) {
    453 			LIST_INIT(&pp->pr_hashtab[i]);
    454 		}
    455 	}
    456 
    457 	/*
    458 	 * Alignment is to take place at `ioff' within the item. This means
    459 	 * we must reserve up to `align - 1' bytes on the page to allow
    460 	 * appropriate positioning of each item.
    461 	 *
    462 	 * Silently enforce `0 <= ioff < align'.
    463 	 */
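	/*
	 * For example, with align = 8 and ioff = 4, the first item
	 * starts (align - ioff) % align = 4 bytes into the page, so
	 * that offset 4 within every item lands on an 8-byte boundary.
	 */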
    464 	pp->pr_itemoffset = ioff = ioff % align;
    465 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    466 	KASSERT(pp->pr_itemsperpage != 0);
    467 
    468 	/*
    469 	 * Use the slack between the chunks and the page header
    470 	 * for "cache coloring".
    471 	 */
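	/*
	 * For example, successive pages place their first item at
	 * offsets 0, align, 2*align, ... up to `pr_maxcolor', then
	 * wrap, so items on different pages do not all map to the
	 * same cache lines.
	 */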
    472 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    473 	pp->pr_maxcolor = (slack / align) * align;
    474 	pp->pr_curcolor = 0;
    475 
    476 	pp->pr_nget = 0;
    477 	pp->pr_nfail = 0;
    478 	pp->pr_nput = 0;
    479 	pp->pr_npagealloc = 0;
    480 	pp->pr_npagefree = 0;
    481 	pp->pr_hiwat = 0;
    482 	pp->pr_nidle = 0;
    483 
    484 #ifdef POOL_DIAGNOSTIC
    485 	if (flags & PR_LOGGING) {
    486 		if (kmem_map == NULL ||
    487 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    488 		     M_TEMP, M_NOWAIT)) == NULL)
    489 			pp->pr_roflags &= ~PR_LOGGING;
    490 		pp->pr_curlogentry = 0;
    491 		pp->pr_logsize = pool_logsize;
    492 	}
    493 #endif
    494 
    495 	pp->pr_entered_file = NULL;
    496 	pp->pr_entered_line = 0;
    497 
    498 	simple_lock_init(&pp->pr_slock);
    499 
    500 	/*
    501 	 * Initialize private page header pool and cache magazine pool if we
    502 	 * haven't done so yet.
    503 	 * XXX LOCKING.
    504 	 */
    505 	if (phpool.pr_size == 0) {
    506 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
    507 		    0, "phpool", 0, 0, 0, 0);
    508 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
    509 		    0, "pcgpool", 0, 0, 0, 0);
    510 	}
    511 
    512 	/* Insert into the list of all pools. */
    513 	simple_lock(&pool_head_slock);
    514 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    515 	simple_unlock(&pool_head_slock);
    516 }
    517 
    518 /*
     519  * Decommission a pool resource.
    520  */
    521 void
    522 pool_destroy(struct pool *pp)
    523 {
    524 	struct pool_item_header *ph;
    525 	struct pool_cache *pc;
    526 
    527 	/* Destroy all caches for this pool. */
    528 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
    529 		pool_cache_destroy(pc);
    530 
    531 #ifdef DIAGNOSTIC
    532 	if (pp->pr_nout != 0) {
    533 		pr_printlog(pp, NULL, printf);
     534 		panic("pool_destroy: pool busy: still out: %u",
    535 		    pp->pr_nout);
    536 	}
    537 #endif
    538 
    539 	/* Remove all pages */
    540 	if ((pp->pr_roflags & PR_STATIC) == 0)
    541 		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
    542 			pr_rmpage(pp, ph, NULL);
    543 
    544 	/* Remove from global pool list */
    545 	simple_lock(&pool_head_slock);
    546 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    547 	if (drainpp == pp) {
    548 		drainpp = NULL;
    549 	}
    550 	simple_unlock(&pool_head_slock);
    551 
    552 #ifdef POOL_DIAGNOSTIC
    553 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    554 		free(pp->pr_log, M_TEMP);
    555 #endif
    556 
    557 	if (pp->pr_roflags & PR_FREEHEADER)
    558 		free(pp, M_POOL);
    559 }
    560 
    561 static __inline struct pool_item_header *
    562 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
    563 {
    564 	struct pool_item_header *ph;
    565 	int s;
    566 
    567 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
    568 
    569 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    570 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
    571 	else {
    572 		s = splhigh();
    573 		ph = pool_get(&phpool, flags);
    574 		splx(s);
    575 	}
    576 
    577 	return (ph);
    578 }
    579 
    580 /*
    581  * Grab an item from the pool; must be called at appropriate spl level
    582  */
    583 void *
    584 #ifdef POOL_DIAGNOSTIC
    585 _pool_get(struct pool *pp, int flags, const char *file, long line)
    586 #else
    587 pool_get(struct pool *pp, int flags)
    588 #endif
    589 {
    590 	struct pool_item *pi;
    591 	struct pool_item_header *ph;
    592 	void *v;
    593 
    594 #ifdef DIAGNOSTIC
    595 	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
    596 			    (flags & PR_MALLOCOK))) {
    597 		pr_printlog(pp, NULL, printf);
    598 		panic("pool_get: static");
    599 	}
    600 
    601 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
    602 			    (flags & PR_WAITOK) != 0))
    603 		panic("pool_get: must have NOWAIT");
    604 
    605 #ifdef LOCKDEBUG
    606 	if (flags & PR_WAITOK)
    607 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
    608 #endif
    609 #endif /* DIAGNOSTIC */
    610 
    611 	simple_lock(&pp->pr_slock);
    612 	pr_enter(pp, file, line);
    613 
    614  startover:
    615 	/*
    616 	 * Check to see if we've reached the hard limit.  If we have,
    617 	 * and we can wait, then wait until an item has been returned to
    618 	 * the pool.
    619 	 */
    620 #ifdef DIAGNOSTIC
    621 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
    622 		pr_leave(pp);
    623 		simple_unlock(&pp->pr_slock);
    624 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
    625 	}
    626 #endif
    627 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    628 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    629 			/*
    630 			 * XXX: A warning isn't logged in this case.  Should
    631 			 * it be?
    632 			 */
    633 			pp->pr_flags |= PR_WANTED;
    634 			pr_leave(pp);
    635 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    636 			pr_enter(pp, file, line);
    637 			goto startover;
    638 		}
    639 
    640 		/*
    641 		 * Log a message that the hard limit has been hit.
    642 		 */
    643 		if (pp->pr_hardlimit_warning != NULL &&
    644 		    ratecheck(&pp->pr_hardlimit_warning_last,
    645 			      &pp->pr_hardlimit_ratecap))
    646 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    647 
    648 		if (flags & PR_URGENT)
    649 			panic("pool_get: urgent");
    650 
    651 		pp->pr_nfail++;
    652 
    653 		pr_leave(pp);
    654 		simple_unlock(&pp->pr_slock);
    655 		return (NULL);
    656 	}
    657 
    658 	/*
    659 	 * The convention we use is that if `curpage' is not NULL, then
    660 	 * it points at a non-empty bucket. In particular, `curpage'
    661 	 * never points at a page header which has PR_PHINPAGE set and
    662 	 * has no items in its bucket.
    663 	 */
    664 	if ((ph = pp->pr_curpage) == NULL) {
    665 #ifdef DIAGNOSTIC
    666 		if (pp->pr_nitems != 0) {
    667 			simple_unlock(&pp->pr_slock);
    668 			printf("pool_get: %s: curpage NULL, nitems %u\n",
    669 			    pp->pr_wchan, pp->pr_nitems);
     670 			panic("pool_get: nitems inconsistent");
    671 		}
    672 #endif
    673 
    674 		/*
    675 		 * Call the back-end page allocator for more memory.
    676 		 * Release the pool lock, as the back-end page allocator
    677 		 * may block.
    678 		 */
    679 		pr_leave(pp);
    680 		simple_unlock(&pp->pr_slock);
    681 		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
    682 		if (__predict_true(v != NULL))
    683 			ph = pool_alloc_item_header(pp, v, flags);
    684 		simple_lock(&pp->pr_slock);
    685 		pr_enter(pp, file, line);
    686 
    687 		if (__predict_false(v == NULL || ph == NULL)) {
    688 			if (v != NULL)
    689 				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
    690 
    691 			/*
    692 			 * We were unable to allocate a page or item
    693 			 * header, but we released the lock during
    694 			 * allocation, so perhaps items were freed
    695 			 * back to the pool.  Check for this case.
    696 			 */
    697 			if (pp->pr_curpage != NULL)
    698 				goto startover;
    699 
    700 			if (flags & PR_URGENT)
    701 				panic("pool_get: urgent");
    702 
    703 			if ((flags & PR_WAITOK) == 0) {
    704 				pp->pr_nfail++;
    705 				pr_leave(pp);
    706 				simple_unlock(&pp->pr_slock);
    707 				return (NULL);
    708 			}
    709 
    710 			/*
    711 			 * Wait for items to be returned to this pool.
    712 			 *
    713 			 * XXX: we actually want to wait just until
    714 			 * the page allocator has memory again. Depending
    715 			 * on this pool's usage, we might get stuck here
    716 			 * for a long time.
    717 			 *
    718 			 * XXX: maybe we should wake up once a second and
    719 			 * try again?
    720 			 */
    721 			pp->pr_flags |= PR_WANTED;
    722 			pr_leave(pp);
    723 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    724 			pr_enter(pp, file, line);
    725 			goto startover;
    726 		}
    727 
    728 		/* We have more memory; add it to the pool */
    729 		pool_prime_page(pp, v, ph);
    730 		pp->pr_npagealloc++;
    731 
    732 		/* Start the allocation process over. */
    733 		goto startover;
    734 	}
    735 
    736 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
    737 		pr_leave(pp);
    738 		simple_unlock(&pp->pr_slock);
    739 		panic("pool_get: %s: page empty", pp->pr_wchan);
    740 	}
    741 #ifdef DIAGNOSTIC
    742 	if (__predict_false(pp->pr_nitems == 0)) {
    743 		pr_leave(pp);
    744 		simple_unlock(&pp->pr_slock);
    745 		printf("pool_get: %s: items on itemlist, nitems %u\n",
    746 		    pp->pr_wchan, pp->pr_nitems);
     747 		panic("pool_get: nitems inconsistent");
    748 	}
    749 
    750 	pr_log(pp, v, PRLOG_GET, file, line);
    751 
    752 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
    753 		pr_printlog(pp, pi, printf);
    754 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
     755 		       " item addr %p",
    756 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
    757 	}
    758 #endif
    759 
    760 	/*
    761 	 * Remove from item list.
    762 	 */
    763 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
    764 	pp->pr_nitems--;
    765 	pp->pr_nout++;
    766 	if (ph->ph_nmissing == 0) {
    767 #ifdef DIAGNOSTIC
    768 		if (__predict_false(pp->pr_nidle == 0))
    769 			panic("pool_get: nidle inconsistent");
    770 #endif
    771 		pp->pr_nidle--;
    772 	}
    773 	ph->ph_nmissing++;
    774 	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
    775 #ifdef DIAGNOSTIC
    776 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
    777 			pr_leave(pp);
    778 			simple_unlock(&pp->pr_slock);
    779 			panic("pool_get: %s: nmissing inconsistent",
    780 			    pp->pr_wchan);
    781 		}
    782 #endif
    783 		/*
    784 		 * Find a new non-empty page header, if any.
    785 		 * Start search from the page head, to increase
    786 		 * the chance for "high water" pages to be freed.
    787 		 *
    788 		 * Migrate empty pages to the end of the list.  This
    789 		 * will speed the update of curpage as pages become
    790 		 * idle.  Empty pages intermingled with idle pages
    791 		 * is no big deal.  As soon as a page becomes un-empty,
    792 		 * it will move back to the head of the list.
    793 		 */
    794 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    795 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
    796 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    797 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    798 				break;
    799 
    800 		pp->pr_curpage = ph;
    801 	}
    802 
    803 	pp->pr_nget++;
    804 
    805 	/*
    806 	 * If we have a low water mark and we are now below that low
    807 	 * water mark, add more items to the pool.
    808 	 */
    809 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
    810 		/*
    811 		 * XXX: Should we log a warning?  Should we set up a timeout
    812 		 * to try again in a second or so?  The latter could break
    813 		 * a caller's assumptions about interrupt protection, etc.
    814 		 */
    815 	}
    816 
    817 	pr_leave(pp);
    818 	simple_unlock(&pp->pr_slock);
    819 	return (v);
    820 }
    821 
    822 /*
    823  * Internal version of pool_put().  Pool is already locked/entered.
    824  */
    825 static void
    826 pool_do_put(struct pool *pp, void *v)
    827 {
    828 	struct pool_item *pi = v;
    829 	struct pool_item_header *ph;
    830 	caddr_t page;
    831 	int s;
    832 
    833 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
    834 
    835 	page = (caddr_t)((u_long)v & pp->pr_pagemask);
    836 
    837 #ifdef DIAGNOSTIC
    838 	if (__predict_false(pp->pr_nout == 0)) {
    839 		printf("pool %s: putting with none out\n",
    840 		    pp->pr_wchan);
    841 		panic("pool_put");
    842 	}
    843 #endif
    844 
    845 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
    846 		pr_printlog(pp, NULL, printf);
    847 		panic("pool_put: %s: page header missing", pp->pr_wchan);
    848 	}
    849 
    850 #ifdef LOCKDEBUG
    851 	/*
    852 	 * Check if we're freeing a locked simple lock.
    853 	 */
    854 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
    855 #endif
    856 
    857 	/*
    858 	 * Return to item list.
    859 	 */
    860 #ifdef DIAGNOSTIC
    861 	pi->pi_magic = PI_MAGIC;
    862 #endif
    863 #ifdef DEBUG
    864 	{
    865 		int i, *ip = v;
    866 
    867 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
    868 			*ip++ = PI_MAGIC;
    869 		}
    870 	}
    871 #endif
    872 
    873 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
    874 	ph->ph_nmissing--;
    875 	pp->pr_nput++;
    876 	pp->pr_nitems++;
    877 	pp->pr_nout--;
    878 
    879 	/* Cancel "pool empty" condition if it exists */
    880 	if (pp->pr_curpage == NULL)
    881 		pp->pr_curpage = ph;
    882 
    883 	if (pp->pr_flags & PR_WANTED) {
    884 		pp->pr_flags &= ~PR_WANTED;
    885 		if (ph->ph_nmissing == 0)
    886 			pp->pr_nidle++;
    887 		wakeup((caddr_t)pp);
    888 		return;
    889 	}
    890 
    891 	/*
    892 	 * If this page is now complete, do one of two things:
    893 	 *
    894 	 *	(1) If we have more pages than the page high water
    895 	 *	    mark, free the page back to the system.
    896 	 *
    897 	 *	(2) Move it to the end of the page list, so that
    898 	 *	    we minimize our chances of fragmenting the
     899 	 *	    pool.  Idle pages migrate to the end of the
     900 	 *	    list (along with completely empty pages, so that
     901 	 *	    we find un-empty pages more quickly when we
     902 	 *	    update curpage), where they can be more easily
     903 	 *	    swept up by the pagedaemon when pages are scarce.
    904 	 */
    905 	if (ph->ph_nmissing == 0) {
    906 		pp->pr_nidle++;
    907 		if (pp->pr_npages > pp->pr_maxpages) {
    908 			pr_rmpage(pp, ph, NULL);
    909 		} else {
    910 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    911 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
    912 
    913 			/*
    914 			 * Update the timestamp on the page.  A page must
    915 			 * be idle for some period of time before it can
    916 			 * be reclaimed by the pagedaemon.  This minimizes
    917 			 * ping-pong'ing for memory.
    918 			 */
    919 			s = splclock();
    920 			ph->ph_time = mono_time;
    921 			splx(s);
    922 
    923 			/*
    924 			 * Update the current page pointer.  Just look for
    925 			 * the first page with any free items.
    926 			 *
    927 			 * XXX: Maybe we want an option to look for the
    928 			 * page with the fewest available items, to minimize
    929 			 * fragmentation?
    930 			 */
    931 			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    932 				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    933 					break;
    934 
    935 			pp->pr_curpage = ph;
    936 		}
    937 	}
    938 	/*
    939 	 * If the page has just become un-empty, move it to the head of
    940 	 * the list, and make it the current page.  The next allocation
    941 	 * will get the item from this page, instead of further fragmenting
    942 	 * the pool.
    943 	 */
    944 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
    945 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    946 		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
    947 		pp->pr_curpage = ph;
    948 	}
    949 }
    950 
    951 /*
    952  * Return resource to the pool; must be called at appropriate spl level
    953  */
    954 #ifdef POOL_DIAGNOSTIC
    955 void
    956 _pool_put(struct pool *pp, void *v, const char *file, long line)
    957 {
    958 
    959 	simple_lock(&pp->pr_slock);
    960 	pr_enter(pp, file, line);
    961 
    962 	pr_log(pp, v, PRLOG_PUT, file, line);
    963 
    964 	pool_do_put(pp, v);
    965 
    966 	pr_leave(pp);
    967 	simple_unlock(&pp->pr_slock);
    968 }
    969 #undef pool_put
    970 #endif /* POOL_DIAGNOSTIC */
    971 
    972 void
    973 pool_put(struct pool *pp, void *v)
    974 {
    975 
    976 	simple_lock(&pp->pr_slock);
    977 
    978 	pool_do_put(pp, v);
    979 
    980 	simple_unlock(&pp->pr_slock);
    981 }
    982 
    983 #ifdef POOL_DIAGNOSTIC
    984 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
    985 #endif
    986 
    987 /*
    988  * Add N items to the pool.
    989  */
    990 int
    991 pool_prime(struct pool *pp, int n)
    992 {
    993 	struct pool_item_header *ph;
    994 	caddr_t cp;
    995 	int newpages, error = 0;
    996 
    997 	simple_lock(&pp->pr_slock);
    998 
    999 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1000 
   1001 	while (newpages-- > 0) {
   1002 		simple_unlock(&pp->pr_slock);
   1003 		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
   1004 		if (__predict_true(cp != NULL))
   1005 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1006 		simple_lock(&pp->pr_slock);
   1007 
   1008 		if (__predict_false(cp == NULL || ph == NULL)) {
   1009 			error = ENOMEM;
   1010 			if (cp != NULL)
   1011 				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
   1012 			break;
   1013 		}
   1014 
   1015 		pool_prime_page(pp, cp, ph);
   1016 		pp->pr_npagealloc++;
   1017 		pp->pr_minpages++;
   1018 	}
   1019 
   1020 	if (pp->pr_minpages >= pp->pr_maxpages)
   1021 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1022 
   1023 	simple_unlock(&pp->pr_slock);
    1024 	return (error);
   1025 }
   1026 
   1027 /*
   1028  * Add a page worth of items to the pool.
   1029  *
   1030  * Note, we must be called with the pool descriptor LOCKED.
   1031  */
   1032 static void
   1033 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
   1034 {
   1035 	struct pool_item *pi;
   1036 	caddr_t cp = storage;
   1037 	unsigned int align = pp->pr_align;
   1038 	unsigned int ioff = pp->pr_itemoffset;
   1039 	int n;
   1040 
   1041 	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
   1042 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1043 
   1044 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1045 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
   1046 		    ph, ph_hashlist);
   1047 
   1048 	/*
   1049 	 * Insert page header.
   1050 	 */
   1051 	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
   1052 	TAILQ_INIT(&ph->ph_itemlist);
   1053 	ph->ph_page = storage;
   1054 	ph->ph_nmissing = 0;
   1055 	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
   1056 
   1057 	pp->pr_nidle++;
   1058 
   1059 	/*
   1060 	 * Color this page.
   1061 	 */
   1062 	cp = (caddr_t)(cp + pp->pr_curcolor);
   1063 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1064 		pp->pr_curcolor = 0;
   1065 
   1066 	/*
    1067 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1068 	 */
   1069 	if (ioff != 0)
   1070 		cp = (caddr_t)(cp + (align - ioff));
   1071 
   1072 	/*
   1073 	 * Insert remaining chunks on the bucket list.
   1074 	 */
   1075 	n = pp->pr_itemsperpage;
   1076 	pp->pr_nitems += n;
   1077 
   1078 	while (n--) {
   1079 		pi = (struct pool_item *)cp;
   1080 
   1081 		/* Insert on page list */
   1082 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
   1083 #ifdef DIAGNOSTIC
   1084 		pi->pi_magic = PI_MAGIC;
   1085 #endif
   1086 		cp = (caddr_t)(cp + pp->pr_size);
   1087 	}
   1088 
   1089 	/*
   1090 	 * If the pool was depleted, point at the new page.
   1091 	 */
   1092 	if (pp->pr_curpage == NULL)
   1093 		pp->pr_curpage = ph;
   1094 
   1095 	if (++pp->pr_npages > pp->pr_hiwat)
   1096 		pp->pr_hiwat = pp->pr_npages;
   1097 }
   1098 
   1099 /*
    1100  * Used by pool_get() when nitems drops below the low water mark, to
    1101  * catch nitems back up to the low water mark.
   1102  *
   1103  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1104  *
   1105  * Note 2, this doesn't work with static pools.
   1106  *
   1107  * Note 3, we must be called with the pool already locked, and we return
   1108  * with it locked.
   1109  */
   1110 static int
   1111 pool_catchup(struct pool *pp)
   1112 {
   1113 	struct pool_item_header *ph;
   1114 	caddr_t cp;
   1115 	int error = 0;
   1116 
   1117 	if (pp->pr_roflags & PR_STATIC) {
   1118 		/*
   1119 		 * We dropped below the low water mark, and this is not a
   1120 		 * good thing.  Log a warning.
   1121 		 *
   1122 		 * XXX: rate-limit this?
   1123 		 */
   1124 		printf("WARNING: static pool `%s' dropped below low water "
   1125 		    "mark\n", pp->pr_wchan);
   1126 		return (0);
   1127 	}
   1128 
   1129 	while (POOL_NEEDS_CATCHUP(pp)) {
   1130 		/*
   1131 		 * Call the page back-end allocator for more memory.
   1132 		 *
   1133 		 * XXX: We never wait, so should we bother unlocking
   1134 		 * the pool descriptor?
   1135 		 */
   1136 		simple_unlock(&pp->pr_slock);
   1137 		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
   1138 		if (__predict_true(cp != NULL))
   1139 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1140 		simple_lock(&pp->pr_slock);
   1141 		if (__predict_false(cp == NULL || ph == NULL)) {
   1142 			if (cp != NULL)
   1143 				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
   1144 			error = ENOMEM;
   1145 			break;
   1146 		}
   1147 		pool_prime_page(pp, cp, ph);
   1148 		pp->pr_npagealloc++;
   1149 	}
   1150 
   1151 	return (error);
   1152 }
   1153 
   1154 void
   1155 pool_setlowat(struct pool *pp, int n)
   1156 {
   1157 	int error;
   1158 
   1159 	simple_lock(&pp->pr_slock);
   1160 
   1161 	pp->pr_minitems = n;
   1162 	pp->pr_minpages = (n == 0)
   1163 		? 0
   1164 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1165 
   1166 	/* Make sure we're caught up with the newly-set low water mark. */
    1167 	if (POOL_NEEDS_CATCHUP(pp) && ((error = pool_catchup(pp)) != 0)) {
   1168 		/*
   1169 		 * XXX: Should we log a warning?  Should we set up a timeout
   1170 		 * to try again in a second or so?  The latter could break
   1171 		 * a caller's assumptions about interrupt protection, etc.
   1172 		 */
   1173 	}
   1174 
   1175 	simple_unlock(&pp->pr_slock);
   1176 }
   1177 
   1178 void
   1179 pool_sethiwat(struct pool *pp, int n)
   1180 {
   1181 
   1182 	simple_lock(&pp->pr_slock);
   1183 
   1184 	pp->pr_maxpages = (n == 0)
   1185 		? 0
   1186 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1187 
   1188 	simple_unlock(&pp->pr_slock);
   1189 }
   1190 
   1191 void
   1192 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1193 {
   1194 
   1195 	simple_lock(&pp->pr_slock);
   1196 
   1197 	pp->pr_hardlimit = n;
   1198 	pp->pr_hardlimit_warning = warnmess;
   1199 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1200 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1201 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1202 
   1203 	/*
   1204 	 * In-line version of pool_sethiwat(), because we don't want to
   1205 	 * release the lock.
   1206 	 */
   1207 	pp->pr_maxpages = (n == 0)
   1208 		? 0
   1209 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1210 
   1211 	simple_unlock(&pp->pr_slock);
   1212 }
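/*
 * Example (an illustrative sketch only; the limits and message are
 * hypothetical):
 *
 *	pool_setlowat(&foopool, 16);
 *	pool_sethardlimit(&foopool, 2048,
 *	    "WARNING: foopool limit reached", 60);
 *
 * This keeps at least 16 items primed, makes pool_get() fail once
 * 2048 items are outstanding, and logs the warning at most once
 * every 60 seconds.  Note that pool_sethardlimit() also caps the
 * page high water mark accordingly.
 */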
   1213 
   1214 /*
   1215  * Default page allocator.
   1216  */
   1217 static void *
   1218 pool_page_alloc(unsigned long sz, int flags, int mtype)
   1219 {
   1220 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   1221 
   1222 	return ((void *)uvm_km_alloc_poolpage(waitok));
   1223 }
   1224 
   1225 static void
   1226 pool_page_free(void *v, unsigned long sz, int mtype)
   1227 {
   1228 
   1229 	uvm_km_free_poolpage((vaddr_t)v);
   1230 }
   1231 
   1232 /*
   1233  * Alternate pool page allocator for pools that know they will
   1234  * never be accessed in interrupt context.
   1235  */
   1236 void *
   1237 pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
   1238 {
   1239 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   1240 
   1241 	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
   1242 	    waitok));
   1243 }
   1244 
   1245 void
   1246 pool_page_free_nointr(void *v, unsigned long sz, int mtype)
   1247 {
   1248 
   1249 	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
   1250 }
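/*
 * Example (an illustrative sketch only): a pool whose items are never
 * touched from interrupt context could be initialized with these hooks
 * instead of the interrupt-safe defaults:
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_DEVBUF);
 */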
   1251 
   1252 
   1253 /*
   1254  * Release all complete pages that have not been used recently.
   1255  */
   1256 void
   1257 #ifdef POOL_DIAGNOSTIC
   1258 _pool_reclaim(struct pool *pp, const char *file, long line)
   1259 #else
   1260 pool_reclaim(struct pool *pp)
   1261 #endif
   1262 {
   1263 	struct pool_item_header *ph, *phnext;
   1264 	struct pool_cache *pc;
   1265 	struct timeval curtime;
   1266 	struct pool_pagelist pq;
   1267 	int s;
   1268 
   1269 	if (pp->pr_roflags & PR_STATIC)
   1270 		return;
   1271 
   1272 	if (simple_lock_try(&pp->pr_slock) == 0)
   1273 		return;
   1274 	pr_enter(pp, file, line);
   1275 	TAILQ_INIT(&pq);
   1276 
   1277 	/*
   1278 	 * Reclaim items from the pool's caches.
   1279 	 */
   1280 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1281 		pool_cache_reclaim(pc);
   1282 
   1283 	s = splclock();
   1284 	curtime = mono_time;
   1285 	splx(s);
   1286 
   1287 	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
   1288 		phnext = TAILQ_NEXT(ph, ph_pagelist);
   1289 
   1290 		/* Check our minimum page claim */
   1291 		if (pp->pr_npages <= pp->pr_minpages)
   1292 			break;
   1293 
   1294 		if (ph->ph_nmissing == 0) {
   1295 			struct timeval diff;
   1296 			timersub(&curtime, &ph->ph_time, &diff);
   1297 			if (diff.tv_sec < pool_inactive_time)
   1298 				continue;
   1299 
   1300 			/*
   1301 			 * If freeing this page would put us below
   1302 			 * the low water mark, stop now.
   1303 			 */
   1304 			if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1305 			    pp->pr_minitems)
   1306 				break;
   1307 
   1308 			pr_rmpage(pp, ph, &pq);
   1309 		}
   1310 	}
   1311 
   1312 	pr_leave(pp);
   1313 	simple_unlock(&pp->pr_slock);
   1314 	if (TAILQ_EMPTY(&pq)) {
   1315 		return;
   1316 	}
   1317 	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
   1318 		TAILQ_REMOVE(&pq, ph, ph_pagelist);
   1319 		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
   1320 		if (pp->pr_roflags & PR_PHINPAGE) {
   1321 			continue;
   1322 		}
   1323 		LIST_REMOVE(ph, ph_hashlist);
   1324 		s = splhigh();
   1325 		pool_put(&phpool, ph);
   1326 		splx(s);
   1327 	}
   1328 }
   1329 
   1330 
   1331 /*
   1332  * Drain pools, one at a time.
   1333  *
   1334  * Note, we must never be called from an interrupt context.
   1335  */
   1336 void
   1337 pool_drain(void *arg)
   1338 {
   1339 	struct pool *pp;
   1340 	int s;
   1341 
   1342 	pp = NULL;
   1343 	s = splvm();
   1344 	simple_lock(&pool_head_slock);
   1345 	if (drainpp == NULL) {
   1346 		drainpp = TAILQ_FIRST(&pool_head);
   1347 	}
   1348 	if (drainpp) {
   1349 		pp = drainpp;
   1350 		drainpp = TAILQ_NEXT(pp, pr_poollist);
   1351 	}
   1352 	simple_unlock(&pool_head_slock);
   1353 	splx(s);
   1354 
    1355 	if (pp != NULL)
         		pool_reclaim(pp);
   1356 }
   1357 
   1358 
   1359 /*
   1360  * Diagnostic helpers.
   1361  */
   1362 void
   1363 pool_print(struct pool *pp, const char *modif)
   1364 {
   1365 	int s;
   1366 
   1367 	s = splvm();
   1368 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1369 		printf("pool %s is locked; try again later\n",
   1370 		    pp->pr_wchan);
   1371 		splx(s);
   1372 		return;
   1373 	}
   1374 	pool_print1(pp, modif, printf);
   1375 	simple_unlock(&pp->pr_slock);
   1376 	splx(s);
   1377 }
   1378 
   1379 void
   1380 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1381 {
   1382 	int didlock = 0;
   1383 
   1384 	if (pp == NULL) {
   1385 		(*pr)("Must specify a pool to print.\n");
   1386 		return;
   1387 	}
   1388 
   1389 	/*
   1390 	 * Called from DDB; interrupts should be blocked, and all
   1391 	 * other processors should be paused.  We can skip locking
   1392 	 * the pool in this case.
   1393 	 *
   1394 	 * We do a simple_lock_try() just to print the lock
   1395 	 * status, however.
   1396 	 */
   1397 
   1398 	if (simple_lock_try(&pp->pr_slock) == 0)
   1399 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1400 	else
   1401 		didlock = 1;
   1402 
   1403 	pool_print1(pp, modif, pr);
   1404 
   1405 	if (didlock)
   1406 		simple_unlock(&pp->pr_slock);
   1407 }
   1408 
   1409 static void
   1410 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1411 {
   1412 	struct pool_item_header *ph;
   1413 	struct pool_cache *pc;
   1414 	struct pool_cache_group *pcg;
   1415 #ifdef DIAGNOSTIC
   1416 	struct pool_item *pi;
   1417 #endif
   1418 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1419 	char c;
   1420 
   1421 	while ((c = *modif++) != '\0') {
   1422 		if (c == 'l')
   1423 			print_log = 1;
   1424 		if (c == 'p')
   1425 			print_pagelist = 1;
   1426 		if (c == 'c')
   1427 			print_cache = 1;
   1429 	}
   1430 
   1431 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1432 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1433 	    pp->pr_roflags);
   1434 	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
   1435 	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
   1436 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1437 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1438 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1439 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1440 
   1441 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1442 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1443 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1444 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1445 
   1446 	if (print_pagelist == 0)
   1447 		goto skip_pagelist;
   1448 
   1449 	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
   1450 		(*pr)("\n\tpage list:\n");
   1451 	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
   1452 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1453 		    ph->ph_page, ph->ph_nmissing,
   1454 		    (u_long)ph->ph_time.tv_sec,
   1455 		    (u_long)ph->ph_time.tv_usec);
   1456 #ifdef DIAGNOSTIC
   1457 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1458 			if (pi->pi_magic != PI_MAGIC) {
   1459 				(*pr)("\t\t\titem %p, magic 0x%x\n",
   1460 				    pi, pi->pi_magic);
   1461 			}
   1462 		}
   1463 #endif
   1464 	}
   1465 	if (pp->pr_curpage == NULL)
   1466 		(*pr)("\tno current page\n");
   1467 	else
   1468 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1469 
   1470  skip_pagelist:
   1471 
   1472 	if (print_log == 0)
   1473 		goto skip_log;
   1474 
   1475 	(*pr)("\n");
   1476 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1477 		(*pr)("\tno log\n");
   1478 	else
   1479 		pr_printlog(pp, NULL, pr);
   1480 
   1481  skip_log:
   1482 
   1483 	if (print_cache == 0)
   1484 		goto skip_cache;
   1485 
   1486 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1487 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
   1488 		    pc->pc_allocfrom, pc->pc_freeto);
   1489 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1490 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1491 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1492 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
   1493 			for (i = 0; i < PCG_NOBJECTS; i++)
   1494 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
   1495 		}
   1496 	}
   1497 
   1498  skip_cache:
   1499 
   1500 	pr_enter_check(pp, pr);
   1501 }
   1502 
   1503 int
   1504 pool_chk(struct pool *pp, const char *label)
   1505 {
   1506 	struct pool_item_header *ph;
   1507 	int r = 0;
   1508 
   1509 	simple_lock(&pp->pr_slock);
   1510 
   1511 	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
   1512 		struct pool_item *pi;
   1513 		int n;
   1514 		caddr_t page;
   1515 
   1516 		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
   1517 		if (page != ph->ph_page &&
   1518 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1519 			if (label != NULL)
   1520 				printf("%s: ", label);
   1521 			printf("pool(%p:%s): page inconsistency: page %p;"
   1522 			       " at page head addr %p (p %p)\n", pp,
   1523 				pp->pr_wchan, ph->ph_page,
   1524 				ph, page);
   1525 			r++;
   1526 			goto out;
   1527 		}
   1528 
   1529 		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
   1530 		     pi != NULL;
   1531 		     pi = TAILQ_NEXT(pi,pi_list), n++) {
   1532 
   1533 #ifdef DIAGNOSTIC
   1534 			if (pi->pi_magic != PI_MAGIC) {
   1535 				if (label != NULL)
   1536 					printf("%s: ", label);
   1537 				printf("pool(%s): free list modified: magic=%x;"
   1538 				       " page %p; item ordinal %d;"
   1539 				       " addr %p (p %p)\n",
   1540 					pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1541 					n, pi, page);
   1542 				panic("pool");
   1543 			}
   1544 #endif
   1545 			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
   1546 			if (page == ph->ph_page)
   1547 				continue;
   1548 
   1549 			if (label != NULL)
   1550 				printf("%s: ", label);
   1551 			printf("pool(%p:%s): page inconsistency: page %p;"
   1552 			       " item ordinal %d; addr %p (p %p)\n", pp,
   1553 				pp->pr_wchan, ph->ph_page,
   1554 				n, pi, page);
   1555 			r++;
   1556 			goto out;
   1557 		}
   1558 	}
   1559 out:
   1560 	simple_unlock(&pp->pr_slock);
   1561 	return (r);
   1562 }
   1563 
   1564 /*
   1565  * pool_cache_init:
   1566  *
   1567  *	Initialize a pool cache.
   1568  *
   1569  *	NOTE: If the pool must be protected from interrupts, we expect
   1570  *	to be called at the appropriate interrupt priority level.
   1571  */
   1572 void
   1573 pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1574     int (*ctor)(void *, void *, int),
   1575     void (*dtor)(void *, void *),
   1576     void *arg)
   1577 {
   1578 
   1579 	TAILQ_INIT(&pc->pc_grouplist);
   1580 	simple_lock_init(&pc->pc_slock);
   1581 
   1582 	pc->pc_allocfrom = NULL;
   1583 	pc->pc_freeto = NULL;
   1584 	pc->pc_pool = pp;
   1585 
   1586 	pc->pc_ctor = ctor;
   1587 	pc->pc_dtor = dtor;
   1588 	pc->pc_arg  = arg;
   1589 
   1590 	pc->pc_hits   = 0;
   1591 	pc->pc_misses = 0;
   1592 
   1593 	pc->pc_ngroups = 0;
   1594 
   1595 	pc->pc_nitems = 0;
   1596 
   1597 	simple_lock(&pp->pr_slock);
   1598 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
   1599 	simple_unlock(&pp->pr_slock);
   1600 }
   1601 
   1602 /*
   1603  * pool_cache_destroy:
   1604  *
   1605  *	Destroy a pool cache.
   1606  */
   1607 void
   1608 pool_cache_destroy(struct pool_cache *pc)
   1609 {
   1610 	struct pool *pp = pc->pc_pool;
   1611 
   1612 	/* First, invalidate the entire cache. */
   1613 	pool_cache_invalidate(pc);
   1614 
   1615 	/* ...and remove it from the pool's cache list. */
   1616 	simple_lock(&pp->pr_slock);
   1617 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
   1618 	simple_unlock(&pp->pr_slock);
   1619 }
   1620 
   1621 static __inline void *
   1622 pcg_get(struct pool_cache_group *pcg)
   1623 {
   1624 	void *object;
   1625 	u_int idx;
   1626 
   1627 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1628 	KASSERT(pcg->pcg_avail != 0);
   1629 	idx = --pcg->pcg_avail;
   1630 
   1631 	KASSERT(pcg->pcg_objects[idx] != NULL);
   1632 	object = pcg->pcg_objects[idx];
   1633 	pcg->pcg_objects[idx] = NULL;
   1634 
   1635 	return (object);
   1636 }
   1637 
   1638 static __inline void
   1639 pcg_put(struct pool_cache_group *pcg, void *object)
   1640 {
   1641 	u_int idx;
   1642 
   1643 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   1644 	idx = pcg->pcg_avail++;
   1645 
   1646 	KASSERT(pcg->pcg_objects[idx] == NULL);
   1647 	pcg->pcg_objects[idx] = object;
   1648 }
   1649 
   1650 /*
   1651  * pool_cache_get:
   1652  *
   1653  *	Get an object from a pool cache.
   1654  */
   1655 void *
   1656 pool_cache_get(struct pool_cache *pc, int flags)
   1657 {
   1658 	struct pool_cache_group *pcg;
   1659 	void *object;
   1660 
   1661 #ifdef LOCKDEBUG
   1662 	if (flags & PR_WAITOK)
   1663 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
   1664 #endif
   1665 
   1666 	simple_lock(&pc->pc_slock);
   1667 
   1668 	if ((pcg = pc->pc_allocfrom) == NULL) {
   1669 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1670 			if (pcg->pcg_avail != 0) {
   1671 				pc->pc_allocfrom = pcg;
   1672 				goto have_group;
   1673 			}
   1674 		}
   1675 
   1676 		/*
   1677 		 * No groups with any available objects.  Allocate
   1678 		 * a new object, construct it, and return it to
   1679 		 * the caller.  We will allocate a group, if necessary,
   1680 		 * when the object is freed back to the cache.
   1681 		 */
   1682 		pc->pc_misses++;
   1683 		simple_unlock(&pc->pc_slock);
   1684 		object = pool_get(pc->pc_pool, flags);
   1685 		if (object != NULL && pc->pc_ctor != NULL) {
   1686 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   1687 				pool_put(pc->pc_pool, object);
   1688 				return (NULL);
   1689 			}
   1690 		}
   1691 		return (object);
   1692 	}
   1693 
   1694  have_group:
   1695 	pc->pc_hits++;
   1696 	pc->pc_nitems--;
   1697 	object = pcg_get(pcg);
   1698 
   1699 	if (pcg->pcg_avail == 0)
   1700 		pc->pc_allocfrom = NULL;
   1701 
   1702 	simple_unlock(&pc->pc_slock);
   1703 
   1704 	return (object);
   1705 }
   1706 
   1707 /*
   1708  * pool_cache_put:
   1709  *
   1710  *	Put an object back to the pool cache.
   1711  */
   1712 void
   1713 pool_cache_put(struct pool_cache *pc, void *object)
   1714 {
   1715 	struct pool_cache_group *pcg;
   1716 	int s;
   1717 
   1718 	simple_lock(&pc->pc_slock);
   1719 
   1720 	if ((pcg = pc->pc_freeto) == NULL) {
   1721 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1722 			if (pcg->pcg_avail != PCG_NOBJECTS) {
   1723 				pc->pc_freeto = pcg;
   1724 				goto have_group;
   1725 			}
   1726 		}
   1727 
   1728 		/*
   1729 		 * No empty groups to free the object to.  Attempt to
   1730 		 * allocate one.
   1731 		 */
   1732 		simple_unlock(&pc->pc_slock);
   1733 		s = splvm();
   1734 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1735 		splx(s);
   1736 		if (pcg != NULL) {
   1737 			memset(pcg, 0, sizeof(*pcg));
   1738 			simple_lock(&pc->pc_slock);
   1739 			pc->pc_ngroups++;
   1740 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1741 			if (pc->pc_freeto == NULL)
   1742 				pc->pc_freeto = pcg;
   1743 			goto have_group;
   1744 		}
   1745 
   1746 		/*
   1747 		 * Unable to allocate a cache group; destruct the object
   1748 		 * and free it back to the pool.
   1749 		 */
   1750 		pool_cache_destruct_object(pc, object);
   1751 		return;
   1752 	}
   1753 
   1754  have_group:
   1755 	pc->pc_nitems++;
   1756 	pcg_put(pcg, object);
   1757 
   1758 	if (pcg->pcg_avail == PCG_NOBJECTS)
   1759 		pc->pc_freeto = NULL;
   1760 
   1761 	simple_unlock(&pc->pc_slock);
   1762 }
   1763 
   1764 /*
   1765  * pool_cache_destruct_object:
   1766  *
   1767  *	Force destruction of an object and its release back into
   1768  *	the pool.
   1769  */
   1770 void
   1771 pool_cache_destruct_object(struct pool_cache *pc, void *object)
   1772 {
   1773 
   1774 	if (pc->pc_dtor != NULL)
   1775 		(*pc->pc_dtor)(pc->pc_arg, object);
   1776 	pool_put(pc->pc_pool, object);
   1777 }
   1778 
   1779 /*
   1780  * pool_cache_do_invalidate:
   1781  *
   1782  *	This internal function implements pool_cache_invalidate() and
   1783  *	pool_cache_reclaim().
   1784  */
   1785 static void
   1786 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
   1787     void (*putit)(struct pool *, void *))
   1788 {
   1789 	struct pool_cache_group *pcg, *npcg;
   1790 	void *object;
   1791 	int s;
   1792 
   1793 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   1794 	     pcg = npcg) {
   1795 		npcg = TAILQ_NEXT(pcg, pcg_list);
   1796 		while (pcg->pcg_avail != 0) {
   1797 			pc->pc_nitems--;
   1798 			object = pcg_get(pcg);
   1799 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   1800 				pc->pc_allocfrom = NULL;
   1801 			if (pc->pc_dtor != NULL)
   1802 				(*pc->pc_dtor)(pc->pc_arg, object);
   1803 			(*putit)(pc->pc_pool, object);
   1804 		}
   1805 		if (free_groups) {
   1806 			pc->pc_ngroups--;
   1807 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   1808 			if (pc->pc_freeto == pcg)
   1809 				pc->pc_freeto = NULL;
   1810 			s = splvm();
   1811 			pool_put(&pcgpool, pcg);
   1812 			splx(s);
   1813 		}
   1814 	}
   1815 }
   1816 
   1817 /*
   1818  * pool_cache_invalidate:
   1819  *
   1820  *	Invalidate a pool cache (destruct and release all of the
   1821  *	cached objects).
   1822  */
   1823 void
   1824 pool_cache_invalidate(struct pool_cache *pc)
   1825 {
   1826 
   1827 	simple_lock(&pc->pc_slock);
   1828 	pool_cache_do_invalidate(pc, 0, pool_put);
   1829 	simple_unlock(&pc->pc_slock);
   1830 }
   1831 
   1832 /*
   1833  * pool_cache_reclaim:
   1834  *
   1835  *	Reclaim a pool cache for pool_reclaim().
   1836  */
   1837 static void
   1838 pool_cache_reclaim(struct pool_cache *pc)
   1839 {
   1840 
   1841 	simple_lock(&pc->pc_slock);
   1842 	pool_cache_do_invalidate(pc, 1, pool_do_put);
   1843 	simple_unlock(&pc->pc_slock);
   1844 }
   1845