/*	$NetBSD: subr_pool.c,v 1.25 1999/05/10 21:13:05 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pool.h"
#include "opt_poollog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
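
/*
 * Example usage (an illustrative sketch only; `struct foo', `foo_pool'
 * and "foopl" are hypothetical): create a pool of foo structures with
 * the default page allocator, then allocate and free one item.
 * PR_WAITOK may sleep, so this must run in process context.
 *
 *	struct pool *foo_pool;
 *	struct foo *f;
 *
 *	foo_pool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *	if (foo_pool == NULL)
 *		panic("foo_pool");
 *
 *	f = pool_get(foo_pool, PR_WAITOK);
 *	...
 *	pool_put(foo_pool, f);
 */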

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
				ph_hashlist;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#define	PI_MAGIC 0xdeadbeef
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};


#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))



static struct pool_item_header
		*pr_find_pagehead __P((struct pool *, caddr_t));
static void	pr_rmpage __P((struct pool *, struct pool_item_header *));
static int	pool_catchup __P((struct pool *));
static void	pool_prime_page __P((struct pool *, caddr_t));
static void	*pool_page_alloc __P((unsigned long, int, int));
static void	pool_page_free __P((void *, unsigned long, int));

static void pool_print1 __P((struct pool *, const char *,
	void (*)(const char *, ...)));

/*
 * Pool log entry. An array of these is allocated in pool_create().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef DIAGNOSTIC
static void	pr_log __P((struct pool *, void *, int, const char *, long));
static void	pr_printlog __P((struct pool *, struct pool_item *,
		    void (*)(const char *, ...)));
static void	pr_enter __P((struct pool *, const char *, long));
static void	pr_leave __P((struct pool *));
static void	pr_enter_check __P((struct pool *,
		    void (*)(const char *, ...)));

static __inline__ void
pr_log(pp, v, action, file, line)
	struct pool	*pp;
	void		*v;
	int		action;
	const char	*file;
	long		line;
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(pp, pi, pr)
	struct pool *pp;
	struct pool_item *pi;
	void (*pr) __P((const char *, ...));
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline__ void
pr_enter(pp, file, line)
	struct pool *pp;
	const char *file;
	long line;
{

	if (pp->pr_entered_file != NULL) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline__ void
pr_leave(pp)
	struct pool *pp;
{

	if (pp->pr_entered_file == NULL) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline__ void
pr_enter_check(pp, pr)
	struct pool *pp;
	void (*pr) __P((const char *, ...));
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* DIAGNOSTIC */

/*
 * Return the pool page header based on page address.
 */
static __inline__ struct pool_item_header *
pr_find_pagehead(pp, page)
	struct pool *pp;
	caddr_t page;
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline__ void
pr_rmpage(pp, ph)
	struct pool *pp;
	struct pool_item_header *ph;
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		LIST_REMOVE(ph, ph_hashlist);
		pool_put(&phpool, ph);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Allocate and initialize a pool.
 */
struct pool *
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
	size_t	size;
	u_int	align;
	u_int	ioff;
	int	nitems;
	const char *wchan;
	size_t	pagesz;
	void	*(*alloc) __P((unsigned long, int, int));
	void	(*release) __P((void *, unsigned long, int));
	int	mtype;
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
		  alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
	struct pool	*pp;
	size_t		size;
	u_int		align;
	u_int		ioff;
	int		flags;
	const char	*wchan;
	size_t		pagesz;
	void		*(*alloc) __P((unsigned long, int, int));
	void		(*release) __P((void *, unsigned long, int));
	int		mtype;
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = ALIGN(size);
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap = 0;
	memset(&pp->pr_hardlimit_warning_last, 0,
	    sizeof(pp->pr_hardlimit_warning_last));

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
			pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
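
	/*
	 * Worked example (illustrative numbers only): with a 4096-byte
	 * page, pr_size 64, align 8 and ioff 0, the header fits in the
	 * page (64 < 4096/16 = 256).  Assuming the page header occupies,
	 * say, 40 bytes, then off = 4096 - 40 = 4056, itemsperpage =
	 * 4056 / 64 = 63, slack = 4056 - 63 * 64 = 24, and maxcolor =
	 * (24 / 8) * 8 = 24, so successive pages start their items at
	 * color offsets 0, 8, 16 and 24 before wrapping back to 0.
	 */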

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool if we haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
			  0, "phpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}
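
/*
 * Example (an illustrative sketch; `map_pool' and "mappl" are
 * stand-ins): a statically declared pool initialized during early
 * bootstrap, before malloc() is available, as the comment above
 * pool_init() describes.
 *
 *	static struct pool map_pool;
 *
 *	pool_init(&map_pool, sizeof(struct vm_map), 0, 0, 0, "mappl",
 *	    0, NULL, NULL, M_VMMAP);
 */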

/*
 * De-commission a pool resource.
 */
void
pool_destroy(pp)
	struct pool *pp;
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	/* XXX Only clear this if we were drainpp? */
	drainpp = NULL;
	simple_unlock(&pool_head_slock);

	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);

	if (pp->pr_roflags & PR_FREEHEADER)
		free(pp, M_POOL);
}


/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
_pool_get(pp, flags, file, line)
	struct pool *pp;
	int flags;
	const char *file;
	long line;
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if ((pp->pr_roflags & PR_STATIC) && (flags & PR_MALLOCOK)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_get: static");
	}
#endif

	if (curproc == NULL && (flags & PR_WAITOK) != 0)
		panic("pool_get: must have NOWAIT");

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (pp->pr_nout > pp->pr_hardlimit) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (pp->pr_nout == pp->pr_hardlimit) {
		if (flags & PR_WAITOK) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}
		if (pp->pr_hardlimit_warning != NULL) {
			/*
			 * Log a message that the hard limit has been hit.
			 */
			struct timeval curtime, logdiff;
			int s = splclock();
			curtime = mono_time;
			splx(s);
			timersub(&curtime, &pp->pr_hardlimit_warning_last,
			    &logdiff);
			if (logdiff.tv_sec >= pp->pr_hardlimit_ratecap) {
				pp->pr_hardlimit_warning_last = curtime;
				log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
			}
		}

		if (flags & PR_URGENT)
			panic("pool_get: urgent");

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		void *v;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent\n");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);

		if (v == NULL) {
			/*
			 * We were unable to allocate a page, but
			 * we released the lock during allocation,
			 * so perhaps items were freed back to the
			 * pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if (flags & PR_URGENT)
				panic("pool_get: urgent");

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: we actually want to wait just until
			 * the page allocator has memory again. Depending
			 * on this pool's usage, we might get stuck here
			 * for a long time.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		pp->pr_npagealloc++;
		pool_prime_page(pp, v);

		/* Start the allocation process over. */
		goto startover;
	}

	if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (pp->pr_nitems == 0) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent\n");
	}
#endif
	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (pi->pi_magic != PI_MAGIC) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		       " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
#ifdef DIAGNOSTIC
		if (ph->ph_nmissing != pp->pr_itemsperpage) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * Migrate empty pages to the end of the list.  This
		 * will speed the update of curpage as pages become
		 * idle.  Empty pages intermingled with idle pages
		 * is no big deal.  As soon as a page becomes un-empty,
		 * it will move back to the head of the list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}
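
/*
 * Example (an illustrative sketch; `foo_pool' is hypothetical): a pool
 * shared with interrupt context must be protected by the appropriate
 * spl level around both pool_get() and pool_put().  Passing 0 for the
 * flags means "don't wait", so the get may return NULL.
 *
 *	int s;
 *	struct foo *f;
 *
 *	s = splimp();
 *	f = pool_get(foo_pool, 0);
 *	splx(s);
 *	if (f == NULL)
 *		return (ENOBUFS);
 *	...
 *	s = splimp();
 *	pool_put(foo_pool, f);
 *	splx(s);
 */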

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
void
_pool_put(pp, v, file, line)
	struct pool *pp;
	void *v;
	const char *file;
	long line;
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	if ((ph = pr_find_pagehead(pp, page)) == NULL) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	/* XXX Should fill the item. */
	pi->pi_magic = PI_MAGIC;
#endif
	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now complete, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water
	 *	    mark, free the page back to the system.
	 *
	 *	(2) Move it to the end of the page list, so that
	 *	    we minimize our chances of fragmenting the
	 *	    pool.  Idle pages migrate to the end (along with
	 *	    completely empty pages, so that we find un-empty
	 *	    pages more quickly when we update curpage) of the
	 *	    list so they can be more easily swept up by
	 *	    the pagedaemon when pages are scarce.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph);
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);

			/*
			 * Update the current page pointer.  Just look for
			 * the first page with any free items.
			 *
			 * XXX: Maybe we want an option to look for the
			 * page with the fewest available items, to minimize
			 * fragmentation?
			 */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}
	/*
	 * If the page has just become un-empty, move it to the head of
	 * the list, and make it the current page.  The next allocation
	 * will get the item from this page, instead of further fragmenting
	 * the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}

/*
 * Add N items to the pool.
 */
int
pool_prime(pp, n, storage)
	struct pool *pp;
	int n;
	caddr_t storage;
{
	caddr_t cp;
	int newnitems, newpages;

#ifdef DIAGNOSTIC
	if (storage && !(pp->pr_roflags & PR_STATIC))
		panic("pool_prime: static");
	/* !storage && static caught below */
#endif

	simple_lock(&pp->pr_slock);

	newnitems = pp->pr_minitems + n;
	newpages =
		roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
		- pp->pr_minpages;

	while (newpages-- > 0) {
		if (pp->pr_roflags & PR_STATIC) {
			cp = storage;
			storage += pp->pr_pagesz;
		} else {
			simple_unlock(&pp->pr_slock);
			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
			simple_lock(&pp->pr_slock);
		}

		if (cp == NULL) {
			simple_unlock(&pp->pr_slock);
			return (ENOMEM);
		}

		pool_prime_page(pp, cp);
		pp->pr_minpages++;
	}

	pp->pr_minitems = newnitems;

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}
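
/*
 * Example (an illustrative sketch; `foo_pool' is hypothetical):
 * pre-allocate space for 128 items in a dynamic pool.  The pages are
 * allocated now and the low water mark is raised so pool_reclaim()
 * will not release them again.
 *
 *	if (pool_prime(foo_pool, 128, NULL) != 0)
 *		printf("foo_pool: could not prime\n");
 */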

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(pp, storage)
	struct pool *pp;
	caddr_t storage;
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		ph = pool_get(&phpool, PR_URGENT);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
				 ph, ph_hashlist);
	}

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	memset(&ph->ph_time, 0, sizeof(ph->ph_time));

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;
	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Like pool_prime(), except this is used by pool_get() when nitems
 * drops below the low water mark.  This is used to catch up nitems
 * with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, this doesn't work with static pools.
 *
 * Note 3, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(pp)
	struct pool *pp;
{
	caddr_t cp;
	int error = 0;

	if (pp->pr_roflags & PR_STATIC) {
		/*
		 * We dropped below the low water mark, and this is not a
		 * good thing.  Log a warning.
		 *
		 * XXX: rate-limit this?
		 */
		printf("WARNING: static pool `%s' dropped below low water "
		    "mark\n", pp->pr_wchan);
		return (0);
	}

	while (pp->pr_nitems < pp->pr_minitems) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		simple_lock(&pp->pr_slock);
		if (cp == NULL) {
			error = ENOMEM;
			break;
		}
		pool_prime_page(pp, cp);
	}

	return (error);
}

void
pool_setlowat(pp, n)
	pool_handle_t	pp;
	int n;
{
	int error;

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if ((error = pool_catchup(pp)) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(pp, n)
	pool_handle_t	pp;
	int n;
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(pp, n, warnmess, ratecap)
	pool_handle_t pp;
	int n;
	const char *warnmess;
	int ratecap;
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap = ratecap;
	memset(&pp->pr_hardlimit_warning_last, 0,
	    sizeof(pp->pr_hardlimit_warning_last));

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}
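
/*
 * Example (an illustrative sketch; the numbers, message and `foo_pool'
 * are made up): keep at least 64 items ready, let pool_reclaim() trim
 * pages beyond 256 items worth, and refuse to hand out more than 1024
 * items, logging the warning at most once per 60 seconds.
 *
 *	pool_setlowat(foo_pool, 64);
 *	pool_sethiwat(foo_pool, 256);
 *	pool_sethardlimit(foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 */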

/*
 * Default page allocator.
 */
static void *
pool_page_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

static void
pool_page_free(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage((vaddr_t)v);
}

/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
}

void
pool_page_free_nointr(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}


/*
 * Release all complete pages that have not been used recently.
 */
void
_pool_reclaim(pp, file, line)
	pool_handle_t pp;
	const char *file;
	long line;
{
	struct pool_item_header *ph, *phnext;
	struct timeval curtime;
	int s;

	if (pp->pr_roflags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_slock) == 0)
		return;
	pr_enter(pp, file, line);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;

			/*
			 * If freeing this page would put us below
			 * the low water mark, stop now.
			 */
			if ((pp->pr_nitems - pp->pr_itemsperpage) <
			    pp->pr_minitems)
				break;

			pr_rmpage(pp, ph);
		}
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}


/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(arg)
	void *arg;
{
	struct pool *pp;
	int s;

	s = splimp();
	simple_lock(&pool_head_slock);

	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
		goto out;

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);

	pool_reclaim(pp);

 out:
	simple_unlock(&pool_head_slock);
	splx(s);
}


/*
 * Diagnostic helpers.
 */
void
pool_print(pp, modif)
	struct pool *pp;
	const char *modif;
{
	int s;

	s = splimp();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(pp, modif, pr)
	struct pool *pp;
	const char *modif;
	void (*pr) __P((const char *, ...));
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print1(pp, modif, pr)
	struct pool *pp;
	const char *modif;
	void (*pr) __P((const char *, ...));
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif
	int print_log = 0, print_pagelist = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
		(*pr)("\n\tpage list:\n");
	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
		     pi = TAILQ_NEXT(pi, pi_list)) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:

	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:

	pr_enter_check(pp, pr);
}

int
pool_chk(pp, label)
	struct pool *pp;
	char *label;
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			       " at page head addr %p (p %p)\n", pp,
				pp->pr_wchan, ph->ph_page,
				ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				       " page %p; item ordinal %d;"
				       " addr %p (p %p)\n",
					pp->pr_wchan, pi->pi_magic, ph->ph_page,
					n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			       " item ordinal %d; addr %p (p %p)\n", pp,
				pp->pr_wchan, ph->ph_page,
				n, pi, page);
			r++;
			goto out;
		}
	}
out:
	simple_unlock(&pp->pr_slock);
	return (r);
}