/*	$NetBSD: subr_pool.c,v 1.3 1998/07/23 20:34:00 pk Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm.h>
#endif

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 *
 */
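
/*
 * Illustrative usage sketch: a client subsystem might use the pool
 * interface roughly as follows, where `struct foo', `foo_pool' and the
 * "foopl" wait channel are hypothetical stand-ins:
 *
 *	struct pool *foo_pool;
 *	struct foo *f;
 *
 *	foo_pool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
 *			       0, NULL, NULL, M_TEMP);
 *	f = pool_get(foo_pool, PR_WAITOK);
 *	...
 *	pool_put(foo_pool, f);
 *
 * Passing 0 for `pagesz' and NULL for the allocator hooks selects the
 * defaults (PAGE_SIZE and pool_page_alloc()/pool_page_free() below);
 * PR_WAITOK allows pool_get() to sleep on the pool's wait channel until
 * a page can be allocated.
 */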

/* List of all pools */
static TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp = NULL;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
				ph_hashlist;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#define PI_MAGIC 0xdeadbeef
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

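/*
 * Map a page address to a bucket in a pool's `pr_hashtab'.  The address
 * is shifted down by the pool's page shift and folded into
 * PR_HASHTABSIZE buckets (the mask form assumes PR_HASHTABSIZE is a
 * power of two).  Only off-page page headers are entered in this table.
 */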
#define PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))


static struct pool_item_header
		*pr_find_pagehead __P((struct pool *, caddr_t));
static void	pr_rmpage __P((struct pool *, struct pool_item_header *));
static int	pool_prime_page __P((struct pool *, caddr_t));
static void	*pool_page_alloc __P((unsigned long, int, int));
static void	pool_page_free __P((void *, unsigned long, int));
int pool_chk __P((struct pool *, char *));


#ifdef POOL_DIAGNOSTIC
/*
 * Pool log entry. An array of these is allocated in pool_create().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define PRLOG_GET	1
#define PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
int pool_logsize = 10;

static void	pr_log __P((struct pool *, void *, int, const char *, long));
static void	pr_printlog __P((struct pool *));

static __inline__ void
pr_log(pp, v, action, file, line)
	struct pool	*pp;
	void		*v;
	int		action;
	const char	*file;
	long		line;
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(pp)
	struct pool *pp;
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	pool_print(pp, "printlog");

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			printf("log entry %d:\n", i);
			printf("\taction = %s, addr = %p\n",
				pl->pl_action == PRLOG_GET ? "get" : "put",
				pl->pl_addr);
			printf("\tfile: %s at line %ld\n",
				pl->pl_file, pl->pl_line);
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}
#else
#define pr_log(pp, v, action, file, line)
#define pr_printlog(pp)
#endif


/*
 * Return the pool page header based on page address.
 */
static __inline__ struct pool_item_header *
pr_find_pagehead(pp, page)
	struct pool *pp;
	caddr_t page;
{
	struct pool_item_header *ph;

	if ((pp->pr_flags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline__ void
pr_rmpage(pp, ph)
	struct pool *pp;
	struct pool_item_header *ph;
{

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_flags & PR_PHINPAGE) == 0) {
		LIST_REMOVE(ph, ph_hashlist);
		pool_put(&phpool, ph);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Allocate and initialize a pool.
 */
struct pool *
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
	size_t	size;
	u_int	align;
	u_int	ioff;
	int	nitems;
	char	*wchan;
	size_t	pagesz;
	void	*(*alloc) __P((unsigned long, int, int));
	void	(*release) __P((void *, unsigned long, int));
	int	mtype;
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
#ifdef POOL_DIAGNOSTIC
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
		  alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
	struct pool	*pp;
	size_t		size;
	u_int		align;
	u_int		ioff;
	int		flags;
	char		*wchan;
	size_t		pagesz;
	void		*(*alloc) __P((unsigned long, int, int));
	void		(*release) __P((void *, unsigned long, int));
	int		mtype;
{
	int off, slack;

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL)
		alloc = pool_page_alloc;

	if (release == NULL)
		release = pool_page_free;

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	TAILQ_INIT(&pp->pr_pagelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_flags = flags;
	pp->pr_size = ALIGN(size);
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
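	/*
	 * (E.g. with a 4096-byte page, items smaller than 256 bytes keep
	 * their page header in the page itself.)
	 */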
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_flags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
			pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		bzero(pp->pr_hashtab, sizeof(pp->pr_hashtab));
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
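	/*
	 * Worked example (illustrative; the header size is machine
	 * dependent): pagesz = 4096, an in-page header of 40 bytes
	 * (off = 4056), pr_size = 64 and ioff = 0 give
	 * pr_itemsperpage = 4056 / 64 = 63.
	 */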

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
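	/*
	 * (Continuing the example above: 4056 - 63 * 64 = 24 bytes of
	 * slack and an assumed 8-byte `align' give pr_maxcolor = 24, so
	 * successive pages start their items at offsets 0, 8, 16 and 24
	 * before the color wraps back to 0.)
	 */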

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;

#ifdef POOL_DIAGNOSTIC
	if ((flags & PR_LOGGING) != 0) {
		pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
				    M_TEMP, M_NOWAIT);
		if (pp->pr_log == NULL)
			pp->pr_flags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	simple_lock_init(&pp->pr_lock);

	/*
	 * Initialize private page header pool if we haven't done so yet.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
			  0, "phpool", 0, 0, 0, 0);
	}

	return;
}
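
/*
 * Illustrative sketch of the static-pool usage described above, where
 * `struct bar', bar_pool, bar_bootstrap() and the "barpl" wait channel
 * are hypothetical:
 *
 *	static struct pool bar_pool;
 *
 *	void
 *	bar_bootstrap()
 *	{
 *		pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
 *			  0, NULL, NULL, M_TEMP);
 *	}
 *
 * The private `phpool' of page headers above is initialized in much the
 * same way.
 */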

/*
 * De-commission a pool resource.
 */
void
pool_destroy(pp)
	struct pool *pp;
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nget - pp->pr_nput != 0) {
		pr_printlog(pp);
		panic("pool_destroy: pool busy: still out: %lu\n",
		      pp->pr_nget - pp->pr_nput);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_flags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	drainpp = NULL;

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_flags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	if (pp->pr_flags & PR_FREEHEADER)
		free(pp, M_POOL);
}


/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void *
_pool_get(pp, flags, file, line)
	struct pool *pp;
	int flags;
	const char *file;
	long line;
#else
void *
pool_get(pp, flags)
	struct pool *pp;
	int flags;
#endif
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if ((pp->pr_flags & PR_STATIC) && (flags & PR_MALLOCOK)) {
		pr_printlog(pp);
		panic("pool_get: static");
	}
#endif

	simple_lock(&pp->pr_lock);
	if (curproc == NULL && (flags & PR_WAITOK) != 0)
		panic("pool_get: must have NOWAIT");

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
again:
	if ((ph = pp->pr_curpage) == NULL) {
		void *v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		if (v == NULL) {
			if (flags & PR_URGENT)
				panic("pool_get: urgent");
			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				simple_unlock(&pp->pr_lock);
				return (NULL);
			}

			pp->pr_flags |= PR_WANTED;
			simple_unlock(&pp->pr_lock);
			tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
			simple_lock(&pp->pr_lock);
		} else {
			pp->pr_npagealloc++;
			pool_prime_page(pp, v);
		}

		goto again;
	}

	if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)
		panic("pool_get: %s: page empty", pp->pr_wchan);

	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (pi->pi_magic != PI_MAGIC) {
		pr_printlog(pp);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		       " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * First, move the now empty page to the head of
		 * the page list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		while ((ph = TAILQ_NEXT(ph, ph_pagelist)) != NULL)
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;
	simple_unlock(&pp->pr_lock);
	return (v);
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(pp, v, file, line)
	struct pool *pp;
	void *v;
	const char *file;
	long line;
#else
void
pool_put(pp, v)
	struct pool *pp;
	void *v;
#endif
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

	simple_lock(&pp->pr_lock);

	pr_log(pp, v, PRLOG_PUT, file, line);

	if ((ph = pr_find_pagehead(pp, page)) == NULL) {
		pr_printlog(pp);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		wakeup((caddr_t)pp);
		simple_unlock(&pp->pr_lock);
		return;
	}

	/*
	 * If this page is now complete, move it to the end of the pagelist.
	 * If this page has just become un-empty, move it to the head.
	 */
	if (ph->ph_nmissing == 0) {
		if (pp->pr_npages > pp->pr_maxpages) {
#if 0
			timeout(pool_drain, 0, pool_inactive_time*hz);
#else
			pr_rmpage(pp, ph);
#endif
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
			ph->ph_time = time;

			/* XXX - update curpage */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}

	simple_unlock(&pp->pr_lock);
}

/*
 * Add N items to the pool.
 */
int
pool_prime(pp, n, storage)
	struct pool *pp;
	int n;
	caddr_t storage;
{
	caddr_t cp;
	int newnitems, newpages;

#ifdef DIAGNOSTIC
	if (storage && !(pp->pr_flags & PR_STATIC))
		panic("pool_prime: static");
	/* !storage && static caught below */
#endif

	newnitems = pp->pr_minitems + n;
	newpages =
		roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
		- pp->pr_minpages;

	simple_lock(&pp->pr_lock);
	while (newpages-- > 0) {

		if (pp->pr_flags & PR_STATIC) {
			cp = storage;
			storage += pp->pr_pagesz;
		} else {
			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		}

		if (cp == NULL) {
			simple_unlock(&pp->pr_lock);
			return (ENOMEM);
		}

		pool_prime_page(pp, cp);
		pp->pr_minpages++;
	}

	pp->pr_minitems = newnitems;

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_lock);
	return (0);
}

/*
 * Add a page worth of items to the pool.
 */
int
pool_prime_page(pp, storage)
	struct pool *pp;
	caddr_t storage;
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	if ((pp->pr_flags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		ph = pool_get(&phpool, PR_URGENT);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
				 ph, ph_hashlist);
	}

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time.tv_sec = ph->ph_time.tv_usec = 0;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;

	return (0);
}

/*
 * Set the low water mark: retain enough pages to hold at least `n' items.
 */
void
pool_setlowat(pp, n)
	pool_handle_t	pp;
	int n;
{
	pp->pr_minitems = n;
	if (n == 0) {
		pp->pr_minpages = 0;
		return;
	}
	pp->pr_minpages =
		roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
}

/*
 * Set the high water mark: cache at most enough pages to hold `n' items;
 * complete pages beyond that are released by pool_put().
 */
void
pool_sethiwat(pp, n)
	pool_handle_t	pp;
	int n;
{
	if (n == 0) {
		pp->pr_maxpages = 0;
		return;
	}
	pp->pr_maxpages =
		roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
}


/*
 * Default page allocator.
 */
static void *
pool_page_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	vm_offset_t va;

#if defined(UVM)
	va = uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
			      (vm_size_t)sz, UVM_KMF_NOWAIT);
#else
	va = kmem_malloc(kmem_map, (vm_size_t)sz, 0);
#endif
	return ((void *)va);
}

static void
pool_page_free(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free(kernel_map, (vm_offset_t)v, sz);
#else
	kmem_free(kmem_map, (vm_offset_t)v, sz);
#endif
}

/*
 * Release all complete pages that have not been used recently.
 */
void
pool_reclaim(pp)
	pool_handle_t pp;
{
	struct pool_item_header *ph, *phnext;
	struct timeval curtime = time;

	if (pp->pr_flags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_lock) == 0)
		return;

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;
			pr_rmpage(pp, ph);
		}
	}

	simple_unlock(&pp->pr_lock);
}


/*
 * Drain pools, one at a time.
 */
void
pool_drain(arg)
	void *arg;
{
	struct pool *pp;
	int s = splimp();

	/* XXX:lock pool head */
	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) {
		splx(s);
		return;
	}

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);
	/* XXX:unlock pool head */

	pool_reclaim(pp);
	splx(s);
}


#ifdef DEBUG
/*
 * Diagnostic helpers.
 */
void
pool_print(pp, label)
	struct pool *pp;
	char *label;
{

	if (label != NULL)
		printf("%s: ", label);

	printf("pool %s: nalloc %lu nfree %lu npagealloc %lu npagefree %lu\n"
	       "         npages %u minitems %u itemsperpage %u itemoffset %u\n",
		pp->pr_wchan,
		pp->pr_nget,
		pp->pr_nput,
		pp->pr_npagealloc,
		pp->pr_npagefree,
		pp->pr_npages,
		pp->pr_minitems,
		pp->pr_itemsperpage,
		pp->pr_itemoffset);
}

int
pool_chk(pp, label)
	struct pool *pp;
	char *label;
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_lock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): page inconsistency: page %p;"
			       " at page head addr %p (p %p)\n",
				pp->pr_wchan, ph->ph_page,
				ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				       " page %p; item ordinal %d;"
				       " addr %p (p %p)\n",
					pp->pr_wchan, pi->pi_magic, ph->ph_page,
					n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): page inconsistency: page %p;"
			       " item ordinal %d; addr %p (p %p)\n",
				pp->pr_wchan, ph->ph_page,
				n, pi, page);
			r++;
			goto out;
		}
	}
out:
	simple_unlock(&pp->pr_lock);
	return (r);
}
#endif