subr_pool.c revision 1.93
      1  1.93       dbj /*	$NetBSD: subr_pool.c,v 1.93 2004/03/08 22:48:09 dbj Exp $	*/
      2   1.1        pk 
      3   1.1        pk /*-
      4  1.43   thorpej  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5   1.1        pk  * All rights reserved.
      6   1.1        pk  *
      7   1.1        pk  * This code is derived from software contributed to The NetBSD Foundation
      8  1.20   thorpej  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9  1.20   thorpej  * Simulation Facility, NASA Ames Research Center.
     10   1.1        pk  *
     11   1.1        pk  * Redistribution and use in source and binary forms, with or without
     12   1.1        pk  * modification, are permitted provided that the following conditions
     13   1.1        pk  * are met:
     14   1.1        pk  * 1. Redistributions of source code must retain the above copyright
     15   1.1        pk  *    notice, this list of conditions and the following disclaimer.
     16   1.1        pk  * 2. Redistributions in binary form must reproduce the above copyright
     17   1.1        pk  *    notice, this list of conditions and the following disclaimer in the
     18   1.1        pk  *    documentation and/or other materials provided with the distribution.
     19   1.1        pk  * 3. All advertising materials mentioning features or use of this software
     20   1.1        pk  *    must display the following acknowledgement:
     21  1.13  christos  *	This product includes software developed by the NetBSD
     22  1.13  christos  *	Foundation, Inc. and its contributors.
     23   1.1        pk  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24   1.1        pk  *    contributors may be used to endorse or promote products derived
     25   1.1        pk  *    from this software without specific prior written permission.
     26   1.1        pk  *
     27   1.1        pk  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28   1.1        pk  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29   1.1        pk  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30   1.1        pk  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31   1.1        pk  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32   1.1        pk  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33   1.1        pk  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34   1.1        pk  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35   1.1        pk  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36   1.1        pk  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37   1.1        pk  * POSSIBILITY OF SUCH DAMAGE.
     38   1.1        pk  */
     39  1.64     lukem 
     40  1.64     lukem #include <sys/cdefs.h>
     41  1.93       dbj __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.93 2004/03/08 22:48:09 dbj Exp $");
     42  1.24    scottr 
     43  1.25   thorpej #include "opt_pool.h"
     44  1.24    scottr #include "opt_poollog.h"
     45  1.28   thorpej #include "opt_lockdebug.h"
     46   1.1        pk 
     47   1.1        pk #include <sys/param.h>
     48   1.1        pk #include <sys/systm.h>
     49   1.1        pk #include <sys/proc.h>
     50   1.1        pk #include <sys/errno.h>
     51   1.1        pk #include <sys/kernel.h>
     52   1.1        pk #include <sys/malloc.h>
     53   1.1        pk #include <sys/lock.h>
     54   1.1        pk #include <sys/pool.h>
     55  1.20   thorpej #include <sys/syslog.h>
     56   1.3        pk 
     57   1.3        pk #include <uvm/uvm.h>
     58   1.3        pk 
     59   1.1        pk /*
     60   1.1        pk  * Pool resource management utility.
     61   1.3        pk  *
     62  1.88       chs  * Memory is allocated in pages which are split into pieces according to
     63  1.88       chs  * the pool item size. Each page is kept on one of three lists in the
     64  1.88       chs  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
     65  1.88       chs  * for empty, full and partially-full pages respectively. The individual
     66  1.88       chs  * pool items are on a linked list headed by `ph_itemlist' in each page
     67  1.88       chs  * header. The memory for building the page list is either taken from
     68  1.88       chs  * the allocated pages themselves (for small pool items) or taken from
     69  1.88       chs  * an internal pool of page headers (`phpool').
     70   1.1        pk  */
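/*
 * Illustrative sketch of how a client subsystem typically consumes this
 * interface, using only pool_init(), pool_get() and pool_put() as they
 * appear in this file and <sys/pool.h>.  The "example_*" names and the
 * EXAMPLE_POOL_USAGE guard are hypothetical and not part of the kernel.
 */
#ifdef EXAMPLE_POOL_USAGE
struct example_obj {
	int	eo_refcnt;
};

static struct pool example_pool;

static void
example_pool_setup(void)
{

	/* One pool per fixed-size object type; pages are carved into items. */
	pool_init(&example_pool, sizeof(struct example_obj), 0, 0, 0,
	    "examplpl", NULL);
}

static struct example_obj *
example_obj_alloc(void)
{

	/* PR_WAITOK may sleep; use PR_NOWAIT from interrupt context. */
	return (pool_get(&example_pool, PR_WAITOK));
}

static void
example_obj_free(struct example_obj *eo)
{

	pool_put(&example_pool, eo);
}
#endif /* EXAMPLE_POOL_USAGE */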
     71   1.1        pk 
     72   1.3        pk /* List of all pools */
     73   1.5   thorpej TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     74   1.3        pk 
     75   1.3        pk /* Private pool for page header structures */
     76   1.3        pk static struct pool phpool;
     77   1.3        pk 
     78  1.62     bjh21 #ifdef POOL_SUBPAGE
     79  1.62     bjh21 /* Pool of subpages for use by normal pools. */
     80  1.62     bjh21 static struct pool psppool;
     81  1.62     bjh21 #endif
     82  1.62     bjh21 
     83   1.3        pk /* # of seconds to retain page after last use */
     84   1.3        pk int pool_inactive_time = 10;
     85   1.3        pk 
     86   1.3        pk /* Next candidate for drainage (see pool_drain()) */
     87  1.23   thorpej static struct pool	*drainpp;
     88  1.23   thorpej 
     89  1.23   thorpej /* This spin lock protects both pool_head and drainpp. */
     90  1.23   thorpej struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
     91   1.3        pk 
     92   1.3        pk struct pool_item_header {
     93   1.3        pk 	/* Page headers */
     94  1.88       chs 	LIST_ENTRY(pool_item_header)
     95   1.3        pk 				ph_pagelist;	/* pool page list */
     96   1.3        pk 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
     97  1.88       chs 	SPLAY_ENTRY(pool_item_header)
     98  1.88       chs 				ph_node;	/* Off-page page headers */
     99  1.79   thorpej 	unsigned int		ph_nmissing;	/* # of chunks in use */
    100   1.3        pk 	caddr_t			ph_page;	/* this page's address */
    101   1.3        pk 	struct timeval		ph_time;	/* last referenced */
    102   1.3        pk };
    103   1.3        pk 
    104   1.1        pk struct pool_item {
    105   1.3        pk #ifdef DIAGNOSTIC
    106  1.82   thorpej 	u_int pi_magic;
    107  1.33       chs #endif
    108  1.82   thorpej #define	PI_MAGIC 0xdeadbeefU
    109   1.3        pk 	/* Other entries use only this list entry */
    110   1.3        pk 	TAILQ_ENTRY(pool_item)	pi_list;
    111   1.3        pk };
    112   1.3        pk 
    113  1.53   thorpej #define	POOL_NEEDS_CATCHUP(pp)						\
    114  1.53   thorpej 	((pp)->pr_nitems < (pp)->pr_minitems)
    115  1.53   thorpej 
    116  1.43   thorpej /*
    117  1.43   thorpej  * Pool cache management.
    118  1.43   thorpej  *
    119  1.43   thorpej  * Pool caches provide a way for constructed objects to be cached by the
    120  1.43   thorpej  * pool subsystem.  This can lead to performance improvements by avoiding
     121  1.43   thorpej  * needless object construction/destruction; destruction is deferred until
     122  1.43   thorpej  * absolutely necessary.
    123  1.43   thorpej  *
     124  1.43   thorpej  * Cached objects are grouped into cache groups.  Each cache group references
    125  1.43   thorpej  * up to 16 constructed objects.  When a cache allocates an object
    126  1.43   thorpej  * from the pool, it calls the object's constructor and places it into
    127  1.43   thorpej  * a cache group.  When a cache group frees an object back to the pool,
    128  1.43   thorpej  * it first calls the object's destructor.  This allows the object to
    129  1.43   thorpej  * persist in constructed form while freed to the cache.
    130  1.43   thorpej  *
    131  1.43   thorpej  * Multiple caches may exist for each pool.  This allows a single
    132  1.43   thorpej  * object type to have multiple constructed forms.  The pool references
    133  1.43   thorpej  * each cache, so that when a pool is drained by the pagedaemon, it can
    134  1.43   thorpej  * drain each individual cache as well.  Each time a cache is drained,
    135  1.43   thorpej  * the most idle cache group is freed to the pool in its entirety.
    136  1.43   thorpej  *
     137  1.43   thorpej  * Pool caches are layered on top of pools.  By layering them, we can avoid
    138  1.43   thorpej  * the complexity of cache management for pools which would not benefit
    139  1.43   thorpej  * from it.
    140  1.43   thorpej  */
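/*
 * Illustrative sketch of the constructed-object path described above.
 * It assumes the pool_cache_init()/pool_cache_get()/pool_cache_put()
 * interface declared in <sys/pool.h> of this vintage; the "example_c*"
 * names, the 64-byte item size and the EXAMPLE_POOL_CACHE_USAGE guard
 * are hypothetical.
 */
#ifdef EXAMPLE_POOL_CACHE_USAGE
static struct pool example_cpool;
static struct pool_cache example_cache;

static int
example_ctor(void *arg, void *object, int flags)
{

	/* Expensive one-time construction; skipped when a cached object is reused. */
	return (0);
}

static void
example_dtor(void *arg, void *object)
{

	/* Tear-down; called only when the object really returns to the pool. */
}

static void
example_cache_setup(void)
{

	pool_init(&example_cpool, 64, 0, 0, 0, "examplcp", NULL);
	pool_cache_init(&example_cache, &example_cpool,
	    example_ctor, example_dtor, NULL);
}

static void
example_cache_use(void)
{
	void *v;

	/* Reuses an already-constructed object from a cache group if possible. */
	v = pool_cache_get(&example_cache, PR_WAITOK);
	if (v != NULL)
		pool_cache_put(&example_cache, v);
}
#endif /* EXAMPLE_POOL_CACHE_USAGE */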
    141  1.43   thorpej 
    142  1.43   thorpej /* The cache group pool. */
    143  1.43   thorpej static struct pool pcgpool;
    144   1.3        pk 
    145  1.43   thorpej static void	pool_cache_reclaim(struct pool_cache *);
    146   1.3        pk 
    147  1.42   thorpej static int	pool_catchup(struct pool *);
    148  1.55   thorpej static void	pool_prime_page(struct pool *, caddr_t,
    149  1.55   thorpej 		    struct pool_item_header *);
    150  1.88       chs static void	pool_update_curpage(struct pool *);
    151  1.66   thorpej 
    152  1.66   thorpej void		*pool_allocator_alloc(struct pool *, int);
    153  1.66   thorpej void		pool_allocator_free(struct pool *, void *);
    154   1.3        pk 
    155  1.88       chs static void pool_print_pagelist(struct pool_pagelist *,
    156  1.88       chs 	void (*)(const char *, ...));
    157  1.42   thorpej static void pool_print1(struct pool *, const char *,
    158  1.42   thorpej 	void (*)(const char *, ...));
    159   1.3        pk 
    160  1.88       chs static int pool_chk_page(struct pool *, const char *,
    161  1.88       chs 			 struct pool_item_header *);
    162  1.88       chs 
    163   1.3        pk /*
    164  1.52   thorpej  * Pool log entry. An array of these is allocated in pool_init().
    165   1.3        pk  */
    166   1.3        pk struct pool_log {
    167   1.3        pk 	const char	*pl_file;
    168   1.3        pk 	long		pl_line;
    169   1.3        pk 	int		pl_action;
    170  1.25   thorpej #define	PRLOG_GET	1
    171  1.25   thorpej #define	PRLOG_PUT	2
    172   1.3        pk 	void		*pl_addr;
    173   1.1        pk };
    174   1.1        pk 
    175  1.86      matt #ifdef POOL_DIAGNOSTIC
    176   1.3        pk /* Number of entries in pool log buffers */
    177  1.17   thorpej #ifndef POOL_LOGSIZE
    178  1.17   thorpej #define	POOL_LOGSIZE	10
    179  1.17   thorpej #endif
    180  1.17   thorpej 
    181  1.17   thorpej int pool_logsize = POOL_LOGSIZE;
    182   1.1        pk 
    183  1.42   thorpej static __inline void
    184  1.42   thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    185   1.3        pk {
    186   1.3        pk 	int n = pp->pr_curlogentry;
    187   1.3        pk 	struct pool_log *pl;
    188   1.3        pk 
    189  1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    190   1.3        pk 		return;
    191   1.3        pk 
    192   1.3        pk 	/*
    193   1.3        pk 	 * Fill in the current entry. Wrap around and overwrite
    194   1.3        pk 	 * the oldest entry if necessary.
    195   1.3        pk 	 */
    196   1.3        pk 	pl = &pp->pr_log[n];
    197   1.3        pk 	pl->pl_file = file;
    198   1.3        pk 	pl->pl_line = line;
    199   1.3        pk 	pl->pl_action = action;
    200   1.3        pk 	pl->pl_addr = v;
    201   1.3        pk 	if (++n >= pp->pr_logsize)
    202   1.3        pk 		n = 0;
    203   1.3        pk 	pp->pr_curlogentry = n;
    204   1.3        pk }
    205   1.3        pk 
    206   1.3        pk static void
    207  1.42   thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
    208  1.42   thorpej     void (*pr)(const char *, ...))
    209   1.3        pk {
    210   1.3        pk 	int i = pp->pr_logsize;
    211   1.3        pk 	int n = pp->pr_curlogentry;
    212   1.3        pk 
    213  1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    214   1.3        pk 		return;
    215   1.3        pk 
    216   1.3        pk 	/*
    217   1.3        pk 	 * Print all entries in this pool's log.
    218   1.3        pk 	 */
    219   1.3        pk 	while (i-- > 0) {
    220   1.3        pk 		struct pool_log *pl = &pp->pr_log[n];
    221   1.3        pk 		if (pl->pl_action != 0) {
    222  1.25   thorpej 			if (pi == NULL || pi == pl->pl_addr) {
    223  1.25   thorpej 				(*pr)("\tlog entry %d:\n", i);
    224  1.25   thorpej 				(*pr)("\t\taction = %s, addr = %p\n",
    225  1.25   thorpej 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    226  1.25   thorpej 				    pl->pl_addr);
    227  1.25   thorpej 				(*pr)("\t\tfile: %s at line %lu\n",
    228  1.25   thorpej 				    pl->pl_file, pl->pl_line);
    229  1.25   thorpej 			}
    230   1.3        pk 		}
    231   1.3        pk 		if (++n >= pp->pr_logsize)
    232   1.3        pk 			n = 0;
    233   1.3        pk 	}
    234   1.3        pk }
    235  1.25   thorpej 
    236  1.42   thorpej static __inline void
    237  1.42   thorpej pr_enter(struct pool *pp, const char *file, long line)
    238  1.25   thorpej {
    239  1.25   thorpej 
    240  1.34   thorpej 	if (__predict_false(pp->pr_entered_file != NULL)) {
    241  1.25   thorpej 		printf("pool %s: reentrancy at file %s line %ld\n",
    242  1.25   thorpej 		    pp->pr_wchan, file, line);
    243  1.25   thorpej 		printf("         previous entry at file %s line %ld\n",
    244  1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    245  1.25   thorpej 		panic("pr_enter");
    246  1.25   thorpej 	}
    247  1.25   thorpej 
    248  1.25   thorpej 	pp->pr_entered_file = file;
    249  1.25   thorpej 	pp->pr_entered_line = line;
    250  1.25   thorpej }
    251  1.25   thorpej 
    252  1.42   thorpej static __inline void
    253  1.42   thorpej pr_leave(struct pool *pp)
    254  1.25   thorpej {
    255  1.25   thorpej 
    256  1.34   thorpej 	if (__predict_false(pp->pr_entered_file == NULL)) {
    257  1.25   thorpej 		printf("pool %s not entered?\n", pp->pr_wchan);
    258  1.25   thorpej 		panic("pr_leave");
    259  1.25   thorpej 	}
    260  1.25   thorpej 
    261  1.25   thorpej 	pp->pr_entered_file = NULL;
    262  1.25   thorpej 	pp->pr_entered_line = 0;
    263  1.25   thorpej }
    264  1.25   thorpej 
    265  1.42   thorpej static __inline void
    266  1.42   thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    267  1.25   thorpej {
    268  1.25   thorpej 
    269  1.25   thorpej 	if (pp->pr_entered_file != NULL)
    270  1.25   thorpej 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    271  1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    272  1.25   thorpej }
    273   1.3        pk #else
    274  1.25   thorpej #define	pr_log(pp, v, action, file, line)
    275  1.25   thorpej #define	pr_printlog(pp, pi, pr)
    276  1.25   thorpej #define	pr_enter(pp, file, line)
    277  1.25   thorpej #define	pr_leave(pp)
    278  1.25   thorpej #define	pr_enter_check(pp, pr)
    279  1.59   thorpej #endif /* POOL_DIAGNOSTIC */
    280   1.3        pk 
    281  1.88       chs static __inline int
    282  1.88       chs phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
    283  1.88       chs {
    284  1.88       chs 	if (a->ph_page < b->ph_page)
    285  1.88       chs 		return (-1);
    286  1.88       chs 	else if (a->ph_page > b->ph_page)
    287  1.88       chs 		return (1);
    288  1.88       chs 	else
    289  1.88       chs 		return (0);
    290  1.88       chs }
    291  1.88       chs 
    292  1.88       chs SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
    293  1.88       chs SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
    294  1.88       chs 
    295   1.3        pk /*
    296   1.3        pk  * Return the pool page header based on page address.
    297   1.3        pk  */
    298  1.42   thorpej static __inline struct pool_item_header *
    299  1.42   thorpej pr_find_pagehead(struct pool *pp, caddr_t page)
    300   1.3        pk {
    301  1.88       chs 	struct pool_item_header *ph, tmp;
    302   1.3        pk 
    303  1.20   thorpej 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    304   1.3        pk 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
    305   1.3        pk 
    306  1.88       chs 	tmp.ph_page = page;
    307  1.88       chs 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    308  1.88       chs 	return ph;
    309   1.3        pk }
    310   1.3        pk 
    311   1.3        pk /*
    312   1.3        pk  * Remove a page from the pool.
    313   1.3        pk  */
    314  1.42   thorpej static __inline void
    315  1.61       chs pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    316  1.61       chs      struct pool_pagelist *pq)
    317   1.3        pk {
    318  1.61       chs 	int s;
    319   1.3        pk 
    320  1.91      yamt 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
    321  1.91      yamt 
    322   1.3        pk 	/*
    323   1.7   thorpej 	 * If the page was idle, decrement the idle page count.
    324   1.3        pk 	 */
    325   1.6   thorpej 	if (ph->ph_nmissing == 0) {
    326   1.6   thorpej #ifdef DIAGNOSTIC
    327   1.6   thorpej 		if (pp->pr_nidle == 0)
    328   1.6   thorpej 			panic("pr_rmpage: nidle inconsistent");
    329  1.20   thorpej 		if (pp->pr_nitems < pp->pr_itemsperpage)
    330  1.20   thorpej 			panic("pr_rmpage: nitems inconsistent");
    331   1.6   thorpej #endif
    332   1.6   thorpej 		pp->pr_nidle--;
    333   1.6   thorpej 	}
    334   1.7   thorpej 
    335  1.20   thorpej 	pp->pr_nitems -= pp->pr_itemsperpage;
    336  1.20   thorpej 
    337   1.7   thorpej 	/*
    338  1.61       chs 	 * Unlink a page from the pool and release it (or queue it for release).
    339   1.7   thorpej 	 */
    340  1.88       chs 	LIST_REMOVE(ph, ph_pagelist);
    341  1.91      yamt 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    342  1.91      yamt 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
    343  1.61       chs 	if (pq) {
    344  1.88       chs 		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
    345  1.61       chs 	} else {
    346  1.66   thorpej 		pool_allocator_free(pp, ph->ph_page);
    347  1.61       chs 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    348  1.85        pk 			s = splvm();
    349  1.61       chs 			pool_put(&phpool, ph);
    350  1.61       chs 			splx(s);
    351  1.61       chs 		}
    352  1.61       chs 	}
    353   1.7   thorpej 	pp->pr_npages--;
    354   1.7   thorpej 	pp->pr_npagefree++;
    355   1.6   thorpej 
    356  1.88       chs 	pool_update_curpage(pp);
    357   1.3        pk }
    358   1.3        pk 
    359   1.3        pk /*
    360   1.3        pk  * Initialize the given pool resource structure.
    361   1.3        pk  *
    362   1.3        pk  * We export this routine to allow other kernel parts to declare
    363   1.3        pk  * static pools that must be initialized before malloc() is available.
    364   1.3        pk  */
    365   1.3        pk void
    366  1.42   thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    367  1.66   thorpej     const char *wchan, struct pool_allocator *palloc)
    368   1.3        pk {
    369  1.88       chs 	int off, slack;
    370  1.92     enami 	size_t trysize, phsize;
    371  1.93       dbj 	int s;
    372   1.3        pk 
    373  1.25   thorpej #ifdef POOL_DIAGNOSTIC
    374  1.25   thorpej 	/*
    375  1.25   thorpej 	 * Always log if POOL_DIAGNOSTIC is defined.
    376  1.25   thorpej 	 */
    377  1.25   thorpej 	if (pool_logsize != 0)
    378  1.25   thorpej 		flags |= PR_LOGGING;
    379  1.25   thorpej #endif
    380  1.25   thorpej 
    381  1.66   thorpej #ifdef POOL_SUBPAGE
    382  1.66   thorpej 	/*
    383  1.66   thorpej 	 * XXX We don't provide a real `nointr' back-end
    384  1.66   thorpej 	 * yet; all sub-pages come from a kmem back-end.
    385  1.66   thorpej 	 * maybe some day...
    386  1.66   thorpej 	 */
    387  1.66   thorpej 	if (palloc == NULL) {
    388  1.66   thorpej 		extern struct pool_allocator pool_allocator_kmem_subpage;
    389  1.66   thorpej 		palloc = &pool_allocator_kmem_subpage;
    390  1.66   thorpej 	}
    391   1.3        pk 	/*
    392  1.66   thorpej 	 * We'll assume any user-specified back-end allocator
    393  1.66   thorpej 	 * will deal with sub-pages, or simply don't care.
    394   1.3        pk 	 */
    395  1.66   thorpej #else
    396  1.66   thorpej 	if (palloc == NULL)
    397  1.66   thorpej 		palloc = &pool_allocator_kmem;
    398  1.66   thorpej #endif /* POOL_SUBPAGE */
    399  1.66   thorpej 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
    400  1.66   thorpej 		if (palloc->pa_pagesz == 0) {
    401  1.62     bjh21 #ifdef POOL_SUBPAGE
    402  1.66   thorpej 			if (palloc == &pool_allocator_kmem)
    403  1.66   thorpej 				palloc->pa_pagesz = PAGE_SIZE;
    404  1.66   thorpej 			else
    405  1.66   thorpej 				palloc->pa_pagesz = POOL_SUBPAGE;
    406  1.62     bjh21 #else
    407  1.66   thorpej 			palloc->pa_pagesz = PAGE_SIZE;
    408  1.66   thorpej #endif /* POOL_SUBPAGE */
    409  1.66   thorpej 		}
    410  1.66   thorpej 
    411  1.66   thorpej 		TAILQ_INIT(&palloc->pa_list);
    412  1.66   thorpej 
    413  1.66   thorpej 		simple_lock_init(&palloc->pa_slock);
    414  1.66   thorpej 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    415  1.66   thorpej 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    416  1.66   thorpej 		palloc->pa_flags |= PA_INITIALIZED;
    417   1.4   thorpej 	}
    418   1.3        pk 
    419   1.3        pk 	if (align == 0)
    420   1.3        pk 		align = ALIGN(1);
    421  1.14   thorpej 
    422  1.14   thorpej 	if (size < sizeof(struct pool_item))
    423  1.14   thorpej 		size = sizeof(struct pool_item);
    424   1.3        pk 
    425  1.78   thorpej 	size = roundup(size, align);
    426  1.66   thorpej #ifdef DIAGNOSTIC
    427  1.66   thorpej 	if (size > palloc->pa_pagesz)
    428  1.35        pk 		panic("pool_init: pool item size (%lu) too large",
    429  1.35        pk 		      (u_long)size);
    430  1.66   thorpej #endif
    431  1.35        pk 
    432   1.3        pk 	/*
    433   1.3        pk 	 * Initialize the pool structure.
    434   1.3        pk 	 */
    435  1.88       chs 	LIST_INIT(&pp->pr_emptypages);
    436  1.88       chs 	LIST_INIT(&pp->pr_fullpages);
    437  1.88       chs 	LIST_INIT(&pp->pr_partpages);
    438  1.43   thorpej 	TAILQ_INIT(&pp->pr_cachelist);
    439   1.3        pk 	pp->pr_curpage = NULL;
    440   1.3        pk 	pp->pr_npages = 0;
    441   1.3        pk 	pp->pr_minitems = 0;
    442   1.3        pk 	pp->pr_minpages = 0;
    443   1.3        pk 	pp->pr_maxpages = UINT_MAX;
    444  1.20   thorpej 	pp->pr_roflags = flags;
    445  1.20   thorpej 	pp->pr_flags = 0;
    446  1.35        pk 	pp->pr_size = size;
    447   1.3        pk 	pp->pr_align = align;
    448   1.3        pk 	pp->pr_wchan = wchan;
    449  1.66   thorpej 	pp->pr_alloc = palloc;
    450  1.20   thorpej 	pp->pr_nitems = 0;
    451  1.20   thorpej 	pp->pr_nout = 0;
    452  1.20   thorpej 	pp->pr_hardlimit = UINT_MAX;
    453  1.20   thorpej 	pp->pr_hardlimit_warning = NULL;
    454  1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    455  1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    456  1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    457  1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    458  1.68   thorpej 	pp->pr_drain_hook = NULL;
    459  1.68   thorpej 	pp->pr_drain_hook_arg = NULL;
    460   1.3        pk 
    461   1.3        pk 	/*
     462   1.3        pk 	 * Decide whether to put the page header off-page, to avoid
     463  1.92     enami 	 * wasting too large a part of the page (or a whole large item) on it.
     464  1.92     enami 	 * Off-page page headers go on a splay tree, so we can match
     465  1.92     enami 	 * a returned item with its header based on the page address.
     466  1.92     enami 	 * We use 1/16 of the page size and about 8 times the item
     467  1.92     enami 	 * size as the threshold (XXX: tune).
    468  1.92     enami 	 *
    469  1.92     enami 	 * However, we'll put the header into the page if we can put
    470  1.92     enami 	 * it without wasting any items.
    471  1.92     enami 	 *
    472  1.92     enami 	 * Silently enforce `0 <= ioff < align'.
    473   1.3        pk 	 */
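	/*
	 * Worked example with hypothetical numbers: on a 4096-byte page,
	 * say phsize = 48 and ioff = 0 (so trysize = 4096).  The threshold
	 * below is MIN(4096 / 16, 48 << 3) = 256.  A 512-byte item fails
	 * the first test, and 4096 / 512 = 8 while (4096 - 48) / 512 = 7,
	 * so an in-page header would cost a whole item and the header goes
	 * off-page.  A 520-byte item also fails the first test, but
	 * 4096 / 520 = (4096 - 48) / 520 = 7, so the header fits in the
	 * slack and stays in-page.
	 */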
    474  1.92     enami 	pp->pr_itemoffset = ioff %= align;
    475  1.92     enami 	/* See the comment below about reserved bytes. */
    476  1.92     enami 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
    477  1.92     enami 	phsize = ALIGN(sizeof(struct pool_item_header));
    478  1.92     enami 	if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
    479  1.92     enami 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
    480   1.3        pk 		/* Use the end of the page for the page header */
    481  1.20   thorpej 		pp->pr_roflags |= PR_PHINPAGE;
    482  1.92     enami 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
    483   1.2        pk 	} else {
    484   1.3        pk 		/* The page header will be taken from our page header pool */
    485   1.3        pk 		pp->pr_phoffset = 0;
    486  1.66   thorpej 		off = palloc->pa_pagesz;
    487  1.88       chs 		SPLAY_INIT(&pp->pr_phtree);
    488   1.2        pk 	}
    489   1.1        pk 
    490   1.3        pk 	/*
    491   1.3        pk 	 * Alignment is to take place at `ioff' within the item. This means
    492   1.3        pk 	 * we must reserve up to `align - 1' bytes on the page to allow
    493   1.3        pk 	 * appropriate positioning of each item.
    494   1.3        pk 	 */
    495   1.3        pk 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    496  1.43   thorpej 	KASSERT(pp->pr_itemsperpage != 0);
    497   1.3        pk 
    498   1.3        pk 	/*
    499   1.3        pk 	 * Use the slack between the chunks and the page header
    500   1.3        pk 	 * for "cache coloring".
    501   1.3        pk 	 */
    502   1.3        pk 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    503   1.3        pk 	pp->pr_maxcolor = (slack / align) * align;
    504   1.3        pk 	pp->pr_curcolor = 0;
    505   1.3        pk 
    506   1.3        pk 	pp->pr_nget = 0;
    507   1.3        pk 	pp->pr_nfail = 0;
    508   1.3        pk 	pp->pr_nput = 0;
    509   1.3        pk 	pp->pr_npagealloc = 0;
    510   1.3        pk 	pp->pr_npagefree = 0;
    511   1.1        pk 	pp->pr_hiwat = 0;
    512   1.8   thorpej 	pp->pr_nidle = 0;
    513   1.3        pk 
    514  1.59   thorpej #ifdef POOL_DIAGNOSTIC
    515  1.25   thorpej 	if (flags & PR_LOGGING) {
    516  1.25   thorpej 		if (kmem_map == NULL ||
    517  1.25   thorpej 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    518  1.25   thorpej 		     M_TEMP, M_NOWAIT)) == NULL)
    519  1.20   thorpej 			pp->pr_roflags &= ~PR_LOGGING;
    520   1.3        pk 		pp->pr_curlogentry = 0;
    521   1.3        pk 		pp->pr_logsize = pool_logsize;
    522   1.3        pk 	}
    523  1.59   thorpej #endif
    524  1.25   thorpej 
    525  1.25   thorpej 	pp->pr_entered_file = NULL;
    526  1.25   thorpej 	pp->pr_entered_line = 0;
    527   1.3        pk 
    528  1.21   thorpej 	simple_lock_init(&pp->pr_slock);
    529   1.1        pk 
    530   1.3        pk 	/*
    531  1.43   thorpej 	 * Initialize private page header pool and cache magazine pool if we
    532  1.43   thorpej 	 * haven't done so yet.
    533  1.23   thorpej 	 * XXX LOCKING.
    534   1.3        pk 	 */
    535   1.3        pk 	if (phpool.pr_size == 0) {
    536  1.62     bjh21 #ifdef POOL_SUBPAGE
    537  1.62     bjh21 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
    538  1.66   thorpej 		    "phpool", &pool_allocator_kmem);
    539  1.62     bjh21 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    540  1.66   thorpej 		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
    541  1.62     bjh21 #else
    542   1.3        pk 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
    543  1.66   thorpej 		    0, "phpool", NULL);
    544  1.62     bjh21 #endif
    545  1.43   thorpej 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
    546  1.66   thorpej 		    0, "pcgpool", NULL);
    547   1.1        pk 	}
    548   1.1        pk 
    549  1.23   thorpej 	/* Insert into the list of all pools. */
    550  1.23   thorpej 	simple_lock(&pool_head_slock);
    551  1.23   thorpej 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    552  1.23   thorpej 	simple_unlock(&pool_head_slock);
    553  1.66   thorpej 
    554  1.66   thorpej 	/* Insert this into the list of pools using this allocator. */
    555  1.93       dbj 	s = splvm();
    556  1.66   thorpej 	simple_lock(&palloc->pa_slock);
    557  1.66   thorpej 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    558  1.66   thorpej 	simple_unlock(&palloc->pa_slock);
    559  1.93       dbj 	splx(s);
    560   1.1        pk }
    561   1.1        pk 
    562   1.1        pk /*
     563   1.1        pk  * De-commission a pool resource.
    564   1.1        pk  */
    565   1.1        pk void
    566  1.42   thorpej pool_destroy(struct pool *pp)
    567   1.1        pk {
    568   1.3        pk 	struct pool_item_header *ph;
    569  1.43   thorpej 	struct pool_cache *pc;
    570  1.93       dbj 	int s;
    571  1.43   thorpej 
    572  1.66   thorpej 	/* Locking order: pool_allocator -> pool */
    573  1.93       dbj 	s = splvm();
    574  1.66   thorpej 	simple_lock(&pp->pr_alloc->pa_slock);
    575  1.66   thorpej 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    576  1.66   thorpej 	simple_unlock(&pp->pr_alloc->pa_slock);
    577  1.93       dbj 	splx(s);
    578  1.66   thorpej 
    579  1.43   thorpej 	/* Destroy all caches for this pool. */
    580  1.43   thorpej 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
    581  1.43   thorpej 		pool_cache_destroy(pc);
    582   1.3        pk 
    583   1.3        pk #ifdef DIAGNOSTIC
    584  1.20   thorpej 	if (pp->pr_nout != 0) {
    585  1.25   thorpej 		pr_printlog(pp, NULL, printf);
    586  1.80    provos 		panic("pool_destroy: pool busy: still out: %u",
    587  1.20   thorpej 		    pp->pr_nout);
    588   1.3        pk 	}
    589   1.3        pk #endif
    590   1.1        pk 
    591   1.3        pk 	/* Remove all pages */
    592  1.88       chs 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
    593  1.70   thorpej 		pr_rmpage(pp, ph, NULL);
    594  1.88       chs 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
    595  1.88       chs 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
    596   1.3        pk 
    597   1.3        pk 	/* Remove from global pool list */
    598  1.23   thorpej 	simple_lock(&pool_head_slock);
    599   1.3        pk 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    600  1.61       chs 	if (drainpp == pp) {
    601  1.61       chs 		drainpp = NULL;
    602  1.61       chs 	}
    603  1.23   thorpej 	simple_unlock(&pool_head_slock);
    604   1.3        pk 
    605  1.59   thorpej #ifdef POOL_DIAGNOSTIC
    606  1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    607   1.3        pk 		free(pp->pr_log, M_TEMP);
    608  1.59   thorpej #endif
    609   1.1        pk }
    610   1.1        pk 
    611  1.68   thorpej void
    612  1.68   thorpej pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    613  1.68   thorpej {
    614  1.68   thorpej 
    615  1.68   thorpej 	/* XXX no locking -- must be used just after pool_init() */
    616  1.68   thorpej #ifdef DIAGNOSTIC
    617  1.68   thorpej 	if (pp->pr_drain_hook != NULL)
    618  1.68   thorpej 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
    619  1.68   thorpej #endif
    620  1.68   thorpej 	pp->pr_drain_hook = fn;
    621  1.68   thorpej 	pp->pr_drain_hook_arg = arg;
    622  1.68   thorpej }
    623  1.68   thorpej 
    624  1.88       chs static struct pool_item_header *
    625  1.55   thorpej pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
    626  1.55   thorpej {
    627  1.55   thorpej 	struct pool_item_header *ph;
    628  1.55   thorpej 	int s;
    629  1.55   thorpej 
    630  1.55   thorpej 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
    631  1.55   thorpej 
    632  1.55   thorpej 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    633  1.55   thorpej 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
    634  1.55   thorpej 	else {
    635  1.85        pk 		s = splvm();
    636  1.55   thorpej 		ph = pool_get(&phpool, flags);
    637  1.55   thorpej 		splx(s);
    638  1.55   thorpej 	}
    639  1.55   thorpej 
    640  1.55   thorpej 	return (ph);
    641  1.55   thorpej }
    642   1.1        pk 
    643   1.1        pk /*
    644   1.3        pk  * Grab an item from the pool; must be called at appropriate spl level
    645   1.1        pk  */
    646   1.3        pk void *
    647  1.59   thorpej #ifdef POOL_DIAGNOSTIC
    648  1.42   thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
    649  1.56  sommerfe #else
    650  1.56  sommerfe pool_get(struct pool *pp, int flags)
    651  1.56  sommerfe #endif
    652   1.1        pk {
    653   1.1        pk 	struct pool_item *pi;
    654   1.3        pk 	struct pool_item_header *ph;
    655  1.55   thorpej 	void *v;
    656   1.1        pk 
    657   1.2        pk #ifdef DIAGNOSTIC
    658  1.84   thorpej 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
    659  1.37  sommerfe 			    (flags & PR_WAITOK) != 0))
    660  1.77      matt 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
    661  1.58   thorpej 
    662  1.58   thorpej #ifdef LOCKDEBUG
    663  1.58   thorpej 	if (flags & PR_WAITOK)
    664  1.58   thorpej 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
    665  1.56  sommerfe #endif
    666  1.58   thorpej #endif /* DIAGNOSTIC */
    667   1.1        pk 
    668  1.21   thorpej 	simple_lock(&pp->pr_slock);
    669  1.25   thorpej 	pr_enter(pp, file, line);
    670  1.20   thorpej 
    671  1.20   thorpej  startover:
    672  1.20   thorpej 	/*
    673  1.20   thorpej 	 * Check to see if we've reached the hard limit.  If we have,
    674  1.20   thorpej 	 * and we can wait, then wait until an item has been returned to
    675  1.20   thorpej 	 * the pool.
    676  1.20   thorpej 	 */
    677  1.20   thorpej #ifdef DIAGNOSTIC
    678  1.34   thorpej 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
    679  1.25   thorpej 		pr_leave(pp);
    680  1.21   thorpej 		simple_unlock(&pp->pr_slock);
    681  1.20   thorpej 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
    682  1.20   thorpej 	}
    683  1.20   thorpej #endif
    684  1.34   thorpej 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    685  1.68   thorpej 		if (pp->pr_drain_hook != NULL) {
    686  1.68   thorpej 			/*
    687  1.68   thorpej 			 * Since the drain hook is going to free things
    688  1.68   thorpej 			 * back to the pool, unlock, call the hook, re-lock,
    689  1.68   thorpej 			 * and check the hardlimit condition again.
    690  1.68   thorpej 			 */
    691  1.68   thorpej 			pr_leave(pp);
    692  1.68   thorpej 			simple_unlock(&pp->pr_slock);
    693  1.68   thorpej 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
    694  1.68   thorpej 			simple_lock(&pp->pr_slock);
    695  1.68   thorpej 			pr_enter(pp, file, line);
    696  1.68   thorpej 			if (pp->pr_nout < pp->pr_hardlimit)
    697  1.68   thorpej 				goto startover;
    698  1.68   thorpej 		}
    699  1.68   thorpej 
    700  1.29  sommerfe 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    701  1.20   thorpej 			/*
    702  1.20   thorpej 			 * XXX: A warning isn't logged in this case.  Should
    703  1.20   thorpej 			 * it be?
    704  1.20   thorpej 			 */
    705  1.20   thorpej 			pp->pr_flags |= PR_WANTED;
    706  1.25   thorpej 			pr_leave(pp);
    707  1.40  sommerfe 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    708  1.25   thorpej 			pr_enter(pp, file, line);
    709  1.20   thorpej 			goto startover;
    710  1.20   thorpej 		}
    711  1.31   thorpej 
    712  1.31   thorpej 		/*
    713  1.31   thorpej 		 * Log a message that the hard limit has been hit.
    714  1.31   thorpej 		 */
    715  1.31   thorpej 		if (pp->pr_hardlimit_warning != NULL &&
    716  1.31   thorpej 		    ratecheck(&pp->pr_hardlimit_warning_last,
    717  1.31   thorpej 			      &pp->pr_hardlimit_ratecap))
    718  1.31   thorpej 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    719  1.21   thorpej 
    720  1.21   thorpej 		pp->pr_nfail++;
    721  1.21   thorpej 
    722  1.25   thorpej 		pr_leave(pp);
    723  1.21   thorpej 		simple_unlock(&pp->pr_slock);
    724  1.20   thorpej 		return (NULL);
    725  1.20   thorpej 	}
    726  1.20   thorpej 
    727   1.3        pk 	/*
    728   1.3        pk 	 * The convention we use is that if `curpage' is not NULL, then
    729   1.3        pk 	 * it points at a non-empty bucket. In particular, `curpage'
    730   1.3        pk 	 * never points at a page header which has PR_PHINPAGE set and
    731   1.3        pk 	 * has no items in its bucket.
    732   1.3        pk 	 */
    733  1.20   thorpej 	if ((ph = pp->pr_curpage) == NULL) {
    734  1.20   thorpej #ifdef DIAGNOSTIC
    735  1.20   thorpej 		if (pp->pr_nitems != 0) {
    736  1.21   thorpej 			simple_unlock(&pp->pr_slock);
    737  1.20   thorpej 			printf("pool_get: %s: curpage NULL, nitems %u\n",
    738  1.20   thorpej 			    pp->pr_wchan, pp->pr_nitems);
    739  1.80    provos 			panic("pool_get: nitems inconsistent");
    740  1.20   thorpej 		}
    741  1.20   thorpej #endif
    742  1.20   thorpej 
    743  1.21   thorpej 		/*
    744  1.21   thorpej 		 * Call the back-end page allocator for more memory.
    745  1.21   thorpej 		 * Release the pool lock, as the back-end page allocator
    746  1.21   thorpej 		 * may block.
    747  1.21   thorpej 		 */
    748  1.25   thorpej 		pr_leave(pp);
    749  1.21   thorpej 		simple_unlock(&pp->pr_slock);
    750  1.66   thorpej 		v = pool_allocator_alloc(pp, flags);
    751  1.55   thorpej 		if (__predict_true(v != NULL))
    752  1.55   thorpej 			ph = pool_alloc_item_header(pp, v, flags);
    753  1.15        pk 
    754  1.55   thorpej 		if (__predict_false(v == NULL || ph == NULL)) {
    755  1.55   thorpej 			if (v != NULL)
    756  1.66   thorpej 				pool_allocator_free(pp, v);
    757  1.55   thorpej 
    758  1.91      yamt 			simple_lock(&pp->pr_slock);
    759  1.91      yamt 			pr_enter(pp, file, line);
    760  1.91      yamt 
    761  1.21   thorpej 			/*
    762  1.55   thorpej 			 * We were unable to allocate a page or item
    763  1.55   thorpej 			 * header, but we released the lock during
    764  1.55   thorpej 			 * allocation, so perhaps items were freed
    765  1.55   thorpej 			 * back to the pool.  Check for this case.
    766  1.21   thorpej 			 */
    767  1.21   thorpej 			if (pp->pr_curpage != NULL)
    768  1.21   thorpej 				goto startover;
    769  1.15        pk 
    770   1.3        pk 			if ((flags & PR_WAITOK) == 0) {
    771   1.3        pk 				pp->pr_nfail++;
    772  1.25   thorpej 				pr_leave(pp);
    773  1.21   thorpej 				simple_unlock(&pp->pr_slock);
    774   1.1        pk 				return (NULL);
    775   1.3        pk 			}
    776   1.3        pk 
    777  1.15        pk 			/*
    778  1.15        pk 			 * Wait for items to be returned to this pool.
    779  1.21   thorpej 			 *
    780  1.20   thorpej 			 * XXX: maybe we should wake up once a second and
    781  1.20   thorpej 			 * try again?
    782  1.15        pk 			 */
    783   1.1        pk 			pp->pr_flags |= PR_WANTED;
    784  1.66   thorpej 			/* PA_WANTED is already set on the allocator. */
    785  1.25   thorpej 			pr_leave(pp);
    786  1.40  sommerfe 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    787  1.25   thorpej 			pr_enter(pp, file, line);
    788  1.20   thorpej 			goto startover;
    789   1.1        pk 		}
    790   1.3        pk 
    791  1.15        pk 		/* We have more memory; add it to the pool */
    792  1.91      yamt 		simple_lock(&pp->pr_slock);
    793  1.91      yamt 		pr_enter(pp, file, line);
    794  1.55   thorpej 		pool_prime_page(pp, v, ph);
    795  1.15        pk 		pp->pr_npagealloc++;
    796  1.15        pk 
    797  1.20   thorpej 		/* Start the allocation process over. */
    798  1.20   thorpej 		goto startover;
    799   1.3        pk 	}
    800  1.34   thorpej 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
    801  1.25   thorpej 		pr_leave(pp);
    802  1.21   thorpej 		simple_unlock(&pp->pr_slock);
    803   1.3        pk 		panic("pool_get: %s: page empty", pp->pr_wchan);
    804  1.21   thorpej 	}
    805  1.20   thorpej #ifdef DIAGNOSTIC
    806  1.34   thorpej 	if (__predict_false(pp->pr_nitems == 0)) {
    807  1.25   thorpej 		pr_leave(pp);
    808  1.21   thorpej 		simple_unlock(&pp->pr_slock);
    809  1.20   thorpej 		printf("pool_get: %s: items on itemlist, nitems %u\n",
    810  1.20   thorpej 		    pp->pr_wchan, pp->pr_nitems);
    811  1.80    provos 		panic("pool_get: nitems inconsistent");
    812  1.20   thorpej 	}
    813  1.65     enami #endif
    814  1.56  sommerfe 
    815  1.65     enami #ifdef POOL_DIAGNOSTIC
    816   1.3        pk 	pr_log(pp, v, PRLOG_GET, file, line);
    817  1.65     enami #endif
    818   1.3        pk 
    819  1.65     enami #ifdef DIAGNOSTIC
    820  1.34   thorpej 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
    821  1.25   thorpej 		pr_printlog(pp, pi, printf);
    822   1.3        pk 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
    823   1.3        pk 		       " item addr %p\n",
    824   1.3        pk 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
    825   1.3        pk 	}
    826   1.3        pk #endif
    827   1.3        pk 
    828   1.3        pk 	/*
    829   1.3        pk 	 * Remove from item list.
    830   1.3        pk 	 */
    831   1.3        pk 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
    832  1.20   thorpej 	pp->pr_nitems--;
    833  1.20   thorpej 	pp->pr_nout++;
    834   1.6   thorpej 	if (ph->ph_nmissing == 0) {
    835   1.6   thorpej #ifdef DIAGNOSTIC
    836  1.34   thorpej 		if (__predict_false(pp->pr_nidle == 0))
    837   1.6   thorpej 			panic("pool_get: nidle inconsistent");
    838   1.6   thorpej #endif
    839   1.6   thorpej 		pp->pr_nidle--;
    840  1.88       chs 
    841  1.88       chs 		/*
    842  1.88       chs 		 * This page was previously empty.  Move it to the list of
    843  1.88       chs 		 * partially-full pages.  This page is already curpage.
    844  1.88       chs 		 */
    845  1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
    846  1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
    847   1.6   thorpej 	}
    848   1.3        pk 	ph->ph_nmissing++;
    849  1.88       chs 	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
    850  1.21   thorpej #ifdef DIAGNOSTIC
    851  1.34   thorpej 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
    852  1.25   thorpej 			pr_leave(pp);
    853  1.21   thorpej 			simple_unlock(&pp->pr_slock);
    854  1.21   thorpej 			panic("pool_get: %s: nmissing inconsistent",
    855  1.21   thorpej 			    pp->pr_wchan);
    856  1.21   thorpej 		}
    857  1.21   thorpej #endif
    858   1.3        pk 		/*
    859  1.88       chs 		 * This page is now full.  Move it to the full list
    860  1.88       chs 		 * and select a new current page.
    861   1.3        pk 		 */
    862  1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
    863  1.88       chs 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
    864  1.88       chs 		pool_update_curpage(pp);
    865   1.1        pk 	}
    866   1.3        pk 
    867   1.3        pk 	pp->pr_nget++;
    868  1.20   thorpej 
    869  1.20   thorpej 	/*
    870  1.20   thorpej 	 * If we have a low water mark and we are now below that low
    871  1.20   thorpej 	 * water mark, add more items to the pool.
    872  1.20   thorpej 	 */
    873  1.53   thorpej 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
    874  1.20   thorpej 		/*
    875  1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
    876  1.20   thorpej 		 * to try again in a second or so?  The latter could break
    877  1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
    878  1.20   thorpej 		 */
    879  1.20   thorpej 	}
    880  1.20   thorpej 
    881  1.25   thorpej 	pr_leave(pp);
    882  1.21   thorpej 	simple_unlock(&pp->pr_slock);
    883   1.1        pk 	return (v);
    884   1.1        pk }
    885   1.1        pk 
    886   1.1        pk /*
    887  1.43   thorpej  * Internal version of pool_put().  Pool is already locked/entered.
    888   1.1        pk  */
    889  1.43   thorpej static void
    890  1.56  sommerfe pool_do_put(struct pool *pp, void *v)
    891   1.1        pk {
    892   1.1        pk 	struct pool_item *pi = v;
    893   1.3        pk 	struct pool_item_header *ph;
    894   1.3        pk 	caddr_t page;
    895  1.21   thorpej 	int s;
    896   1.3        pk 
    897  1.61       chs 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
    898  1.61       chs 
    899  1.66   thorpej 	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
    900   1.1        pk 
    901  1.30   thorpej #ifdef DIAGNOSTIC
    902  1.34   thorpej 	if (__predict_false(pp->pr_nout == 0)) {
    903  1.30   thorpej 		printf("pool %s: putting with none out\n",
    904  1.30   thorpej 		    pp->pr_wchan);
    905  1.30   thorpej 		panic("pool_put");
    906  1.30   thorpej 	}
    907  1.30   thorpej #endif
    908   1.3        pk 
    909  1.34   thorpej 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
    910  1.25   thorpej 		pr_printlog(pp, NULL, printf);
    911   1.3        pk 		panic("pool_put: %s: page header missing", pp->pr_wchan);
    912   1.3        pk 	}
    913  1.28   thorpej 
    914  1.28   thorpej #ifdef LOCKDEBUG
    915  1.28   thorpej 	/*
    916  1.28   thorpej 	 * Check if we're freeing a locked simple lock.
    917  1.28   thorpej 	 */
    918  1.28   thorpej 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
    919  1.28   thorpej #endif
    920   1.3        pk 
    921   1.3        pk 	/*
    922   1.3        pk 	 * Return to item list.
    923   1.3        pk 	 */
    924   1.2        pk #ifdef DIAGNOSTIC
    925   1.3        pk 	pi->pi_magic = PI_MAGIC;
    926   1.3        pk #endif
    927  1.32       chs #ifdef DEBUG
    928  1.32       chs 	{
    929  1.32       chs 		int i, *ip = v;
    930  1.32       chs 
    931  1.32       chs 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
    932  1.32       chs 			*ip++ = PI_MAGIC;
    933  1.32       chs 		}
    934  1.32       chs 	}
    935  1.32       chs #endif
    936  1.32       chs 
    937   1.3        pk 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
    938  1.79   thorpej 	KDASSERT(ph->ph_nmissing != 0);
    939   1.3        pk 	ph->ph_nmissing--;
    940   1.3        pk 	pp->pr_nput++;
    941  1.20   thorpej 	pp->pr_nitems++;
    942  1.20   thorpej 	pp->pr_nout--;
    943   1.3        pk 
    944   1.3        pk 	/* Cancel "pool empty" condition if it exists */
    945   1.3        pk 	if (pp->pr_curpage == NULL)
    946   1.3        pk 		pp->pr_curpage = ph;
    947   1.3        pk 
    948   1.3        pk 	if (pp->pr_flags & PR_WANTED) {
    949   1.3        pk 		pp->pr_flags &= ~PR_WANTED;
    950  1.15        pk 		if (ph->ph_nmissing == 0)
    951  1.15        pk 			pp->pr_nidle++;
    952   1.3        pk 		wakeup((caddr_t)pp);
    953   1.3        pk 		return;
    954   1.3        pk 	}
    955   1.3        pk 
    956   1.3        pk 	/*
    957  1.88       chs 	 * If this page is now empty, do one of two things:
    958  1.21   thorpej 	 *
    959  1.88       chs 	 *	(1) If we have more pages than the page high water mark,
    960  1.90   thorpej 	 *	    or if we are flagged as immediately freeing back idle
    961  1.90   thorpej 	 *	    pages, free the page back to the system.  ONLY CONSIDER
    962  1.90   thorpej 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
    963  1.90   thorpej 	 *	    CLAIM.
    964  1.21   thorpej 	 *
    965  1.88       chs 	 *	(2) Otherwise, move the page to the empty page list.
    966  1.88       chs 	 *
    967  1.88       chs 	 * Either way, select a new current page (so we use a partially-full
    968  1.88       chs 	 * page if one is available).
    969   1.3        pk 	 */
    970   1.3        pk 	if (ph->ph_nmissing == 0) {
    971   1.6   thorpej 		pp->pr_nidle++;
    972  1.90   thorpej 		if (pp->pr_npages > pp->pr_minpages &&
    973  1.90   thorpej 		    (pp->pr_npages > pp->pr_maxpages ||
    974  1.90   thorpej 		     (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
    975  1.90   thorpej 		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
    976  1.91      yamt 			simple_unlock(&pp->pr_slock);
    977  1.61       chs 			pr_rmpage(pp, ph, NULL);
    978  1.91      yamt 			simple_lock(&pp->pr_slock);
    979   1.3        pk 		} else {
    980  1.88       chs 			LIST_REMOVE(ph, ph_pagelist);
    981  1.88       chs 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
    982   1.3        pk 
    983  1.21   thorpej 			/*
    984  1.21   thorpej 			 * Update the timestamp on the page.  A page must
    985  1.21   thorpej 			 * be idle for some period of time before it can
    986  1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
    987  1.21   thorpej 			 * ping-pong'ing for memory.
    988  1.21   thorpej 			 */
    989  1.21   thorpej 			s = splclock();
    990  1.21   thorpej 			ph->ph_time = mono_time;
    991  1.21   thorpej 			splx(s);
    992   1.1        pk 		}
    993  1.88       chs 		pool_update_curpage(pp);
    994   1.1        pk 	}
    995  1.88       chs 
    996  1.21   thorpej 	/*
    997  1.88       chs 	 * If the page was previously completely full, move it to the
    998  1.88       chs 	 * partially-full list and make it the current page.  The next
    999  1.88       chs 	 * allocation will get the item from this page, instead of
   1000  1.88       chs 	 * further fragmenting the pool.
   1001  1.21   thorpej 	 */
   1002  1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1003  1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1004  1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1005  1.21   thorpej 		pp->pr_curpage = ph;
   1006  1.21   thorpej 	}
   1007  1.43   thorpej }
   1008  1.43   thorpej 
   1009  1.43   thorpej /*
   1010  1.43   thorpej  * Return resource to the pool; must be called at appropriate spl level
   1011  1.43   thorpej  */
   1012  1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1013  1.43   thorpej void
   1014  1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
   1015  1.43   thorpej {
   1016  1.43   thorpej 
   1017  1.43   thorpej 	simple_lock(&pp->pr_slock);
   1018  1.43   thorpej 	pr_enter(pp, file, line);
   1019  1.43   thorpej 
   1020  1.56  sommerfe 	pr_log(pp, v, PRLOG_PUT, file, line);
   1021  1.56  sommerfe 
   1022  1.56  sommerfe 	pool_do_put(pp, v);
   1023  1.21   thorpej 
   1024  1.25   thorpej 	pr_leave(pp);
   1025  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1026   1.1        pk }
   1027  1.57  sommerfe #undef pool_put
   1028  1.59   thorpej #endif /* POOL_DIAGNOSTIC */
   1029   1.1        pk 
   1030  1.56  sommerfe void
   1031  1.56  sommerfe pool_put(struct pool *pp, void *v)
   1032  1.56  sommerfe {
   1033  1.56  sommerfe 
   1034  1.56  sommerfe 	simple_lock(&pp->pr_slock);
   1035  1.56  sommerfe 
   1036  1.56  sommerfe 	pool_do_put(pp, v);
   1037  1.56  sommerfe 
   1038  1.56  sommerfe 	simple_unlock(&pp->pr_slock);
   1039  1.56  sommerfe }
   1040  1.57  sommerfe 
   1041  1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1042  1.57  sommerfe #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1043  1.56  sommerfe #endif
   1044  1.74   thorpej 
   1045  1.74   thorpej /*
   1046  1.74   thorpej  * Add N items to the pool.
   1047  1.74   thorpej  */
   1048  1.74   thorpej int
   1049  1.74   thorpej pool_prime(struct pool *pp, int n)
   1050  1.74   thorpej {
   1051  1.83       scw 	struct pool_item_header *ph = NULL;
   1052  1.74   thorpej 	caddr_t cp;
   1053  1.75    simonb 	int newpages;
   1054  1.74   thorpej 
   1055  1.74   thorpej 	simple_lock(&pp->pr_slock);
   1056  1.74   thorpej 
   1057  1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1058  1.74   thorpej 
   1059  1.74   thorpej 	while (newpages-- > 0) {
   1060  1.74   thorpej 		simple_unlock(&pp->pr_slock);
   1061  1.74   thorpej 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
   1062  1.74   thorpej 		if (__predict_true(cp != NULL))
   1063  1.74   thorpej 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1064  1.74   thorpej 
   1065  1.74   thorpej 		if (__predict_false(cp == NULL || ph == NULL)) {
   1066  1.74   thorpej 			if (cp != NULL)
   1067  1.74   thorpej 				pool_allocator_free(pp, cp);
   1068  1.91      yamt 			simple_lock(&pp->pr_slock);
   1069  1.74   thorpej 			break;
   1070  1.74   thorpej 		}
   1071  1.74   thorpej 
   1072  1.91      yamt 		simple_lock(&pp->pr_slock);
   1073  1.74   thorpej 		pool_prime_page(pp, cp, ph);
   1074  1.74   thorpej 		pp->pr_npagealloc++;
   1075  1.74   thorpej 		pp->pr_minpages++;
   1076  1.74   thorpej 	}
   1077  1.74   thorpej 
   1078  1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1079  1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1080  1.74   thorpej 
   1081  1.74   thorpej 	simple_unlock(&pp->pr_slock);
   1082  1.74   thorpej 	return (0);
   1083  1.74   thorpej }
   1084  1.55   thorpej 
   1085  1.55   thorpej /*
   1086   1.3        pk  * Add a page worth of items to the pool.
   1087  1.21   thorpej  *
   1088  1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1089   1.3        pk  */
   1090  1.55   thorpej static void
   1091  1.55   thorpej pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
   1092   1.3        pk {
   1093   1.3        pk 	struct pool_item *pi;
   1094   1.3        pk 	caddr_t cp = storage;
   1095   1.3        pk 	unsigned int align = pp->pr_align;
   1096   1.3        pk 	unsigned int ioff = pp->pr_itemoffset;
   1097  1.55   thorpej 	int n;
   1098  1.89      yamt 	int s;
   1099  1.36        pk 
   1100  1.91      yamt 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
   1101  1.91      yamt 
   1102  1.66   thorpej #ifdef DIAGNOSTIC
   1103  1.66   thorpej 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1104  1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1105  1.66   thorpej #endif
   1106   1.3        pk 
   1107   1.3        pk 	/*
   1108   1.3        pk 	 * Insert page header.
   1109   1.3        pk 	 */
   1110  1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1111   1.3        pk 	TAILQ_INIT(&ph->ph_itemlist);
   1112   1.3        pk 	ph->ph_page = storage;
   1113   1.3        pk 	ph->ph_nmissing = 0;
   1114  1.89      yamt 	s = splclock();
   1115  1.89      yamt 	ph->ph_time = mono_time;
   1116  1.89      yamt 	splx(s);
   1117  1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1118  1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1119   1.3        pk 
   1120   1.6   thorpej 	pp->pr_nidle++;
   1121   1.6   thorpej 
   1122   1.3        pk 	/*
   1123   1.3        pk 	 * Color this page.
   1124   1.3        pk 	 */
   1125   1.3        pk 	cp = (caddr_t)(cp + pp->pr_curcolor);
   1126   1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1127   1.3        pk 		pp->pr_curcolor = 0;
   1128   1.3        pk 
   1129   1.3        pk 	/*
    1130   1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1131   1.3        pk 	 */
   1132   1.3        pk 	if (ioff != 0)
   1133   1.3        pk 		cp = (caddr_t)(cp + (align - ioff));
   1134   1.3        pk 
   1135   1.3        pk 	/*
   1136   1.3        pk 	 * Insert remaining chunks on the bucket list.
   1137   1.3        pk 	 */
   1138   1.3        pk 	n = pp->pr_itemsperpage;
   1139  1.20   thorpej 	pp->pr_nitems += n;
   1140   1.3        pk 
   1141   1.3        pk 	while (n--) {
   1142   1.3        pk 		pi = (struct pool_item *)cp;
   1143  1.78   thorpej 
   1144  1.78   thorpej 		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1145   1.3        pk 
   1146   1.3        pk 		/* Insert on page list */
   1147   1.3        pk 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
   1148   1.3        pk #ifdef DIAGNOSTIC
   1149   1.3        pk 		pi->pi_magic = PI_MAGIC;
   1150   1.3        pk #endif
   1151   1.3        pk 		cp = (caddr_t)(cp + pp->pr_size);
   1152   1.3        pk 	}
   1153   1.3        pk 
   1154   1.3        pk 	/*
   1155   1.3        pk 	 * If the pool was depleted, point at the new page.
   1156   1.3        pk 	 */
   1157   1.3        pk 	if (pp->pr_curpage == NULL)
   1158   1.3        pk 		pp->pr_curpage = ph;
   1159   1.3        pk 
   1160   1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1161   1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1162   1.3        pk }
   1163   1.3        pk 
   1164  1.20   thorpej /*
   1165  1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark.  This
    1166  1.88       chs  * brings pr_nitems back up to the low water mark.
   1167  1.20   thorpej  *
   1168  1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1169  1.20   thorpej  *
   1170  1.73   thorpej  * Note 2, we must be called with the pool already locked, and we return
   1171  1.20   thorpej  * with it locked.
   1172  1.20   thorpej  */
   1173  1.20   thorpej static int
   1174  1.42   thorpej pool_catchup(struct pool *pp)
   1175  1.20   thorpej {
   1176  1.83       scw 	struct pool_item_header *ph = NULL;
   1177  1.20   thorpej 	caddr_t cp;
   1178  1.20   thorpej 	int error = 0;
   1179  1.20   thorpej 
   1180  1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1181  1.20   thorpej 		/*
   1182  1.21   thorpej 		 * Call the page back-end allocator for more memory.
   1183  1.21   thorpej 		 *
   1184  1.21   thorpej 		 * XXX: We never wait, so should we bother unlocking
   1185  1.21   thorpej 		 * the pool descriptor?
   1186  1.20   thorpej 		 */
   1187  1.21   thorpej 		simple_unlock(&pp->pr_slock);
   1188  1.66   thorpej 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
   1189  1.55   thorpej 		if (__predict_true(cp != NULL))
   1190  1.55   thorpej 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1191  1.55   thorpej 		if (__predict_false(cp == NULL || ph == NULL)) {
   1192  1.55   thorpej 			if (cp != NULL)
   1193  1.66   thorpej 				pool_allocator_free(pp, cp);
   1194  1.20   thorpej 			error = ENOMEM;
   1195  1.91      yamt 			simple_lock(&pp->pr_slock);
   1196  1.20   thorpej 			break;
   1197  1.20   thorpej 		}
   1198  1.91      yamt 		simple_lock(&pp->pr_slock);
   1199  1.55   thorpej 		pool_prime_page(pp, cp, ph);
   1200  1.26   thorpej 		pp->pr_npagealloc++;
   1201  1.20   thorpej 	}
   1202  1.20   thorpej 
   1203  1.20   thorpej 	return (error);
   1204  1.20   thorpej }
   1205  1.20   thorpej 
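/*
 * pool_update_curpage:
 *
 *	Point pr_curpage at a page we can allocate from: prefer a
 *	partially-used page, fall back to a completely idle page, and
 *	leave it NULL if neither list has any pages.
 */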
   1206  1.88       chs static void
   1207  1.88       chs pool_update_curpage(struct pool *pp)
   1208  1.88       chs {
   1209  1.88       chs 
   1210  1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1211  1.88       chs 	if (pp->pr_curpage == NULL) {
   1212  1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1213  1.88       chs 	}
   1214  1.88       chs }
   1215  1.88       chs 
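/*
 * pool_setlowat:
 *
 *	Set the low water mark (in items) and immediately try to catch
 *	the pool up to it.
 */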
   1216   1.3        pk void
   1217  1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1218   1.3        pk {
   1219  1.15        pk 
   1220  1.21   thorpej 	simple_lock(&pp->pr_slock);
   1221  1.21   thorpej 
   1222   1.3        pk 	pp->pr_minitems = n;
   1223  1.15        pk 	pp->pr_minpages = (n == 0)
   1224  1.15        pk 		? 0
   1225  1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1226  1.20   thorpej 
   1227  1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1228  1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1229  1.20   thorpej 		/*
   1230  1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1231  1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1232  1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1233  1.20   thorpej 		 */
   1234  1.20   thorpej 	}
   1235  1.21   thorpej 
   1236  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1237   1.3        pk }
   1238   1.3        pk 
   1239   1.3        pk void
   1240  1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1241   1.3        pk {
   1242  1.15        pk 
   1243  1.21   thorpej 	simple_lock(&pp->pr_slock);
   1244  1.21   thorpej 
   1245  1.15        pk 	pp->pr_maxpages = (n == 0)
   1246  1.15        pk 		? 0
   1247  1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1248  1.21   thorpej 
   1249  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1250   1.3        pk }
   1251   1.3        pk 
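/*
 * pool_sethardlimit:
 *
 *	Set the hard limit on the number of items the pool may hand out,
 *	along with the warning message logged (at most once every
 *	"ratecap" seconds) when the limit is hit.
 */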
   1252  1.20   thorpej void
   1253  1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1254  1.20   thorpej {
   1255  1.20   thorpej 
   1256  1.21   thorpej 	simple_lock(&pp->pr_slock);
   1257  1.20   thorpej 
   1258  1.20   thorpej 	pp->pr_hardlimit = n;
   1259  1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1260  1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1261  1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1262  1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1263  1.20   thorpej 
   1264  1.20   thorpej 	/*
   1265  1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1266  1.21   thorpej 	 * release the lock.
   1267  1.20   thorpej 	 */
   1268  1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1269  1.20   thorpej 		? 0
   1270  1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1271  1.21   thorpej 
   1272  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1273  1.20   thorpej }
   1274   1.3        pk 
   1275   1.3        pk /*
   1276   1.3        pk  * Release all complete pages that have not been used recently.
   1277   1.3        pk  */
   1278  1.66   thorpej int
   1279  1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1280  1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1281  1.56  sommerfe #else
   1282  1.56  sommerfe pool_reclaim(struct pool *pp)
   1283  1.56  sommerfe #endif
   1284   1.3        pk {
   1285   1.3        pk 	struct pool_item_header *ph, *phnext;
   1286  1.43   thorpej 	struct pool_cache *pc;
   1287  1.21   thorpej 	struct timeval curtime;
   1288  1.61       chs 	struct pool_pagelist pq;
   1289  1.88       chs 	struct timeval diff;
   1290  1.21   thorpej 	int s;
   1291   1.3        pk 
   1292  1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1293  1.68   thorpej 		/*
   1294  1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1295  1.68   thorpej 		 */
   1296  1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1297  1.68   thorpej 	}
   1298  1.68   thorpej 
   1299  1.21   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1300  1.66   thorpej 		return (0);
   1301  1.25   thorpej 	pr_enter(pp, file, line);
   1302  1.68   thorpej 
   1303  1.88       chs 	LIST_INIT(&pq);
   1304   1.3        pk 
   1305  1.43   thorpej 	/*
   1306  1.43   thorpej 	 * Reclaim items from the pool's caches.
   1307  1.43   thorpej 	 */
   1308  1.61       chs 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1309  1.43   thorpej 		pool_cache_reclaim(pc);
   1310  1.43   thorpej 
   1311  1.21   thorpej 	s = splclock();
   1312  1.21   thorpej 	curtime = mono_time;
   1313  1.21   thorpej 	splx(s);
   1314  1.21   thorpej 
   1315  1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1316  1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1317   1.3        pk 
   1318   1.3        pk 		/* Check our minimum page claim */
   1319   1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1320   1.3        pk 			break;
   1321   1.3        pk 
   1322  1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1323  1.88       chs 		timersub(&curtime, &ph->ph_time, &diff);
   1324  1.88       chs 		if (diff.tv_sec < pool_inactive_time)
   1325  1.88       chs 			continue;
   1326  1.21   thorpej 
   1327  1.88       chs 		/*
   1328  1.88       chs 		 * If freeing this page would put us below
   1329  1.88       chs 		 * the low water mark, stop now.
   1330  1.88       chs 		 */
   1331  1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1332  1.88       chs 		    pp->pr_minitems)
   1333  1.88       chs 			break;
   1334  1.21   thorpej 
   1335  1.88       chs 		pr_rmpage(pp, ph, &pq);
   1336   1.3        pk 	}
   1337   1.3        pk 
   1338  1.25   thorpej 	pr_leave(pp);
   1339  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1340  1.88       chs 	if (LIST_EMPTY(&pq))
   1341  1.66   thorpej 		return (0);
   1342  1.66   thorpej 
   1343  1.88       chs 	while ((ph = LIST_FIRST(&pq)) != NULL) {
   1344  1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1345  1.66   thorpej 		pool_allocator_free(pp, ph->ph_page);
   1346  1.61       chs 		if (pp->pr_roflags & PR_PHINPAGE) {
   1347  1.61       chs 			continue;
   1348  1.61       chs 		}
   1349  1.85        pk 		s = splvm();
   1350  1.61       chs 		pool_put(&phpool, ph);
   1351  1.61       chs 		splx(s);
   1352  1.61       chs 	}
   1353  1.66   thorpej 
   1354  1.66   thorpej 	return (1);
   1355   1.3        pk }
   1356   1.3        pk 
   1357   1.3        pk /*
   1358   1.3        pk  * Drain pools, one at a time.
   1359  1.21   thorpej  *
   1360  1.21   thorpej  * Note, we must never be called from an interrupt context.
   1361   1.3        pk  */
   1362   1.3        pk void
   1363  1.42   thorpej pool_drain(void *arg)
   1364   1.3        pk {
   1365   1.3        pk 	struct pool *pp;
   1366  1.23   thorpej 	int s;
   1367   1.3        pk 
   1368  1.61       chs 	pp = NULL;
   1369  1.49   thorpej 	s = splvm();
   1370  1.23   thorpej 	simple_lock(&pool_head_slock);
   1371  1.61       chs 	if (drainpp == NULL) {
   1372  1.61       chs 		drainpp = TAILQ_FIRST(&pool_head);
   1373  1.61       chs 	}
   1374  1.61       chs 	if (drainpp) {
   1375  1.61       chs 		pp = drainpp;
   1376  1.61       chs 		drainpp = TAILQ_NEXT(pp, pr_poollist);
   1377  1.61       chs 	}
   1378  1.61       chs 	simple_unlock(&pool_head_slock);
   1379  1.63       chs 	pool_reclaim(pp);
   1380  1.61       chs 	splx(s);
   1381   1.3        pk }
   1382   1.3        pk 
   1383   1.3        pk /*
   1384   1.3        pk  * Diagnostic helpers.
   1385   1.3        pk  */
   1386   1.3        pk void
   1387  1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1388  1.21   thorpej {
   1389  1.21   thorpej 	int s;
   1390  1.21   thorpej 
   1391  1.49   thorpej 	s = splvm();
   1392  1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1393  1.25   thorpej 		printf("pool %s is locked; try again later\n",
   1394  1.25   thorpej 		    pp->pr_wchan);
   1395  1.25   thorpej 		splx(s);
   1396  1.25   thorpej 		return;
   1397  1.25   thorpej 	}
   1398  1.25   thorpej 	pool_print1(pp, modif, printf);
   1399  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1400  1.21   thorpej 	splx(s);
   1401  1.21   thorpej }
   1402  1.21   thorpej 
   1403  1.25   thorpej void
   1404  1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1405  1.25   thorpej {
   1406  1.25   thorpej 	int didlock = 0;
   1407  1.25   thorpej 
   1408  1.25   thorpej 	if (pp == NULL) {
   1409  1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1410  1.25   thorpej 		return;
   1411  1.25   thorpej 	}
   1412  1.25   thorpej 
   1413  1.25   thorpej 	/*
   1414  1.25   thorpej 	 * Called from DDB; interrupts should be blocked, and all
   1415  1.25   thorpej 	 * other processors should be paused.  We can skip locking
   1416  1.25   thorpej 	 * the pool in this case.
   1417  1.25   thorpej 	 *
   1418  1.25   thorpej 	 * We do a simple_lock_try() just to print the lock
   1419  1.25   thorpej 	 * status, however.
   1420  1.25   thorpej 	 */
   1421  1.25   thorpej 
   1422  1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1423  1.25   thorpej 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1424  1.25   thorpej 	else
   1425  1.25   thorpej 		didlock = 1;
   1426  1.25   thorpej 
   1427  1.25   thorpej 	pool_print1(pp, modif, pr);
   1428  1.25   thorpej 
   1429  1.25   thorpej 	if (didlock)
   1430  1.25   thorpej 		simple_unlock(&pp->pr_slock);
   1431  1.25   thorpej }
   1432  1.25   thorpej 
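/*
 * pool_print_pagelist:
 *
 *	Print one page-header list for pool_print1(); under DIAGNOSTIC,
 *	also flag any free item whose magic number has been corrupted.
 */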
   1433  1.21   thorpej static void
   1434  1.88       chs pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
   1435  1.88       chs {
   1436  1.88       chs 	struct pool_item_header *ph;
   1437  1.88       chs #ifdef DIAGNOSTIC
   1438  1.88       chs 	struct pool_item *pi;
   1439  1.88       chs #endif
   1440  1.88       chs 
   1441  1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1442  1.88       chs 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1443  1.88       chs 		    ph->ph_page, ph->ph_nmissing,
   1444  1.88       chs 		    (u_long)ph->ph_time.tv_sec,
   1445  1.88       chs 		    (u_long)ph->ph_time.tv_usec);
   1446  1.88       chs #ifdef DIAGNOSTIC
   1447  1.88       chs 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1448  1.88       chs 			if (pi->pi_magic != PI_MAGIC) {
   1449  1.88       chs 				(*pr)("\t\t\titem %p, magic 0x%x\n",
   1450  1.88       chs 				    pi, pi->pi_magic);
   1451  1.88       chs 			}
   1452  1.88       chs 		}
   1453  1.88       chs #endif
   1454  1.88       chs 	}
   1455  1.88       chs }
   1456  1.88       chs 
   1457  1.88       chs static void
   1458  1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1459   1.3        pk {
   1460  1.25   thorpej 	struct pool_item_header *ph;
   1461  1.44   thorpej 	struct pool_cache *pc;
   1462  1.44   thorpej 	struct pool_cache_group *pcg;
   1463  1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1464  1.25   thorpej 	char c;
   1465  1.25   thorpej 
   1466  1.25   thorpej 	while ((c = *modif++) != '\0') {
   1467  1.25   thorpej 		if (c == 'l')
   1468  1.25   thorpej 			print_log = 1;
   1469  1.25   thorpej 		if (c == 'p')
   1470  1.25   thorpej 			print_pagelist = 1;
   1471  1.44   thorpej 		if (c == 'c')
   1472  1.44   thorpej 			print_cache = 1;
   1473  1.25   thorpej 	}
   1474  1.25   thorpej 
   1475  1.25   thorpej 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1476  1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1477  1.25   thorpej 	    pp->pr_roflags);
   1478  1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1479  1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1480  1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1481  1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1482  1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1483  1.25   thorpej 
   1484  1.25   thorpej 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1485  1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1486  1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1487  1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1488  1.25   thorpej 
   1489  1.25   thorpej 	if (print_pagelist == 0)
   1490  1.25   thorpej 		goto skip_pagelist;
   1491  1.25   thorpej 
   1492  1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1493  1.88       chs 		(*pr)("\n\tempty page list:\n");
   1494  1.88       chs 	pool_print_pagelist(&pp->pr_emptypages, pr);
   1495  1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1496  1.88       chs 		(*pr)("\n\tfull page list:\n");
   1497  1.88       chs 	pool_print_pagelist(&pp->pr_fullpages, pr);
   1498  1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1499  1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1500  1.88       chs 	pool_print_pagelist(&pp->pr_partpages, pr);
   1501  1.88       chs 
   1502  1.25   thorpej 	if (pp->pr_curpage == NULL)
   1503  1.25   thorpej 		(*pr)("\tno current page\n");
   1504  1.25   thorpej 	else
   1505  1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1506  1.25   thorpej 
   1507  1.25   thorpej  skip_pagelist:
   1508  1.25   thorpej 	if (print_log == 0)
   1509  1.25   thorpej 		goto skip_log;
   1510  1.25   thorpej 
   1511  1.25   thorpej 	(*pr)("\n");
   1512  1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1513  1.25   thorpej 		(*pr)("\tno log\n");
   1514  1.25   thorpej 	else
   1515  1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1516   1.3        pk 
   1517  1.25   thorpej  skip_log:
   1518  1.44   thorpej 	if (print_cache == 0)
   1519  1.44   thorpej 		goto skip_cache;
   1520  1.44   thorpej 
   1521  1.61       chs 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1522  1.44   thorpej 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
   1523  1.44   thorpej 		    pc->pc_allocfrom, pc->pc_freeto);
   1524  1.48   thorpej 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1525  1.48   thorpej 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1526  1.61       chs 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1527  1.44   thorpej 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
   1528  1.87   thorpej 			for (i = 0; i < PCG_NOBJECTS; i++) {
   1529  1.87   thorpej 				if (pcg->pcg_objects[i].pcgo_pa !=
   1530  1.87   thorpej 				    POOL_PADDR_INVALID) {
   1531  1.87   thorpej 					(*pr)("\t\t\t%p, 0x%llx\n",
   1532  1.87   thorpej 					    pcg->pcg_objects[i].pcgo_va,
   1533  1.87   thorpej 					    (unsigned long long)
   1534  1.87   thorpej 					    pcg->pcg_objects[i].pcgo_pa);
   1535  1.87   thorpej 				} else {
   1536  1.87   thorpej 					(*pr)("\t\t\t%p\n",
   1537  1.87   thorpej 					    pcg->pcg_objects[i].pcgo_va);
   1538  1.87   thorpej 				}
   1539  1.87   thorpej 			}
   1540  1.44   thorpej 		}
   1541  1.44   thorpej 	}
   1542  1.44   thorpej 
   1543  1.44   thorpej  skip_cache:
   1544  1.88       chs 	pr_enter_check(pp, pr);
   1545  1.88       chs }
   1546  1.88       chs 
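/*
 * pool_chk_page:
 *
 *	Verify one page header: when the header is stored inside the page
 *	itself (PR_PHINPAGE), its address must map back to ph_page, and
 *	every item on the page's free list must lie within that page.
 *	Under DIAGNOSTIC, a corrupted item magic number panics.  Returns
 *	non-zero if an inconsistency is found.
 */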
   1547  1.88       chs static int
   1548  1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1549  1.88       chs {
   1550  1.88       chs 	struct pool_item *pi;
   1551  1.88       chs 	caddr_t page;
   1552  1.88       chs 	int n;
   1553  1.88       chs 
   1554  1.88       chs 	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
   1555  1.88       chs 	if (page != ph->ph_page &&
   1556  1.88       chs 	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1557  1.88       chs 		if (label != NULL)
   1558  1.88       chs 			printf("%s: ", label);
   1559  1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1560  1.88       chs 		       " at page head addr %p (p %p)\n", pp,
   1561  1.88       chs 			pp->pr_wchan, ph->ph_page,
   1562  1.88       chs 			ph, page);
   1563  1.88       chs 		return 1;
   1564  1.88       chs 	}
   1565   1.3        pk 
   1566  1.88       chs 	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
   1567  1.88       chs 	     pi != NULL;
   1568  1.88       chs 	     pi = TAILQ_NEXT(pi,pi_list), n++) {
   1569  1.88       chs 
   1570  1.88       chs #ifdef DIAGNOSTIC
   1571  1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1572  1.88       chs 			if (label != NULL)
   1573  1.88       chs 				printf("%s: ", label);
   1574  1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1575  1.88       chs 			       " page %p; item ordinal %d;"
   1576  1.88       chs 			       " addr %p (p %p)\n",
   1577  1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1578  1.88       chs 				n, pi, page);
   1579  1.88       chs 			panic("pool");
   1580  1.88       chs 		}
   1581  1.88       chs #endif
   1582  1.88       chs 		page =
   1583  1.88       chs 		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
   1584  1.88       chs 		if (page == ph->ph_page)
   1585  1.88       chs 			continue;
   1586  1.88       chs 
   1587  1.88       chs 		if (label != NULL)
   1588  1.88       chs 			printf("%s: ", label);
   1589  1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1590  1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1591  1.88       chs 			pp->pr_wchan, ph->ph_page,
   1592  1.88       chs 			n, pi, page);
   1593  1.88       chs 		return 1;
   1594  1.88       chs 	}
   1595  1.88       chs 	return 0;
   1596   1.3        pk }
   1597   1.3        pk 
   1598  1.88       chs 
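/*
 * pool_chk:
 *
 *	Run pool_chk_page() over the empty, full and partial page lists
 *	of a pool.  Returns non-zero as soon as a page fails the check.
 */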
   1599   1.3        pk int
   1600  1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1601   1.3        pk {
   1602   1.3        pk 	struct pool_item_header *ph;
   1603   1.3        pk 	int r = 0;
   1604   1.3        pk 
   1605  1.21   thorpej 	simple_lock(&pp->pr_slock);
   1606  1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1607  1.88       chs 		r = pool_chk_page(pp, label, ph);
   1608  1.88       chs 		if (r) {
   1609  1.88       chs 			goto out;
   1610  1.88       chs 		}
   1611  1.88       chs 	}
   1612  1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1613  1.88       chs 		r = pool_chk_page(pp, label, ph);
   1614  1.88       chs 		if (r) {
   1615   1.3        pk 			goto out;
   1616   1.3        pk 		}
   1617  1.88       chs 	}
   1618  1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1619  1.88       chs 		r = pool_chk_page(pp, label, ph);
   1620  1.88       chs 		if (r) {
   1621   1.3        pk 			goto out;
   1622   1.3        pk 		}
   1623   1.3        pk 	}
   1624  1.88       chs 
   1625   1.3        pk out:
   1626  1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1627   1.3        pk 	return (r);
   1628  1.43   thorpej }
   1629  1.43   thorpej 
   1630  1.43   thorpej /*
   1631  1.43   thorpej  * pool_cache_init:
   1632  1.43   thorpej  *
   1633  1.43   thorpej  *	Initialize a pool cache.
   1634  1.43   thorpej  *
   1635  1.43   thorpej  *	NOTE: If the pool must be protected from interrupts, we expect
   1636  1.43   thorpej  *	to be called at the appropriate interrupt priority level.
   1637  1.43   thorpej  */
   1638  1.43   thorpej void
   1639  1.43   thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1640  1.43   thorpej     int (*ctor)(void *, void *, int),
   1641  1.43   thorpej     void (*dtor)(void *, void *),
   1642  1.43   thorpej     void *arg)
   1643  1.43   thorpej {
   1644  1.43   thorpej 
   1645  1.43   thorpej 	TAILQ_INIT(&pc->pc_grouplist);
   1646  1.43   thorpej 	simple_lock_init(&pc->pc_slock);
   1647  1.43   thorpej 
   1648  1.43   thorpej 	pc->pc_allocfrom = NULL;
   1649  1.43   thorpej 	pc->pc_freeto = NULL;
   1650  1.43   thorpej 	pc->pc_pool = pp;
   1651  1.43   thorpej 
   1652  1.43   thorpej 	pc->pc_ctor = ctor;
   1653  1.43   thorpej 	pc->pc_dtor = dtor;
   1654  1.43   thorpej 	pc->pc_arg  = arg;
   1655  1.43   thorpej 
   1656  1.48   thorpej 	pc->pc_hits   = 0;
   1657  1.48   thorpej 	pc->pc_misses = 0;
   1658  1.48   thorpej 
   1659  1.48   thorpej 	pc->pc_ngroups = 0;
   1660  1.48   thorpej 
   1661  1.48   thorpej 	pc->pc_nitems = 0;
   1662  1.48   thorpej 
   1663  1.43   thorpej 	simple_lock(&pp->pr_slock);
   1664  1.43   thorpej 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
   1665  1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1666  1.43   thorpej }
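
/*
 * A minimal usage sketch (the foo_* names are placeholders, and the pool
 * is assumed to have been set up with pool_init() already):
 *
 *	int	foo_ctor(void *arg, void *object, int flags);
 *	void	foo_dtor(void *arg, void *object);
 *
 *	struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get_paddr(&foo_cache, PR_WAITOK, NULL);
 *	...
 *	pool_cache_put_paddr(&foo_cache, obj, POOL_PADDR_INVALID);
 *
 * Passing a NULL paddr pointer to pool_cache_get_paddr() and
 * POOL_PADDR_INVALID to pool_cache_put_paddr() effectively opts out of
 * the physical-address tracking.
 */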
   1667  1.43   thorpej 
   1668  1.43   thorpej /*
   1669  1.43   thorpej  * pool_cache_destroy:
   1670  1.43   thorpej  *
   1671  1.43   thorpej  *	Destroy a pool cache.
   1672  1.43   thorpej  */
   1673  1.43   thorpej void
   1674  1.43   thorpej pool_cache_destroy(struct pool_cache *pc)
   1675  1.43   thorpej {
   1676  1.43   thorpej 	struct pool *pp = pc->pc_pool;
   1677  1.43   thorpej 
   1678  1.43   thorpej 	/* First, invalidate the entire cache. */
   1679  1.43   thorpej 	pool_cache_invalidate(pc);
   1680  1.43   thorpej 
   1681  1.43   thorpej 	/* ...and remove it from the pool's cache list. */
   1682  1.43   thorpej 	simple_lock(&pp->pr_slock);
   1683  1.43   thorpej 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
   1684  1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1685  1.43   thorpej }
   1686  1.43   thorpej 
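/*
 * pcg_get:
 *
 *	Remove and return the most recently cached object in a group,
 *	optionally reporting its physical address through "pap".
 */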
   1687  1.43   thorpej static __inline void *
   1688  1.87   thorpej pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
   1689  1.43   thorpej {
   1690  1.43   thorpej 	void *object;
   1691  1.43   thorpej 	u_int idx;
   1692  1.43   thorpej 
   1693  1.43   thorpej 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1694  1.45   thorpej 	KASSERT(pcg->pcg_avail != 0);
   1695  1.43   thorpej 	idx = --pcg->pcg_avail;
   1696  1.43   thorpej 
   1697  1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
   1698  1.87   thorpej 	object = pcg->pcg_objects[idx].pcgo_va;
   1699  1.87   thorpej 	if (pap != NULL)
   1700  1.87   thorpej 		*pap = pcg->pcg_objects[idx].pcgo_pa;
   1701  1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = NULL;
   1702  1.43   thorpej 
   1703  1.43   thorpej 	return (object);
   1704  1.43   thorpej }
   1705  1.43   thorpej 
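/*
 * pcg_put:
 *
 *	Stash an object (and its physical address) in the next free slot
 *	of a cache group.
 */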
   1706  1.43   thorpej static __inline void
   1707  1.87   thorpej pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
   1708  1.43   thorpej {
   1709  1.43   thorpej 	u_int idx;
   1710  1.43   thorpej 
   1711  1.43   thorpej 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   1712  1.43   thorpej 	idx = pcg->pcg_avail++;
   1713  1.43   thorpej 
   1714  1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
   1715  1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = object;
   1716  1.87   thorpej 	pcg->pcg_objects[idx].pcgo_pa = pa;
   1717  1.43   thorpej }
   1718  1.43   thorpej 
   1719  1.43   thorpej /*
   1720  1.87   thorpej  * pool_cache_get{,_paddr}:
   1721  1.43   thorpej  *
   1722  1.87   thorpej  *	Get an object from a pool cache (optionally returning
   1723  1.87   thorpej  *	the physical address of the object).
   1724  1.43   thorpej  */
   1725  1.43   thorpej void *
   1726  1.87   thorpej pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
   1727  1.43   thorpej {
   1728  1.43   thorpej 	struct pool_cache_group *pcg;
   1729  1.43   thorpej 	void *object;
   1730  1.58   thorpej 
   1731  1.58   thorpej #ifdef LOCKDEBUG
   1732  1.58   thorpej 	if (flags & PR_WAITOK)
   1733  1.58   thorpej 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
   1734  1.58   thorpej #endif
   1735  1.43   thorpej 
   1736  1.43   thorpej 	simple_lock(&pc->pc_slock);
   1737  1.43   thorpej 
   1738  1.43   thorpej 	if ((pcg = pc->pc_allocfrom) == NULL) {
   1739  1.61       chs 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1740  1.43   thorpej 			if (pcg->pcg_avail != 0) {
   1741  1.43   thorpej 				pc->pc_allocfrom = pcg;
   1742  1.43   thorpej 				goto have_group;
   1743  1.43   thorpej 			}
   1744  1.43   thorpej 		}
   1745  1.43   thorpej 
   1746  1.43   thorpej 		/*
   1747  1.43   thorpej 		 * No groups with any available objects.  Allocate
   1748  1.43   thorpej 		 * a new object, construct it, and return it to
   1749  1.43   thorpej 		 * the caller.  We will allocate a group, if necessary,
   1750  1.43   thorpej 		 * when the object is freed back to the cache.
   1751  1.43   thorpej 		 */
   1752  1.48   thorpej 		pc->pc_misses++;
   1753  1.43   thorpej 		simple_unlock(&pc->pc_slock);
   1754  1.43   thorpej 		object = pool_get(pc->pc_pool, flags);
   1755  1.43   thorpej 		if (object != NULL && pc->pc_ctor != NULL) {
   1756  1.43   thorpej 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   1757  1.43   thorpej 				pool_put(pc->pc_pool, object);
   1758  1.43   thorpej 				return (NULL);
   1759  1.43   thorpej 			}
   1760  1.43   thorpej 		}
   1761  1.87   thorpej 		if (object != NULL && pap != NULL) {
   1762  1.87   thorpej #ifdef POOL_VTOPHYS
   1763  1.87   thorpej 			*pap = POOL_VTOPHYS(object);
   1764  1.87   thorpej #else
   1765  1.87   thorpej 			*pap = POOL_PADDR_INVALID;
   1766  1.87   thorpej #endif
   1767  1.87   thorpej 		}
   1768  1.43   thorpej 		return (object);
   1769  1.43   thorpej 	}
   1770  1.43   thorpej 
   1771  1.43   thorpej  have_group:
   1772  1.48   thorpej 	pc->pc_hits++;
   1773  1.48   thorpej 	pc->pc_nitems--;
   1774  1.87   thorpej 	object = pcg_get(pcg, pap);
   1775  1.43   thorpej 
   1776  1.43   thorpej 	if (pcg->pcg_avail == 0)
   1777  1.43   thorpej 		pc->pc_allocfrom = NULL;
   1778  1.45   thorpej 
   1779  1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1780  1.43   thorpej 
   1781  1.43   thorpej 	return (object);
   1782  1.43   thorpej }
   1783  1.43   thorpej 
   1784  1.43   thorpej /*
   1785  1.87   thorpej  * pool_cache_put{,_paddr}:
   1786  1.43   thorpej  *
   1787  1.87   thorpej  *	Put an object back to the pool cache (optionally caching the
   1788  1.87   thorpej  *	physical address of the object).
   1789  1.43   thorpej  */
   1790  1.43   thorpej void
   1791  1.87   thorpej pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
   1792  1.43   thorpej {
   1793  1.43   thorpej 	struct pool_cache_group *pcg;
   1794  1.60   thorpej 	int s;
   1795  1.43   thorpej 
   1796  1.43   thorpej 	simple_lock(&pc->pc_slock);
   1797  1.43   thorpej 
   1798  1.43   thorpej 	if ((pcg = pc->pc_freeto) == NULL) {
   1799  1.61       chs 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1800  1.43   thorpej 			if (pcg->pcg_avail != PCG_NOBJECTS) {
   1801  1.43   thorpej 				pc->pc_freeto = pcg;
   1802  1.43   thorpej 				goto have_group;
   1803  1.43   thorpej 			}
   1804  1.43   thorpej 		}
   1805  1.43   thorpej 
   1806  1.43   thorpej 		/*
   1807  1.43   thorpej 		 * No empty groups to free the object to.  Attempt to
   1808  1.47   thorpej 		 * allocate one.
   1809  1.43   thorpej 		 */
   1810  1.47   thorpej 		simple_unlock(&pc->pc_slock);
   1811  1.60   thorpej 		s = splvm();
   1812  1.43   thorpej 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1813  1.60   thorpej 		splx(s);
   1814  1.43   thorpej 		if (pcg != NULL) {
   1815  1.43   thorpej 			memset(pcg, 0, sizeof(*pcg));
   1816  1.47   thorpej 			simple_lock(&pc->pc_slock);
   1817  1.48   thorpej 			pc->pc_ngroups++;
   1818  1.43   thorpej 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1819  1.47   thorpej 			if (pc->pc_freeto == NULL)
   1820  1.47   thorpej 				pc->pc_freeto = pcg;
   1821  1.43   thorpej 			goto have_group;
   1822  1.43   thorpej 		}
   1823  1.43   thorpej 
   1824  1.43   thorpej 		/*
   1825  1.43   thorpej 		 * Unable to allocate a cache group; destruct the object
   1826  1.43   thorpej 		 * and free it back to the pool.
   1827  1.43   thorpej 		 */
   1828  1.51   thorpej 		pool_cache_destruct_object(pc, object);
   1829  1.43   thorpej 		return;
   1830  1.43   thorpej 	}
   1831  1.43   thorpej 
   1832  1.43   thorpej  have_group:
   1833  1.48   thorpej 	pc->pc_nitems++;
   1834  1.87   thorpej 	pcg_put(pcg, object, pa);
   1835  1.43   thorpej 
   1836  1.43   thorpej 	if (pcg->pcg_avail == PCG_NOBJECTS)
   1837  1.43   thorpej 		pc->pc_freeto = NULL;
   1838  1.43   thorpej 
   1839  1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1840  1.51   thorpej }
   1841  1.51   thorpej 
   1842  1.51   thorpej /*
   1843  1.51   thorpej  * pool_cache_destruct_object:
   1844  1.51   thorpej  *
   1845  1.51   thorpej  *	Force destruction of an object and its release back into
   1846  1.51   thorpej  *	the pool.
   1847  1.51   thorpej  */
   1848  1.51   thorpej void
   1849  1.51   thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
   1850  1.51   thorpej {
   1851  1.51   thorpej 
   1852  1.51   thorpej 	if (pc->pc_dtor != NULL)
   1853  1.51   thorpej 		(*pc->pc_dtor)(pc->pc_arg, object);
   1854  1.51   thorpej 	pool_put(pc->pc_pool, object);
   1855  1.43   thorpej }
   1856  1.43   thorpej 
   1857  1.43   thorpej /*
   1858  1.43   thorpej  * pool_cache_do_invalidate:
   1859  1.43   thorpej  *
   1860  1.43   thorpej  *	This internal function implements pool_cache_invalidate() and
   1861  1.43   thorpej  *	pool_cache_reclaim().
   1862  1.43   thorpej  */
   1863  1.43   thorpej static void
   1864  1.43   thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
   1865  1.56  sommerfe     void (*putit)(struct pool *, void *))
   1866  1.43   thorpej {
   1867  1.43   thorpej 	struct pool_cache_group *pcg, *npcg;
   1868  1.43   thorpej 	void *object;
   1869  1.60   thorpej 	int s;
   1870  1.43   thorpej 
   1871  1.43   thorpej 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   1872  1.43   thorpej 	     pcg = npcg) {
   1873  1.43   thorpej 		npcg = TAILQ_NEXT(pcg, pcg_list);
   1874  1.43   thorpej 		while (pcg->pcg_avail != 0) {
   1875  1.48   thorpej 			pc->pc_nitems--;
   1876  1.87   thorpej 			object = pcg_get(pcg, NULL);
   1877  1.45   thorpej 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   1878  1.45   thorpej 				pc->pc_allocfrom = NULL;
   1879  1.43   thorpej 			if (pc->pc_dtor != NULL)
   1880  1.43   thorpej 				(*pc->pc_dtor)(pc->pc_arg, object);
   1881  1.56  sommerfe 			(*putit)(pc->pc_pool, object);
   1882  1.43   thorpej 		}
   1883  1.43   thorpej 		if (free_groups) {
   1884  1.48   thorpej 			pc->pc_ngroups--;
   1885  1.43   thorpej 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   1886  1.46   thorpej 			if (pc->pc_freeto == pcg)
   1887  1.46   thorpej 				pc->pc_freeto = NULL;
   1888  1.60   thorpej 			s = splvm();
   1889  1.43   thorpej 			pool_put(&pcgpool, pcg);
   1890  1.60   thorpej 			splx(s);
   1891  1.43   thorpej 		}
   1892  1.43   thorpej 	}
   1893  1.43   thorpej }
   1894  1.43   thorpej 
   1895  1.43   thorpej /*
   1896  1.43   thorpej  * pool_cache_invalidate:
   1897  1.43   thorpej  *
   1898  1.43   thorpej  *	Invalidate a pool cache (destruct and release all of the
   1899  1.43   thorpej  *	cached objects).
   1900  1.43   thorpej  */
   1901  1.43   thorpej void
   1902  1.43   thorpej pool_cache_invalidate(struct pool_cache *pc)
   1903  1.43   thorpej {
   1904  1.43   thorpej 
   1905  1.43   thorpej 	simple_lock(&pc->pc_slock);
   1906  1.56  sommerfe 	pool_cache_do_invalidate(pc, 0, pool_put);
   1907  1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1908  1.43   thorpej }
   1909  1.43   thorpej 
   1910  1.43   thorpej /*
   1911  1.43   thorpej  * pool_cache_reclaim:
   1912  1.43   thorpej  *
   1913  1.43   thorpej  *	Reclaim a pool cache for pool_reclaim().
   1914  1.43   thorpej  */
   1915  1.43   thorpej static void
   1916  1.43   thorpej pool_cache_reclaim(struct pool_cache *pc)
   1917  1.43   thorpej {
   1918  1.43   thorpej 
   1919  1.47   thorpej 	simple_lock(&pc->pc_slock);
   1920  1.43   thorpej 	pool_cache_do_invalidate(pc, 1, pool_do_put);
   1921  1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1922   1.3        pk }
   1923  1.66   thorpej 
   1924  1.66   thorpej /*
   1925  1.66   thorpej  * Pool backend allocators.
   1926  1.66   thorpej  *
   1927  1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   1928  1.66   thorpej  * and any additional draining that might be needed.
   1929  1.66   thorpej  *
   1930  1.66   thorpej  * We provide two standard allocators:
   1931  1.66   thorpej  *
   1932  1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   1933  1.66   thorpej  *
   1934  1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   1935  1.66   thorpej  *	in interrupt context.
   1936  1.66   thorpej  */
   1937  1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   1938  1.66   thorpej void	pool_page_free(struct pool *, void *);
   1939  1.66   thorpej 
   1940  1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   1941  1.66   thorpej 	pool_page_alloc, pool_page_free, 0,
   1942  1.66   thorpej };
   1943  1.66   thorpej 
   1944  1.66   thorpej void	*pool_page_alloc_nointr(struct pool *, int);
   1945  1.66   thorpej void	pool_page_free_nointr(struct pool *, void *);
   1946  1.66   thorpej 
   1947  1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   1948  1.66   thorpej 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   1949  1.66   thorpej };
   1950  1.66   thorpej 
   1951  1.66   thorpej #ifdef POOL_SUBPAGE
   1952  1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   1953  1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   1954  1.66   thorpej 
   1955  1.66   thorpej struct pool_allocator pool_allocator_kmem_subpage = {
   1956  1.66   thorpej 	pool_subpage_alloc, pool_subpage_free, 0,
   1957  1.66   thorpej };
   1958  1.66   thorpej #endif /* POOL_SUBPAGE */
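
/*
 * A pool that needs different backing storage can supply its own
 * allocator.  A minimal sketch, using placeholder foo_* names and
 * assuming the third initializer field is the allocator's page size
 * (0 selects the platform default):
 *
 *	void	*foo_page_alloc(struct pool *, int);
 *	void	foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 */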
   1959  1.66   thorpej 
   1960  1.66   thorpej /*
   1961  1.66   thorpej  * We have at least three different resources for the same allocation and
   1962  1.66   thorpej  * each resource can be depleted.  First, we have the ready elements in the
   1963  1.66   thorpej  * pool.  Then we have the resource (typically a vm_map) for this allocator.
   1964  1.66   thorpej  * Finally, we have physical memory.  Waiting for any of these can be
   1965  1.66   thorpej  * unnecessary when any other is freed, but the kernel doesn't support
   1966  1.66   thorpej  * sleeping on multiple wait channels, so we have to employ another strategy.
   1967  1.66   thorpej  *
   1968  1.66   thorpej  * The caller sleeps on the pool (so that it can be awakened when an item
   1969  1.66   thorpej  * is returned to the pool), but we set PA_WANT on the allocator.  When a
   1970  1.66   thorpej  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
   1971  1.66   thorpej  * will wake up all sleeping pools belonging to this allocator.
   1972  1.66   thorpej  *
   1973  1.66   thorpej  * XXX Thundering herd.
   1974  1.66   thorpej  */
   1975  1.66   thorpej void *
   1976  1.66   thorpej pool_allocator_alloc(struct pool *org, int flags)
   1977  1.66   thorpej {
   1978  1.66   thorpej 	struct pool_allocator *pa = org->pr_alloc;
   1979  1.66   thorpej 	struct pool *pp, *start;
   1980  1.66   thorpej 	int s, freed;
   1981  1.66   thorpej 	void *res;
   1982  1.66   thorpej 
   1983  1.91      yamt 	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
   1984  1.91      yamt 
   1985  1.66   thorpej 	do {
   1986  1.66   thorpej 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   1987  1.66   thorpej 			return (res);
   1988  1.68   thorpej 		if ((flags & PR_WAITOK) == 0) {
   1989  1.68   thorpej 			/*
    1990  1.68   thorpej 			 * We only run the drain hook here if PR_NOWAIT.
   1991  1.68   thorpej 			 * In other cases, the hook will be run in
   1992  1.68   thorpej 			 * pool_reclaim().
   1993  1.68   thorpej 			 */
   1994  1.68   thorpej 			if (org->pr_drain_hook != NULL) {
   1995  1.68   thorpej 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
   1996  1.68   thorpej 				    flags);
   1997  1.68   thorpej 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   1998  1.68   thorpej 					return (res);
   1999  1.68   thorpej 			}
   2000  1.66   thorpej 			break;
   2001  1.68   thorpej 		}
   2002  1.66   thorpej 
   2003  1.66   thorpej 		/*
   2004  1.66   thorpej 		 * Drain all pools, except "org", that use this
   2005  1.66   thorpej 		 * allocator.  We do this to reclaim VA space.
   2006  1.66   thorpej 		 * pa_alloc is responsible for waiting for
   2007  1.66   thorpej 		 * physical memory.
   2008  1.66   thorpej 		 *
    2009  1.66   thorpej 		 * XXX We risk looping forever if someone
   2010  1.66   thorpej 		 * calls pool_destroy on "start".  But there is no
   2011  1.66   thorpej 		 * other way to have potentially sleeping pool_reclaim,
   2012  1.66   thorpej 		 * non-sleeping locks on pool_allocator, and some
   2013  1.66   thorpej 		 * stirring of drained pools in the allocator.
   2014  1.68   thorpej 		 *
   2015  1.68   thorpej 		 * XXX Maybe we should use pool_head_slock for locking
   2016  1.68   thorpej 		 * the allocators?
   2017  1.66   thorpej 		 */
   2018  1.66   thorpej 		freed = 0;
   2019  1.66   thorpej 
   2020  1.66   thorpej 		s = splvm();
   2021  1.66   thorpej 		simple_lock(&pa->pa_slock);
   2022  1.66   thorpej 		pp = start = TAILQ_FIRST(&pa->pa_list);
   2023  1.66   thorpej 		do {
   2024  1.66   thorpej 			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
   2025  1.66   thorpej 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
   2026  1.66   thorpej 			if (pp == org)
   2027  1.66   thorpej 				continue;
   2028  1.73   thorpej 			simple_unlock(&pa->pa_slock);
   2029  1.66   thorpej 			freed = pool_reclaim(pp);
   2030  1.73   thorpej 			simple_lock(&pa->pa_slock);
   2031  1.66   thorpej 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
   2032  1.66   thorpej 			 freed == 0);
   2033  1.66   thorpej 
   2034  1.66   thorpej 		if (freed == 0) {
   2035  1.66   thorpej 			/*
    2036  1.66   thorpej 			 * We set PA_WANT here; the caller will most likely
   2037  1.66   thorpej 			 * sleep waiting for pages (if not, this won't hurt
   2038  1.66   thorpej 			 * that much), and there is no way to set this in
   2039  1.66   thorpej 			 * the caller without violating locking order.
   2040  1.66   thorpej 			 */
   2041  1.66   thorpej 			pa->pa_flags |= PA_WANT;
   2042  1.66   thorpej 		}
   2043  1.66   thorpej 		simple_unlock(&pa->pa_slock);
   2044  1.66   thorpej 		splx(s);
   2045  1.66   thorpej 	} while (freed);
   2046  1.66   thorpej 	return (NULL);
   2047  1.66   thorpej }
   2048  1.66   thorpej 
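/*
 * pool_allocator_free:
 *
 *	Return a page to the backend allocator and, if PA_WANT is set,
 *	wake up every pool on this allocator that is waiting for pages.
 */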
   2049  1.66   thorpej void
   2050  1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2051  1.66   thorpej {
   2052  1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2053  1.66   thorpej 	int s;
   2054  1.66   thorpej 
   2055  1.91      yamt 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
   2056  1.91      yamt 
   2057  1.66   thorpej 	(*pa->pa_free)(pp, v);
   2058  1.66   thorpej 
   2059  1.66   thorpej 	s = splvm();
   2060  1.66   thorpej 	simple_lock(&pa->pa_slock);
   2061  1.66   thorpej 	if ((pa->pa_flags & PA_WANT) == 0) {
   2062  1.66   thorpej 		simple_unlock(&pa->pa_slock);
   2063  1.66   thorpej 		splx(s);
   2064  1.66   thorpej 		return;
   2065  1.66   thorpej 	}
   2066  1.66   thorpej 
   2067  1.66   thorpej 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
   2068  1.66   thorpej 		simple_lock(&pp->pr_slock);
   2069  1.66   thorpej 		if ((pp->pr_flags & PR_WANTED) != 0) {
   2070  1.66   thorpej 			pp->pr_flags &= ~PR_WANTED;
   2071  1.66   thorpej 			wakeup(pp);
   2072  1.66   thorpej 		}
   2073  1.69   thorpej 		simple_unlock(&pp->pr_slock);
   2074  1.66   thorpej 	}
   2075  1.66   thorpej 	pa->pa_flags &= ~PA_WANT;
   2076  1.66   thorpej 	simple_unlock(&pa->pa_slock);
   2077  1.66   thorpej 	splx(s);
   2078  1.66   thorpej }
   2079  1.66   thorpej 
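/*
 * The default backing store: page-sized chunks obtained with
 * uvm_km_alloc_poolpage() and released with uvm_km_free_poolpage().
 */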
   2080  1.66   thorpej void *
   2081  1.66   thorpej pool_page_alloc(struct pool *pp, int flags)
   2082  1.66   thorpej {
   2083  1.66   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2084  1.66   thorpej 
   2085  1.66   thorpej 	return ((void *) uvm_km_alloc_poolpage(waitok));
   2086  1.66   thorpej }
   2087  1.66   thorpej 
   2088  1.66   thorpej void
   2089  1.66   thorpej pool_page_free(struct pool *pp, void *v)
   2090  1.66   thorpej {
   2091  1.66   thorpej 
   2092  1.66   thorpej 	uvm_km_free_poolpage((vaddr_t) v);
   2093  1.66   thorpej }
   2094  1.66   thorpej 
   2095  1.66   thorpej #ifdef POOL_SUBPAGE
   2096  1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2097  1.66   thorpej void *
   2098  1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2099  1.66   thorpej {
   2100  1.93       dbj 	void *v;
   2101  1.93       dbj 	int s;
   2102  1.93       dbj 	s = splvm();
   2103  1.93       dbj 	v = pool_get(&psppool, flags);
   2104  1.93       dbj 	splx(s);
   2105  1.93       dbj 	return v;
   2106  1.66   thorpej }
   2107  1.66   thorpej 
   2108  1.66   thorpej void
   2109  1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2110  1.66   thorpej {
   2111  1.93       dbj 	int s;
   2112  1.93       dbj 	s = splvm();
   2113  1.66   thorpej 	pool_put(&psppool, v);
   2114  1.93       dbj 	splx(s);
   2115  1.66   thorpej }
   2116  1.66   thorpej 
   2117  1.66   thorpej /* We don't provide a real nointr allocator.  Maybe later. */
   2118  1.66   thorpej void *
   2119  1.66   thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
   2120  1.66   thorpej {
   2121  1.66   thorpej 
   2122  1.66   thorpej 	return (pool_subpage_alloc(pp, flags));
   2123  1.66   thorpej }
   2124  1.66   thorpej 
   2125  1.66   thorpej void
   2126  1.66   thorpej pool_page_free_nointr(struct pool *pp, void *v)
   2127  1.66   thorpej {
   2128  1.66   thorpej 
   2129  1.66   thorpej 	pool_subpage_free(pp, v);
   2130  1.66   thorpej }
   2131  1.66   thorpej #else
   2132  1.66   thorpej void *
   2133  1.66   thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
   2134  1.66   thorpej {
   2135  1.66   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2136  1.66   thorpej 
   2137  1.66   thorpej 	return ((void *) uvm_km_alloc_poolpage1(kernel_map,
   2138  1.66   thorpej 	    uvm.kernel_object, waitok));
   2139  1.66   thorpej }
   2140  1.66   thorpej 
   2141  1.66   thorpej void
   2142  1.66   thorpej pool_page_free_nointr(struct pool *pp, void *v)
   2143  1.66   thorpej {
   2144  1.66   thorpej 
   2145  1.66   thorpej 	uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
   2146  1.66   thorpej }
   2147  1.66   thorpej #endif /* POOL_SUBPAGE */
   2148