/*	$NetBSD: subr_pool.c,v 1.101.2.11 2008/02/27 08:36:56 yamt Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.101.2.11 2008/02/27 08:36:56 yamt Exp $");

#include "opt_ddb.h"
#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
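
/*
 * A minimal usage sketch (kept under #if 0; "struct foo" and the
 * foo_* names are hypothetical).  pool_init(), pool_get() and
 * pool_put() are the interfaces implemented in this file and
 * declared in <sys/pool.h>.
 */
#if 0
struct foo {
	int	f_refcnt;
};

static struct pool foo_pool;

void
foo_subsystem_init(void)
{

	/* One pool per object type; items are sizeof(struct foo). */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
	    "foopl", &pool_allocator_nointr, IPL_NONE);
}

struct foo *
foo_alloc(void)
{

	/* PR_WAITOK: sleep until an item is available. */
	return pool_get(&foo_pool, PR_WAITOK);
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);
}
#endif /* example */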

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
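/*
 * For example, with the 32-bit pool_item_bitmap_t defined below
 * (BITMAP_SIZE == 32), idx 1 covers pools with up to 64 items per
 * page, idx 2 up to 128, and idx PHPOOL_MAX - 1 == 7 up to 4096.
 * idx 0 is the plain header pool used when no free-item bitmap is
 * needed.
 */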

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static SLIST_HEAD(, pool_allocator) pa_deferinitq =
    SLIST_HEAD_INITIALIZER(pa_deferinitq);

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta,
	.pa_backingmapptr = &kmem_map,
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)
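/*
 * Each PR_NOTOUCH page header carries an array of these words, one
 * bit per item; a set bit means the item is free.  For example, item
 * index 37 lives in bitmap word 37 / BITMAP_SIZE == 1, at bit
 * 37 & BITMAP_MASK == 5.
 */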

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; destruction is deferred
 * until it is absolutely necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NOBJECTS_NORMAL (or PCG_NOBJECTS_LARGE) constructed objects.
 * When a cache allocates an object from the pool, it calls the object's
 * constructor and places it into a cache group.  When a cache group
 * frees an object back to the pool, it first calls the object's
 * destructor.  This allows the object to persist in constructed form
 * while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
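
/*
 * A minimal usage sketch (kept under #if 0; "struct bar" and the
 * bar_* names are hypothetical).  The constructor runs when an object
 * first enters the cache and the destructor only when the object is
 * finally released back to the pool; pool_cache_init() and friends
 * are implemented below and declared in <sys/pool.h>.
 */
#if 0
struct bar {
	kmutex_t	b_lock;
};

static pool_cache_t bar_cache;

static int
bar_ctor(void *arg, void *obj, int flags)
{
	struct bar *b = obj;

	mutex_init(&b->b_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

static void
bar_dtor(void *arg, void *obj)
{
	struct bar *b = obj;

	mutex_destroy(&b->b_lock);
}

void
bar_subsystem_init(void)
{

	/* NULL allocator selects the default backing allocator. */
	bar_cache = pool_cache_init(sizeof(struct bar), 0, 0, 0,
	    "barcache", NULL, IPL_NONE, bar_ctor, bar_dtor, NULL);
}

struct bar *
bar_alloc(void)
{

	/* The object comes back already constructed. */
	return pool_cache_get(bar_cache, PR_WAITOK);
}

void
bar_free(struct bar *b)
{

	pool_cache_put(bar_cache, b);
}
#endif /* example */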

static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;


static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
					     void *, paddr_t);
static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
					     void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_xcall(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %ld\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static inline unsigned int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

	KASSERT((*bitmap & mask) == 0);
	*bitmap |= mask;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1 << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline void
pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider a pool_item_header with a smaller ph_page to be
	 * "bigger".  (This unnatural ordering is for the benefit of
	 * pr_find_pagehead.)
	 */

	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

static bool
pa_starved_p(struct pool_allocator *pa)
{

	if (pa->pa_backingmap != NULL) {
		return vm_map_starved_p(pa->pa_backingmap);
	}
	return false;
}

static int
pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	struct pool *pp = obj;
	struct pool_allocator *pa = pp->pr_alloc;

	KASSERT(&pp->pr_reclaimerentry == ce);
	pool_reclaim(pp);
	if (!pa_starved_p(pa)) {
		return CALLBACK_CHAIN_ABORT;
	}
	return CALLBACK_CHAIN_CONTINUE;
}

static void
pool_reclaim_register(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
	splx(s);
}

static void
pool_reclaim_unregister(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry);
	splx(s);
}

static void
pa_reclaim_register(struct pool_allocator *pa)
{
	struct vm_map *map = *pa->pa_backingmapptr;
	struct pool *pp;

	KASSERT(pa->pa_backingmap == NULL);
	if (map == NULL) {
		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
		return;
	}
	pa->pa_backingmap = map;
	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		pool_reclaim_register(pp);
	}
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	struct pool_allocator *pa;
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc, (*pi)->ipl);

	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
		KASSERT(pa->pa_backingmapptr != NULL);
		KASSERT(*pa->pa_backingmapptr != NULL);
		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
		pa_reclaim_register(pa);
	}

	pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t trysize, phsize;
	int off, slack;

#ifdef DEBUG
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("pool_init: pool %s already initialised",
			    wchan);
	}
#endif

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;

		if (palloc->pa_backingmapptr != NULL) {
			pa_reclaim_register(palloc);
		}
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%zu) too large", size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid
	 * wasting too large a part of the page or too big an item.
	 * Off-page page headers go into a splay tree, so we can match
	 * a returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the threshold.  (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
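	/*
	 * For example (hypothetical numbers): with 4 KB pages and a
	 * page header of roughly 48 bytes, phsize << 3 is ~384 and
	 * pa_pagesz / 16 is 256, so items smaller than 256 bytes get
	 * an in-page header unless PR_NOTOUCH or PR_NOALIGN is set.
	 */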
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
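	/*
	 * For example (hypothetical numbers): with pa_pagesz 4096,
	 * align 32, ioff 0, pr_size 96 and an in-page header at
	 * off 4048, this gives 4048 / 96 == 42 items per page.
	 */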
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	/*
	 * XXXAD hack to prevent IP input processing from blocking.
	 */
	if (ipl == IPL_SOFTNET) {
		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
	} else {
		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	}
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = offsetof(struct pool_item_header,
				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif

		size = sizeof(pcg_t) +
		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
		pool_init(&pcg_normal_pool, size, CACHE_LINE_SIZE, 0, 0,
		    "pcgnormal", &pool_allocator_meta, IPL_VM);

		size = sizeof(pcg_t) +
		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
		pool_init(&pcg_large_pool, size, CACHE_LINE_SIZE, 0, 0,
		    "pcglarge", &pool_allocator_meta, IPL_VM);
	}

	/* Insert into the list of all pools. */
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (__predict_true(!cold))
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (__predict_true(!cold))
		mutex_exit(&palloc->pa_lock);

	pool_reclaim_register(pp);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	pool_reclaim_unregister(pp);
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}

/*
 * Grab an item from the pool.
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#endif /* DIAGNOSTIC */
#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
#endif

	mutex_enter(&pp->pr_lock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			cv_wait(&pp->pr_cv, &pp->pr_lock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
   1063         1.3        pk 	 * it points at a non-empty bucket. In particular, `curpage'
   1064         1.3        pk 	 * never points at a page header which has PR_PHINPAGE set and
   1065         1.3        pk 	 * has no items in its bucket.
   1066         1.3        pk 	 */
   1067        1.20   thorpej 	if ((ph = pp->pr_curpage) == NULL) {
   1068   1.101.2.1      yamt 		int error;
   1069   1.101.2.1      yamt 
   1070        1.20   thorpej #ifdef DIAGNOSTIC
   1071        1.20   thorpej 		if (pp->pr_nitems != 0) {
   1072   1.101.2.6      yamt 			mutex_exit(&pp->pr_lock);
   1073        1.20   thorpej 			printf("pool_get: %s: curpage NULL, nitems %u\n",
   1074        1.20   thorpej 			    pp->pr_wchan, pp->pr_nitems);
   1075        1.80    provos 			panic("pool_get: nitems inconsistent");
   1076        1.20   thorpej 		}
   1077        1.20   thorpej #endif
   1078        1.20   thorpej 
   1079        1.21   thorpej 		/*
   1080        1.21   thorpej 		 * Call the back-end page allocator for more memory.
   1081        1.21   thorpej 		 * Release the pool lock, as the back-end page allocator
   1082        1.21   thorpej 		 * may block.
   1083        1.21   thorpej 		 */
   1084        1.25   thorpej 		pr_leave(pp);
   1085   1.101.2.1      yamt 		error = pool_grow(pp, flags);
   1086   1.101.2.1      yamt 		pr_enter(pp, file, line);
   1087   1.101.2.1      yamt 		if (error != 0) {
   1088        1.21   thorpej 			/*
   1089        1.55   thorpej 			 * We were unable to allocate a page or item
   1090        1.55   thorpej 			 * header, but we released the lock during
   1091        1.55   thorpej 			 * allocation, so perhaps items were freed
   1092        1.55   thorpej 			 * back to the pool.  Check for this case.
   1093        1.21   thorpej 			 */
   1094        1.21   thorpej 			if (pp->pr_curpage != NULL)
   1095        1.21   thorpej 				goto startover;
   1096        1.15        pk 
   1097   1.101.2.1      yamt 			pp->pr_nfail++;
   1098        1.25   thorpej 			pr_leave(pp);
   1099   1.101.2.6      yamt 			mutex_exit(&pp->pr_lock);
   1100   1.101.2.1      yamt 			return (NULL);
   1101         1.1        pk 		}
   1102         1.3        pk 
   1103        1.20   thorpej 		/* Start the allocation process over. */
   1104        1.20   thorpej 		goto startover;
   1105         1.3        pk 	}
   1106        1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1107        1.97      yamt #ifdef DIAGNOSTIC
   1108        1.97      yamt 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
   1109        1.97      yamt 			pr_leave(pp);
   1110   1.101.2.6      yamt 			mutex_exit(&pp->pr_lock);
   1111        1.97      yamt 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1112        1.97      yamt 		}
   1113        1.97      yamt #endif
   1114        1.97      yamt 		v = pr_item_notouch_get(pp, ph);
   1115        1.97      yamt #ifdef POOL_DIAGNOSTIC
   1116        1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1117        1.97      yamt #endif
   1118        1.97      yamt 	} else {
   1119   1.101.2.1      yamt 		v = pi = LIST_FIRST(&ph->ph_itemlist);
   1120        1.97      yamt 		if (__predict_false(v == NULL)) {
   1121        1.97      yamt 			pr_leave(pp);
   1122   1.101.2.6      yamt 			mutex_exit(&pp->pr_lock);
   1123        1.97      yamt 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1124        1.97      yamt 		}
   1125        1.20   thorpej #ifdef DIAGNOSTIC
   1126        1.97      yamt 		if (__predict_false(pp->pr_nitems == 0)) {
   1127        1.97      yamt 			pr_leave(pp);
   1128   1.101.2.6      yamt 			mutex_exit(&pp->pr_lock);
   1129        1.97      yamt 			printf("pool_get: %s: items on itemlist, nitems %u\n",
   1130        1.97      yamt 			    pp->pr_wchan, pp->pr_nitems);
   1131        1.97      yamt 			panic("pool_get: nitems inconsistent");
   1132        1.97      yamt 		}
   1133        1.65     enami #endif
   1134        1.56  sommerfe 
   1135        1.65     enami #ifdef POOL_DIAGNOSTIC
   1136        1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1137        1.65     enami #endif
   1138         1.3        pk 
   1139        1.65     enami #ifdef DIAGNOSTIC
   1140        1.97      yamt 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
   1141        1.97      yamt 			pr_printlog(pp, pi, printf);
   1142        1.97      yamt 			panic("pool_get(%s): free list modified: "
   1143        1.97      yamt 			    "magic=%x; page %p; item addr %p\n",
   1144        1.97      yamt 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
   1145        1.97      yamt 		}
   1146         1.3        pk #endif
   1147         1.3        pk 
   1148        1.97      yamt 		/*
   1149        1.97      yamt 		 * Remove from item list.
   1150        1.97      yamt 		 */
   1151   1.101.2.1      yamt 		LIST_REMOVE(pi, pi_list);
   1152        1.97      yamt 	}
   1153        1.20   thorpej 	pp->pr_nitems--;
   1154        1.20   thorpej 	pp->pr_nout++;
   1155         1.6   thorpej 	if (ph->ph_nmissing == 0) {
   1156         1.6   thorpej #ifdef DIAGNOSTIC
   1157        1.34   thorpej 		if (__predict_false(pp->pr_nidle == 0))
   1158         1.6   thorpej 			panic("pool_get: nidle inconsistent");
   1159         1.6   thorpej #endif
   1160         1.6   thorpej 		pp->pr_nidle--;
   1161        1.88       chs 
   1162        1.88       chs 		/*
   1163        1.88       chs 		 * This page was previously empty.  Move it to the list of
   1164        1.88       chs 		 * partially-full pages.  This page is already curpage.
   1165        1.88       chs 		 */
   1166        1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1167        1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1168         1.6   thorpej 	}
   1169         1.3        pk 	ph->ph_nmissing++;
   1170        1.97      yamt 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
   1171        1.21   thorpej #ifdef DIAGNOSTIC
   1172        1.97      yamt 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
   1173   1.101.2.1      yamt 		    !LIST_EMPTY(&ph->ph_itemlist))) {
   1174        1.25   thorpej 			pr_leave(pp);
   1175   1.101.2.6      yamt 			mutex_exit(&pp->pr_lock);
   1176        1.21   thorpej 			panic("pool_get: %s: nmissing inconsistent",
   1177        1.21   thorpej 			    pp->pr_wchan);
   1178        1.21   thorpej 		}
   1179        1.21   thorpej #endif
   1180         1.3        pk 		/*
   1181        1.88       chs 		 * This page is now full.  Move it to the full list
   1182        1.88       chs 		 * and select a new current page.
   1183         1.3        pk 		 */
   1184        1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1185        1.88       chs 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
   1186        1.88       chs 		pool_update_curpage(pp);
   1187         1.1        pk 	}
   1188         1.3        pk 
   1189         1.3        pk 	pp->pr_nget++;
   1190   1.101.2.1      yamt 	pr_leave(pp);
   1191        1.20   thorpej 
   1192        1.20   thorpej 	/*
   1193        1.20   thorpej 	 * If we have a low water mark and we are now below that low
   1194        1.20   thorpej 	 * water mark, add more items to the pool.
   1195        1.20   thorpej 	 */
   1196        1.53   thorpej 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1197        1.20   thorpej 		/*
   1198        1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1199        1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1200        1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1201        1.20   thorpej 		 */
   1202        1.20   thorpej 	}
   1203        1.20   thorpej 
   1204   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1205   1.101.2.3      yamt 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
   1206   1.101.2.3      yamt 	FREECHECK_OUT(&pp->pr_freecheck, v);
   1207         1.1        pk 	return (v);
   1208         1.1        pk }
   1209         1.1        pk 
   1210         1.1        pk /*
   1211        1.43   thorpej  * Internal version of pool_put().  Pool is already locked/entered.
   1212         1.1        pk  */
   1213        1.43   thorpej static void
   1214       1.101   thorpej pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
   1215         1.1        pk {
   1216         1.1        pk 	struct pool_item *pi = v;
   1217         1.3        pk 	struct pool_item_header *ph;
   1218         1.3        pk 
   1219   1.101.2.6      yamt 	KASSERT(mutex_owned(&pp->pr_lock));
   1220   1.101.2.3      yamt 	FREECHECK_IN(&pp->pr_freecheck, v);
   1221   1.101.2.6      yamt 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
   1222        1.61       chs 
   1223        1.30   thorpej #ifdef DIAGNOSTIC
   1224        1.34   thorpej 	if (__predict_false(pp->pr_nout == 0)) {
   1225        1.30   thorpej 		printf("pool %s: putting with none out\n",
   1226        1.30   thorpej 		    pp->pr_wchan);
   1227        1.30   thorpej 		panic("pool_put");
   1228        1.30   thorpej 	}
   1229        1.30   thorpej #endif
   1230         1.3        pk 
   1231   1.101.2.2      yamt 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
   1232        1.25   thorpej 		pr_printlog(pp, NULL, printf);
   1233         1.3        pk 		panic("pool_put: %s: page header missing", pp->pr_wchan);
   1234         1.3        pk 	}
   1235        1.28   thorpej 
   1236         1.3        pk 	/*
   1237         1.3        pk 	 * Return to item list.
   1238         1.3        pk 	 */
   1239        1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1240        1.97      yamt 		pr_item_notouch_put(pp, ph, v);
   1241        1.97      yamt 	} else {
   1242         1.2        pk #ifdef DIAGNOSTIC
   1243        1.97      yamt 		pi->pi_magic = PI_MAGIC;
   1244         1.3        pk #endif
   1245        1.32       chs #ifdef DEBUG
   1246        1.97      yamt 		{
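                              			/*
                              			 * Poison the freed item with the magic
                              			 * pattern, so stale references read garbage
                              			 * rather than plausible data.
                              			 */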
   1247        1.97      yamt 			int i, *ip = v;
   1248        1.32       chs 
   1249        1.97      yamt 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
   1250        1.97      yamt 				*ip++ = PI_MAGIC;
   1251        1.97      yamt 			}
   1252        1.32       chs 		}
   1253        1.32       chs #endif
   1254        1.32       chs 
   1255   1.101.2.1      yamt 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1256        1.97      yamt 	}
   1257        1.79   thorpej 	KDASSERT(ph->ph_nmissing != 0);
   1258         1.3        pk 	ph->ph_nmissing--;
   1259         1.3        pk 	pp->pr_nput++;
   1260        1.20   thorpej 	pp->pr_nitems++;
   1261        1.20   thorpej 	pp->pr_nout--;
   1262         1.3        pk 
   1263         1.3        pk 	/* Cancel "pool empty" condition if it exists */
   1264         1.3        pk 	if (pp->pr_curpage == NULL)
   1265         1.3        pk 		pp->pr_curpage = ph;
   1266         1.3        pk 
   1267         1.3        pk 	if (pp->pr_flags & PR_WANTED) {
   1268         1.3        pk 		pp->pr_flags &= ~PR_WANTED;
   1269        1.15        pk 		if (ph->ph_nmissing == 0)
   1270        1.15        pk 			pp->pr_nidle++;
   1271   1.101.2.6      yamt 		cv_broadcast(&pp->pr_cv);
   1272         1.3        pk 		return;
   1273         1.3        pk 	}
   1274         1.3        pk 
   1275         1.3        pk 	/*
   1276        1.88       chs 	 * If this page is now empty, do one of two things:
   1277        1.21   thorpej 	 *
   1278        1.88       chs 	 *	(1) If we have more pages than the page high water mark,
   1279        1.96   thorpej 	 *	    free the page back to the system.  ONLY CONSIDER
   1280        1.90   thorpej 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
   1281        1.90   thorpej 	 *	    CLAIM.
   1282        1.21   thorpej 	 *
   1283        1.88       chs 	 *	(2) Otherwise, move the page to the empty page list.
   1284        1.88       chs 	 *
   1285        1.88       chs 	 * Either way, select a new current page (so we use a partially-full
   1286        1.88       chs 	 * page if one is available).
   1287         1.3        pk 	 */
   1288         1.3        pk 	if (ph->ph_nmissing == 0) {
   1289         1.6   thorpej 		pp->pr_nidle++;
   1290        1.90   thorpej 		if (pp->pr_npages > pp->pr_minpages &&
   1291        1.90   thorpej 		    (pp->pr_npages > pp->pr_maxpages ||
   1292   1.101.2.1      yamt 		     pa_starved_p(pp->pr_alloc))) {
   1293       1.101   thorpej 			pr_rmpage(pp, ph, pq);
   1294         1.3        pk 		} else {
   1295        1.88       chs 			LIST_REMOVE(ph, ph_pagelist);
   1296        1.88       chs 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1297         1.3        pk 
   1298        1.21   thorpej 			/*
   1299        1.21   thorpej 			 * Update the timestamp on the page.  A page must
   1300        1.21   thorpej 			 * be idle for some period of time before it can
   1301        1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
   1302        1.21   thorpej 			 * ping-pong'ing for memory.
   1303  1.101.2.11      yamt 			 *
   1304  1.101.2.11      yamt 			 * note for 64-bit time_t: truncating to 32-bit is not
   1305  1.101.2.11      yamt 			 * a problem for our usage.
   1306        1.21   thorpej 			 */
   1307  1.101.2.11      yamt 			ph->ph_time = time_uptime;
   1308         1.1        pk 		}
   1309        1.88       chs 		pool_update_curpage(pp);
   1310         1.1        pk 	}
   1311        1.88       chs 
   1312        1.21   thorpej 	/*
   1313        1.88       chs 	 * If the page was previously completely full, move it to the
   1314        1.88       chs 	 * partially-full list and make it the current page.  The next
   1315        1.88       chs 	 * allocation will get the item from this page, instead of
   1316        1.88       chs 	 * further fragmenting the pool.
   1317        1.21   thorpej 	 */
   1318        1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1319        1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1320        1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1321        1.21   thorpej 		pp->pr_curpage = ph;
   1322        1.21   thorpej 	}
   1323        1.43   thorpej }
   1324        1.43   thorpej 
   1325        1.43   thorpej /*
   1326   1.101.2.6      yamt  * Return resource to the pool.
   1327        1.43   thorpej  */
   1328        1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1329        1.43   thorpej void
   1330        1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
   1331        1.43   thorpej {
   1332       1.101   thorpej 	struct pool_pagelist pq;
   1333       1.101   thorpej 
   1334       1.101   thorpej 	LIST_INIT(&pq);
   1335        1.43   thorpej 
   1336   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1337        1.43   thorpej 	pr_enter(pp, file, line);
   1338        1.43   thorpej 
   1339        1.56  sommerfe 	pr_log(pp, v, PRLOG_PUT, file, line);
   1340        1.56  sommerfe 
   1341       1.101   thorpej 	pool_do_put(pp, v, &pq);
   1342        1.21   thorpej 
   1343        1.25   thorpej 	pr_leave(pp);
   1344   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1345       1.101   thorpej 
   1346   1.101.2.1      yamt 	pr_pagelist_free(pp, &pq);
   1347         1.1        pk }
   1348        1.57  sommerfe #undef pool_put
   1349        1.59   thorpej #endif /* POOL_DIAGNOSTIC */
   1350         1.1        pk 
   1351        1.56  sommerfe void
   1352        1.56  sommerfe pool_put(struct pool *pp, void *v)
   1353        1.56  sommerfe {
   1354       1.101   thorpej 	struct pool_pagelist pq;
   1355       1.101   thorpej 
   1356       1.101   thorpej 	LIST_INIT(&pq);
   1357        1.56  sommerfe 
   1358   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1359       1.101   thorpej 	pool_do_put(pp, v, &pq);
   1360   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1361        1.56  sommerfe 
   1362   1.101.2.1      yamt 	pr_pagelist_free(pp, &pq);
   1363        1.56  sommerfe }
   1364        1.57  sommerfe 
   1365        1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1366        1.57  sommerfe #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1367        1.56  sommerfe #endif
   1368        1.74   thorpej 
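                              /*
                               * Typical usage of the get/put pair (illustrative sketch; the
                               * "frob" pool is hypothetical):
                               *
                               *	struct frob *f;
                               *
                               *	f = pool_get(&frob_pool, PR_WAITOK);
                               *	...initialize and use f...
                               *	pool_put(&frob_pool, f);
                               *
                               * PR_WAITOK may sleep until an item becomes available, and so
                               * must not be used in interrupt context; PR_NOWAIT makes
                               * pool_get() return NULL instead of sleeping.
                               */
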
   1369        1.74   thorpej /*
   1370   1.101.2.1      yamt  * pool_grow: grow a pool by a page.
   1371   1.101.2.1      yamt  *
   1372   1.101.2.1      yamt  * => called with pool locked.
   1373   1.101.2.1      yamt  * => unlock and relock the pool.
   1374   1.101.2.1      yamt  * => return with pool locked.
   1375   1.101.2.1      yamt  */
   1376   1.101.2.1      yamt 
   1377   1.101.2.1      yamt static int
   1378   1.101.2.1      yamt pool_grow(struct pool *pp, int flags)
   1379   1.101.2.1      yamt {
   1380   1.101.2.1      yamt 	struct pool_item_header *ph = NULL;
   1381   1.101.2.1      yamt 	char *cp;
   1382   1.101.2.1      yamt 
   1383   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1384   1.101.2.1      yamt 	cp = pool_allocator_alloc(pp, flags);
   1385   1.101.2.1      yamt 	if (__predict_true(cp != NULL)) {
   1386   1.101.2.1      yamt 		ph = pool_alloc_item_header(pp, cp, flags);
   1387   1.101.2.1      yamt 	}
   1388   1.101.2.1      yamt 	if (__predict_false(cp == NULL || ph == NULL)) {
   1389   1.101.2.1      yamt 		if (cp != NULL) {
   1390   1.101.2.1      yamt 			pool_allocator_free(pp, cp);
   1391   1.101.2.1      yamt 		}
   1392   1.101.2.6      yamt 		mutex_enter(&pp->pr_lock);
   1393   1.101.2.1      yamt 		return ENOMEM;
   1394   1.101.2.1      yamt 	}
   1395   1.101.2.1      yamt 
   1396   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1397   1.101.2.1      yamt 	pool_prime_page(pp, cp, ph);
   1398   1.101.2.1      yamt 	pp->pr_npagealloc++;
   1399   1.101.2.1      yamt 	return 0;
   1400   1.101.2.1      yamt }
   1401   1.101.2.1      yamt 
   1402   1.101.2.1      yamt /*
   1403        1.74   thorpej  * Add N items to the pool.
   1404        1.74   thorpej  */
   1405        1.74   thorpej int
   1406        1.74   thorpej pool_prime(struct pool *pp, int n)
   1407        1.74   thorpej {
   1408        1.75    simonb 	int newpages;
   1409   1.101.2.1      yamt 	int error = 0;
   1410        1.74   thorpej 
   1411   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1412        1.74   thorpej 
   1413        1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1414        1.74   thorpej 
   1415        1.74   thorpej 	while (newpages-- > 0) {
   1416   1.101.2.1      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1417   1.101.2.1      yamt 		if (error) {
   1418        1.74   thorpej 			break;
   1419        1.74   thorpej 		}
   1420        1.74   thorpej 		pp->pr_minpages++;
   1421        1.74   thorpej 	}
   1422        1.74   thorpej 
   1423        1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1424        1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1425        1.74   thorpej 
   1426   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1427   1.101.2.1      yamt 	return error;
   1428        1.74   thorpej }
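
                              /*
                               * A subsystem that must allocate from contexts where waiting is
                               * not possible might pre-allocate items at attach time
                               * (illustrative sketch; the "frob" pool is hypothetical):
                               *
                               *	if (pool_prime(&frob_pool, 16) != 0)
                               *		return ENOMEM;
                               */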
   1429        1.55   thorpej 
   1430        1.55   thorpej /*
   1431         1.3        pk  * Add a page's worth of items to the pool.
   1432        1.21   thorpej  *
   1433        1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1434         1.3        pk  */
   1435        1.55   thorpej static void
   1436   1.101.2.4      yamt pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1437         1.3        pk {
   1438         1.3        pk 	struct pool_item *pi;
   1439   1.101.2.4      yamt 	void *cp = storage;
   1440   1.101.2.3      yamt 	const unsigned int align = pp->pr_align;
   1441   1.101.2.3      yamt 	const unsigned int ioff = pp->pr_itemoffset;
   1442        1.55   thorpej 	int n;
   1443        1.36        pk 
   1444   1.101.2.6      yamt 	KASSERT(mutex_owned(&pp->pr_lock));
   1445        1.91      yamt 
   1446        1.66   thorpej #ifdef DIAGNOSTIC
   1447   1.101.2.2      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
   1448  1.101.2.10      yamt 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1449        1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1450        1.66   thorpej #endif
   1451         1.3        pk 
   1452         1.3        pk 	/*
   1453         1.3        pk 	 * Insert page header.
   1454         1.3        pk 	 */
   1455        1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1456   1.101.2.1      yamt 	LIST_INIT(&ph->ph_itemlist);
   1457         1.3        pk 	ph->ph_page = storage;
   1458         1.3        pk 	ph->ph_nmissing = 0;
   1459  1.101.2.11      yamt 	ph->ph_time = time_uptime;
   1460        1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1461        1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1462         1.3        pk 
   1463         1.6   thorpej 	pp->pr_nidle++;
   1464         1.6   thorpej 
   1465         1.3        pk 	/*
   1466         1.3        pk 	 * Color this page (stagger offsets so pages spread across the cache).
   1467         1.3        pk 	 */
   1468   1.101.2.8      yamt 	ph->ph_off = pp->pr_curcolor;
   1469   1.101.2.8      yamt 	cp = (char *)cp + ph->ph_off;
   1470         1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1471         1.3        pk 		pp->pr_curcolor = 0;
   1472         1.3        pk 
   1473         1.3        pk 	/*
   1474         1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1475         1.3        pk 	 */
   1476         1.3        pk 	if (ioff != 0)
   1477   1.101.2.4      yamt 		cp = (char *)cp + align - ioff;
   1478         1.3        pk 
   1479   1.101.2.3      yamt 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1480   1.101.2.3      yamt 
   1481         1.3        pk 	/*
   1482         1.3        pk 	 * Insert remaining chunks on the bucket list.
   1483         1.3        pk 	 */
   1484         1.3        pk 	n = pp->pr_itemsperpage;
   1485        1.20   thorpej 	pp->pr_nitems += n;
   1486         1.3        pk 
   1487        1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1488   1.101.2.6      yamt 		pr_item_notouch_init(pp, ph);
   1489        1.97      yamt 	} else {
   1490        1.97      yamt 		while (n--) {
   1491        1.97      yamt 			pi = (struct pool_item *)cp;
   1492        1.78   thorpej 
   1493        1.97      yamt 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1494         1.3        pk 
   1495        1.97      yamt 			/* Insert on page list */
   1496   1.101.2.1      yamt 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1497         1.3        pk #ifdef DIAGNOSTIC
   1498        1.97      yamt 			pi->pi_magic = PI_MAGIC;
   1499         1.3        pk #endif
   1500   1.101.2.4      yamt 			cp = (char *)cp + pp->pr_size;
   1501   1.101.2.3      yamt 
   1502   1.101.2.3      yamt 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1503        1.97      yamt 		}
   1504         1.3        pk 	}
   1505         1.3        pk 
   1506         1.3        pk 	/*
   1507         1.3        pk 	 * If the pool was depleted, point at the new page.
   1508         1.3        pk 	 */
   1509         1.3        pk 	if (pp->pr_curpage == NULL)
   1510         1.3        pk 		pp->pr_curpage = ph;
   1511         1.3        pk 
   1512         1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1513         1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1514         1.3        pk }
   1515         1.3        pk 
   1516        1.20   thorpej /*
   1517        1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark.  It
   1518        1.88       chs  * allocates pages until pr_nitems catches up with the low water mark.
   1519        1.20   thorpej  *
   1520        1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1521        1.20   thorpej  *
   1522        1.73   thorpej  * Note 2, we must be called with the pool already locked, and we return
   1523        1.20   thorpej  * with it locked.
   1524        1.20   thorpej  */
   1525        1.20   thorpej static int
   1526        1.42   thorpej pool_catchup(struct pool *pp)
   1527        1.20   thorpej {
   1528        1.20   thorpej 	int error = 0;
   1529        1.20   thorpej 
   1530        1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1531   1.101.2.1      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1532   1.101.2.1      yamt 		if (error) {
   1533        1.20   thorpej 			break;
   1534        1.20   thorpej 		}
   1535        1.20   thorpej 	}
   1536   1.101.2.1      yamt 	return error;
   1537        1.20   thorpej }
   1538        1.20   thorpej 
   1539        1.88       chs static void
   1540        1.88       chs pool_update_curpage(struct pool *pp)
   1541        1.88       chs {
   1542        1.88       chs 
   1543        1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1544        1.88       chs 	if (pp->pr_curpage == NULL) {
   1545        1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1546        1.88       chs 	}
   1547        1.88       chs }
   1548        1.88       chs 
   1549         1.3        pk void
   1550        1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1551         1.3        pk {
   1552        1.15        pk 
   1553   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1554        1.21   thorpej 
   1555         1.3        pk 	pp->pr_minitems = n;
   1556        1.15        pk 	pp->pr_minpages = (n == 0)
   1557        1.15        pk 		? 0
   1558        1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1559        1.20   thorpej 
   1560        1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1561        1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1562        1.20   thorpej 		/*
   1563        1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1564        1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1565        1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1566        1.20   thorpej 		 */
   1567        1.20   thorpej 	}
   1568        1.21   thorpej 
   1569   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1570         1.3        pk }
   1571         1.3        pk 
   1572         1.3        pk void
   1573        1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1574         1.3        pk {
   1575        1.15        pk 
   1576   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1577        1.21   thorpej 
   1578        1.15        pk 	pp->pr_maxpages = (n == 0)
   1579        1.15        pk 		? 0
   1580        1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1581        1.21   thorpej 
   1582   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1583         1.3        pk }
   1584         1.3        pk 
   1585        1.20   thorpej void
   1586        1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1587        1.20   thorpej {
   1588        1.20   thorpej 
   1589   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   1590        1.20   thorpej 
   1591        1.20   thorpej 	pp->pr_hardlimit = n;
   1592        1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1593        1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1594        1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1595        1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1596        1.20   thorpej 
   1597        1.20   thorpej 	/*
   1598        1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1599        1.21   thorpej 	 * release the lock.
   1600        1.20   thorpej 	 */
   1601        1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1602        1.20   thorpej 		? 0
   1603        1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1604        1.21   thorpej 
   1605   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1606        1.20   thorpej }
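
                              /*
                               * Taken together, the water marks might be tuned like this
                               * (illustrative sketch; the values are arbitrary):
                               *
                               *	pool_setlowat(&frob_pool, 16);    try to keep >= 16 items
                               *	pool_sethiwat(&frob_pool, 1024);  reclaim idle pages beyond
                               *					  1024 items' worth
                               *	pool_sethardlimit(&frob_pool, 4096,
                               *	    "WARNING: out of frobs", 60);  fail past 4096 items,
                               *					   warn at most once a minute
                               */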
   1607         1.3        pk 
   1608         1.3        pk /*
   1609         1.3        pk  * Release all complete pages that have not been used recently.
   1610         1.3        pk  */
   1611        1.66   thorpej int
   1612        1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1613        1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1614        1.56  sommerfe #else
   1615        1.56  sommerfe pool_reclaim(struct pool *pp)
   1616        1.56  sommerfe #endif
   1617         1.3        pk {
   1618         1.3        pk 	struct pool_item_header *ph, *phnext;
   1619        1.61       chs 	struct pool_pagelist pq;
   1620  1.101.2.11      yamt 	uint32_t curtime;
   1621   1.101.2.6      yamt 	bool klock;
   1622   1.101.2.6      yamt 	int rv;
   1623         1.3        pk 
   1624        1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1625        1.68   thorpej 		/*
   1626        1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1627        1.68   thorpej 		 */
   1628        1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1629        1.68   thorpej 	}
   1630        1.68   thorpej 
   1631   1.101.2.6      yamt 	/*
   1632   1.101.2.6      yamt 	 * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
   1633   1.101.2.6      yamt 	 * and we are called from the pagedaemon without kernel_lock.
   1634   1.101.2.6      yamt 	 * Does not apply to IPL_SOFTBIO.
   1635   1.101.2.6      yamt 	 */
   1636   1.101.2.6      yamt 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
   1637   1.101.2.6      yamt 	    pp->pr_ipl == IPL_SOFTSERIAL) {
   1638   1.101.2.6      yamt 		KERNEL_LOCK(1, NULL);
   1639   1.101.2.6      yamt 		klock = true;
   1640   1.101.2.6      yamt 	} else
   1641   1.101.2.6      yamt 		klock = false;
   1642   1.101.2.6      yamt 
   1643   1.101.2.6      yamt 	/* Reclaim items from the pool's cache (if any). */
   1644   1.101.2.6      yamt 	if (pp->pr_cache != NULL)
   1645   1.101.2.6      yamt 		pool_cache_invalidate(pp->pr_cache);
   1646   1.101.2.6      yamt 
   1647   1.101.2.6      yamt 	if (mutex_tryenter(&pp->pr_lock) == 0) {
   1648   1.101.2.6      yamt 		if (klock) {
   1649   1.101.2.6      yamt 			KERNEL_UNLOCK_ONE(NULL);
   1650   1.101.2.6      yamt 		}
   1651        1.66   thorpej 		return (0);
   1652   1.101.2.6      yamt 	}
   1653        1.25   thorpej 	pr_enter(pp, file, line);
   1654        1.68   thorpej 
   1655        1.88       chs 	LIST_INIT(&pq);
   1656        1.43   thorpej 
   1657  1.101.2.11      yamt 	curtime = time_uptime;
   1658        1.21   thorpej 
   1659        1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1660        1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1661         1.3        pk 
   1662         1.3        pk 		/* Check our minimum page claim */
   1663         1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1664         1.3        pk 			break;
   1665         1.3        pk 
   1666        1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1667  1.101.2.11      yamt 		if (curtime - ph->ph_time < pool_inactive_time
   1668   1.101.2.1      yamt 		    && !pa_starved_p(pp->pr_alloc))
   1669        1.88       chs 			continue;
   1670        1.21   thorpej 
   1671        1.88       chs 		/*
   1672        1.88       chs 		 * If freeing this page would put us below
   1673        1.88       chs 		 * the low water mark, stop now.
   1674        1.88       chs 		 */
   1675        1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1676        1.88       chs 		    pp->pr_minitems)
   1677        1.88       chs 			break;
   1678        1.21   thorpej 
   1679        1.88       chs 		pr_rmpage(pp, ph, &pq);
   1680         1.3        pk 	}
   1681         1.3        pk 
   1682        1.25   thorpej 	pr_leave(pp);
   1683   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   1684        1.66   thorpej 
   1685   1.101.2.6      yamt 	if (LIST_EMPTY(&pq))
   1686   1.101.2.6      yamt 		rv = 0;
   1687   1.101.2.6      yamt 	else {
   1688   1.101.2.6      yamt 		pr_pagelist_free(pp, &pq);
   1689   1.101.2.6      yamt 		rv = 1;
   1690   1.101.2.6      yamt 	}
   1691   1.101.2.6      yamt 
   1692   1.101.2.6      yamt 	if (klock) {
   1693   1.101.2.6      yamt 		KERNEL_UNLOCK_ONE(NULL);
   1694   1.101.2.6      yamt 	}
   1695   1.101.2.6      yamt 
   1696   1.101.2.6      yamt 	return (rv);
   1697         1.3        pk }
   1698         1.3        pk 
   1699         1.3        pk /*
   1700   1.101.2.6      yamt  * Drain pools, one at a time.  This is a two-stage process;
   1701   1.101.2.6      yamt  * drain_start kicks off a cross call to drain CPU-level caches
   1702   1.101.2.6      yamt  * if the pool has an associated pool_cache.  drain_end waits
   1703   1.101.2.6      yamt  * for those cross calls to finish, and then drains the cache
   1704   1.101.2.6      yamt  * (if any) and pool.
   1705   1.101.2.4      yamt  *
   1706   1.101.2.6      yamt  * Note, must never be called from interrupt context.
   1707         1.3        pk  */
   1708         1.3        pk void
   1709   1.101.2.6      yamt pool_drain_start(struct pool **ppp, uint64_t *wp)
   1710         1.3        pk {
   1711         1.3        pk 	struct pool *pp;
   1712   1.101.2.6      yamt 
   1713   1.101.2.8      yamt 	KASSERT(!TAILQ_EMPTY(&pool_head));
   1714         1.3        pk 
   1715        1.61       chs 	pp = NULL;
   1716   1.101.2.6      yamt 
   1717   1.101.2.6      yamt 	/* Find next pool to drain, and add a reference. */
   1718   1.101.2.6      yamt 	mutex_enter(&pool_head_lock);
   1719   1.101.2.6      yamt 	do {
   1720   1.101.2.6      yamt 		if (drainpp == NULL) {
   1721   1.101.2.8      yamt 			drainpp = TAILQ_FIRST(&pool_head);
   1722   1.101.2.6      yamt 		}
   1723   1.101.2.6      yamt 		if (drainpp != NULL) {
   1724   1.101.2.6      yamt 			pp = drainpp;
   1725   1.101.2.8      yamt 			drainpp = TAILQ_NEXT(pp, pr_poollist);
   1726   1.101.2.6      yamt 		}
   1727   1.101.2.6      yamt 		/*
   1728   1.101.2.6      yamt 		 * Skip completely idle pools.  We depend on at least
   1729   1.101.2.6      yamt 		 * one pool in the system being active.
   1730   1.101.2.6      yamt 		 */
   1731   1.101.2.6      yamt 	} while (pp == NULL || pp->pr_npages == 0);
   1732   1.101.2.6      yamt 	pp->pr_refcnt++;
   1733   1.101.2.6      yamt 	mutex_exit(&pool_head_lock);
   1734   1.101.2.6      yamt 
   1735   1.101.2.6      yamt 	/* If there is a pool_cache, drain CPU level caches. */
   1736   1.101.2.6      yamt 	*ppp = pp;
   1737   1.101.2.6      yamt 	if (pp->pr_cache != NULL) {
   1738   1.101.2.6      yamt 		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
   1739   1.101.2.6      yamt 		    pp->pr_cache, NULL);
   1740   1.101.2.6      yamt 	}
   1741   1.101.2.6      yamt }
   1742   1.101.2.6      yamt 
   1743   1.101.2.6      yamt void
   1744   1.101.2.6      yamt pool_drain_end(struct pool *pp, uint64_t where)
   1745   1.101.2.6      yamt {
   1746   1.101.2.6      yamt 
   1747   1.101.2.6      yamt 	if (pp == NULL)
   1748   1.101.2.6      yamt 		return;
   1749   1.101.2.6      yamt 
   1750   1.101.2.6      yamt 	KASSERT(pp->pr_refcnt > 0);
   1751   1.101.2.6      yamt 
   1752   1.101.2.6      yamt 	/* Wait for remote draining to complete. */
   1753   1.101.2.6      yamt 	if (pp->pr_cache != NULL)
   1754   1.101.2.6      yamt 		xc_wait(where);
   1755   1.101.2.6      yamt 
   1756   1.101.2.6      yamt 	/* Drain the cache (if any) and the pool. */
   1757   1.101.2.6      yamt 	pool_reclaim(pp);
   1758   1.101.2.6      yamt 
   1759   1.101.2.6      yamt 	/* Finally, release our reference on the pool. */
   1760   1.101.2.6      yamt 	mutex_enter(&pool_head_lock);
   1761   1.101.2.6      yamt 	pp->pr_refcnt--;
   1762   1.101.2.6      yamt 	cv_broadcast(&pool_busy);
   1763   1.101.2.6      yamt 	mutex_exit(&pool_head_lock);
   1764         1.3        pk }
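
                              /*
                               * The expected calling pattern, e.g. from the page daemon
                               * (illustrative sketch):
                               *
                               *	struct pool *pp = NULL;
                               *	uint64_t where;
                               *
                               *	pool_drain_start(&pp, &where);
                               *	...other reclamation work may proceed here...
                               *	pool_drain_end(pp, where);
                               */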
   1765         1.3        pk 
   1766         1.3        pk /*
   1767         1.3        pk  * Diagnostic helpers.
   1768         1.3        pk  */
   1769         1.3        pk void
   1770        1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1771        1.21   thorpej {
   1772        1.21   thorpej 
   1773        1.25   thorpej 	pool_print1(pp, modif, printf);
   1774        1.21   thorpej }
   1775        1.21   thorpej 
   1776        1.25   thorpej void
   1777   1.101.2.1      yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
   1778   1.101.2.1      yamt {
   1779   1.101.2.1      yamt 	struct pool *pp;
   1780   1.101.2.1      yamt 
   1781   1.101.2.8      yamt 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1782   1.101.2.1      yamt 		pool_printit(pp, modif, pr);
   1783   1.101.2.1      yamt 	}
   1784   1.101.2.1      yamt }
   1785   1.101.2.1      yamt 
   1786   1.101.2.1      yamt void
   1787        1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1788        1.25   thorpej {
   1789        1.25   thorpej 
   1790        1.25   thorpej 	if (pp == NULL) {
   1791        1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1792        1.25   thorpej 		return;
   1793        1.25   thorpej 	}
   1794        1.25   thorpej 
   1795        1.25   thorpej 	pool_print1(pp, modif, pr);
   1796        1.25   thorpej }
   1797        1.25   thorpej 
   1798        1.21   thorpej static void
   1799        1.97      yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1800        1.97      yamt     void (*pr)(const char *, ...))
   1801        1.88       chs {
   1802        1.88       chs 	struct pool_item_header *ph;
   1803        1.88       chs #ifdef DIAGNOSTIC
   1804        1.88       chs 	struct pool_item *pi;
   1805        1.88       chs #endif
   1806        1.88       chs 
   1807        1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1808  1.101.2.11      yamt 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
   1809  1.101.2.11      yamt 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
   1810        1.88       chs #ifdef DIAGNOSTIC
   1811        1.97      yamt 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1812   1.101.2.1      yamt 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1813        1.97      yamt 				if (pi->pi_magic != PI_MAGIC) {
   1814        1.97      yamt 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1815        1.97      yamt 					    pi, pi->pi_magic);
   1816        1.97      yamt 				}
   1817        1.88       chs 			}
   1818        1.88       chs 		}
   1819        1.88       chs #endif
   1820        1.88       chs 	}
   1821        1.88       chs }
   1822        1.88       chs 
   1823        1.88       chs static void
   1824        1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1825         1.3        pk {
   1826        1.25   thorpej 	struct pool_item_header *ph;
   1827   1.101.2.6      yamt 	pool_cache_t pc;
   1828   1.101.2.6      yamt 	pcg_t *pcg;
   1829   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   1830   1.101.2.6      yamt 	uint64_t cpuhit, cpumiss;
   1831        1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1832        1.25   thorpej 	char c;
   1833        1.25   thorpej 
   1834        1.25   thorpej 	while ((c = *modif++) != '\0') {
   1835        1.25   thorpej 		if (c == 'l')
   1836        1.25   thorpej 			print_log = 1;
   1837        1.25   thorpej 		if (c == 'p')
   1838        1.25   thorpej 			print_pagelist = 1;
   1839        1.44   thorpej 		if (c == 'c')
   1840        1.44   thorpej 			print_cache = 1;
   1841        1.25   thorpej 	}
   1842        1.25   thorpej 
   1843   1.101.2.6      yamt 	if ((pc = pp->pr_cache) != NULL) {
   1844   1.101.2.6      yamt 		(*pr)("POOL CACHE");
   1845   1.101.2.6      yamt 	} else {
   1846   1.101.2.6      yamt 		(*pr)("POOL");
   1847   1.101.2.6      yamt 	}
   1848   1.101.2.6      yamt 
   1849   1.101.2.6      yamt 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1850        1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1851        1.25   thorpej 	    pp->pr_roflags);
   1852        1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1853        1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1854        1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1855        1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1856        1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1857        1.25   thorpej 
   1858   1.101.2.6      yamt 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
   1859        1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1860        1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1861        1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1862        1.25   thorpej 
   1863        1.25   thorpej 	if (print_pagelist == 0)
   1864        1.25   thorpej 		goto skip_pagelist;
   1865        1.25   thorpej 
   1866        1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1867        1.88       chs 		(*pr)("\n\tempty page list:\n");
   1868        1.97      yamt 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1869        1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1870        1.88       chs 		(*pr)("\n\tfull page list:\n");
   1871        1.97      yamt 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1872        1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1873        1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1874        1.97      yamt 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1875        1.88       chs 
   1876        1.25   thorpej 	if (pp->pr_curpage == NULL)
   1877        1.25   thorpej 		(*pr)("\tno current page\n");
   1878        1.25   thorpej 	else
   1879        1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1880        1.25   thorpej 
   1881        1.25   thorpej  skip_pagelist:
   1882        1.25   thorpej 	if (print_log == 0)
   1883        1.25   thorpej 		goto skip_log;
   1884        1.25   thorpej 
   1885        1.25   thorpej 	(*pr)("\n");
   1886        1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1887        1.25   thorpej 		(*pr)("\tno log\n");
   1888   1.101.2.2      yamt 	else {
   1889        1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1890   1.101.2.2      yamt 	}
   1891         1.3        pk 
   1892        1.25   thorpej  skip_log:
   1893        1.44   thorpej 
   1894   1.101.2.1      yamt #define PR_GROUPLIST(pcg)						\
   1895   1.101.2.1      yamt 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1896   1.101.2.8      yamt 	for (i = 0; i < pcg->pcg_size; i++) {				\
   1897   1.101.2.1      yamt 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1898   1.101.2.1      yamt 		    POOL_PADDR_INVALID) {				\
   1899   1.101.2.1      yamt 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1900   1.101.2.1      yamt 			    pcg->pcg_objects[i].pcgo_va,		\
   1901   1.101.2.1      yamt 			    (unsigned long long)			\
   1902   1.101.2.1      yamt 			    pcg->pcg_objects[i].pcgo_pa);		\
   1903   1.101.2.1      yamt 		} else {						\
   1904   1.101.2.1      yamt 			(*pr)("\t\t\t%p\n",				\
   1905   1.101.2.1      yamt 			    pcg->pcg_objects[i].pcgo_va);		\
   1906   1.101.2.1      yamt 		}							\
   1907   1.101.2.1      yamt 	}
   1908   1.101.2.1      yamt 
   1909   1.101.2.6      yamt 	if (pc != NULL) {
   1910   1.101.2.6      yamt 		cpuhit = 0;
   1911   1.101.2.6      yamt 		cpumiss = 0;
   1912   1.101.2.6      yamt 		for (i = 0; i < MAXCPUS; i++) {
   1913   1.101.2.6      yamt 			if ((cc = pc->pc_cpus[i]) == NULL)
   1914   1.101.2.6      yamt 				continue;
   1915   1.101.2.6      yamt 			cpuhit += cc->cc_hits;
   1916   1.101.2.6      yamt 			cpumiss += cc->cc_misses;
   1917   1.101.2.6      yamt 		}
   1918   1.101.2.6      yamt 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
   1919   1.101.2.6      yamt 		(*pr)("\tcache layer hits %llu misses %llu\n",
   1920   1.101.2.6      yamt 		    pc->pc_hits, pc->pc_misses);
   1921   1.101.2.6      yamt 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
   1922   1.101.2.6      yamt 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
   1923   1.101.2.6      yamt 		    pc->pc_contended);
   1924   1.101.2.6      yamt 		(*pr)("\tcache layer empty groups %u full groups %u\n",
   1925   1.101.2.6      yamt 		    pc->pc_nempty, pc->pc_nfull);
   1926   1.101.2.6      yamt 		if (print_cache) {
   1927   1.101.2.6      yamt 			(*pr)("\tfull cache groups:\n");
   1928   1.101.2.6      yamt 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   1929   1.101.2.6      yamt 			    pcg = pcg->pcg_next) {
   1930   1.101.2.6      yamt 				PR_GROUPLIST(pcg);
   1931   1.101.2.6      yamt 			}
   1932   1.101.2.6      yamt 			(*pr)("\tempty cache groups:\n");
   1933   1.101.2.6      yamt 			for (pcg = pc->pc_emptygroups; pcg != NULL;
   1934   1.101.2.6      yamt 			    pcg = pcg->pcg_next) {
   1935   1.101.2.6      yamt 				PR_GROUPLIST(pcg);
   1936   1.101.2.6      yamt 			}
   1937        1.44   thorpej 		}
   1938        1.44   thorpej 	}
   1939   1.101.2.1      yamt #undef PR_GROUPLIST
   1940        1.44   thorpej 
   1941        1.88       chs 	pr_enter_check(pp, pr);
   1942        1.88       chs }
   1943        1.88       chs 
   1944        1.88       chs static int
   1945        1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1946        1.88       chs {
   1947        1.88       chs 	struct pool_item *pi;
   1948   1.101.2.4      yamt 	void *page;
   1949        1.88       chs 	int n;
   1950        1.88       chs 
   1951   1.101.2.2      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1952   1.101.2.4      yamt 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1953   1.101.2.2      yamt 		if (page != ph->ph_page &&
   1954   1.101.2.2      yamt 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1955   1.101.2.2      yamt 			if (label != NULL)
   1956   1.101.2.2      yamt 				printf("%s: ", label);
   1957   1.101.2.2      yamt 			printf("pool(%p:%s): page inconsistency: page %p;"
   1958   1.101.2.2      yamt 			       " at page head addr %p (p %p)\n", pp,
   1959   1.101.2.2      yamt 				pp->pr_wchan, ph->ph_page,
   1960   1.101.2.2      yamt 				ph, page);
   1961   1.101.2.2      yamt 			return 1;
   1962   1.101.2.2      yamt 		}
   1963        1.88       chs 	}
   1964         1.3        pk 
   1965        1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1966        1.97      yamt 		return 0;
   1967        1.97      yamt 
   1968   1.101.2.1      yamt 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1969        1.88       chs 	     pi != NULL;
   1970   1.101.2.1      yamt 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1971        1.88       chs 
   1972        1.88       chs #ifdef DIAGNOSTIC
   1973        1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1974        1.88       chs 			if (label != NULL)
   1975        1.88       chs 				printf("%s: ", label);
   1976        1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1977   1.101.2.2      yamt 			       " page %p; item ordinal %d; addr %p\n",
   1978        1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1979   1.101.2.2      yamt 				n, pi);
   1980        1.88       chs 			panic("pool");
   1981        1.88       chs 		}
   1982        1.88       chs #endif
   1983   1.101.2.2      yamt 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1984   1.101.2.2      yamt 			continue;
   1985   1.101.2.2      yamt 		}
   1986   1.101.2.4      yamt 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1987        1.88       chs 		if (page == ph->ph_page)
   1988        1.88       chs 			continue;
   1989        1.88       chs 
   1990        1.88       chs 		if (label != NULL)
   1991        1.88       chs 			printf("%s: ", label);
   1992        1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1993        1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1994        1.88       chs 			pp->pr_wchan, ph->ph_page,
   1995        1.88       chs 			n, pi, page);
   1996        1.88       chs 		return 1;
   1997        1.88       chs 	}
   1998        1.88       chs 	return 0;
   1999         1.3        pk }
   2000         1.3        pk 
   2001        1.88       chs 
   2002         1.3        pk int
   2003        1.42   thorpej pool_chk(struct pool *pp, const char *label)
   2004         1.3        pk {
   2005         1.3        pk 	struct pool_item_header *ph;
   2006         1.3        pk 	int r = 0;
   2007         1.3        pk 
   2008   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   2009        1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   2010        1.88       chs 		r = pool_chk_page(pp, label, ph);
   2011        1.88       chs 		if (r) {
   2012        1.88       chs 			goto out;
   2013        1.88       chs 		}
   2014        1.88       chs 	}
   2015        1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   2016        1.88       chs 		r = pool_chk_page(pp, label, ph);
   2017        1.88       chs 		if (r) {
   2018         1.3        pk 			goto out;
   2019         1.3        pk 		}
   2020        1.88       chs 	}
   2021        1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   2022        1.88       chs 		r = pool_chk_page(pp, label, ph);
   2023        1.88       chs 		if (r) {
   2024         1.3        pk 			goto out;
   2025         1.3        pk 		}
   2026         1.3        pk 	}
   2027        1.88       chs 
   2028         1.3        pk out:
   2029   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   2030         1.3        pk 	return (r);
   2031        1.43   thorpej }
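
                              /*
                               * Debug code can use pool_chk() to verify a pool's page and
                               * free list consistency (illustrative sketch; the label only
                               * tags any diagnostic output):
                               *
                               *	if (pool_chk(&frob_pool, "frob sanity") != 0)
                               *		panic("frob_pool corrupt");
                               */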
   2032        1.43   thorpej 
   2033        1.43   thorpej /*
   2034        1.43   thorpej  * pool_cache_init:
   2035        1.43   thorpej  *
   2036        1.43   thorpej  *	Initialize a pool cache.
   2037   1.101.2.6      yamt  */
   2038   1.101.2.6      yamt pool_cache_t
   2039   1.101.2.6      yamt pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
   2040   1.101.2.6      yamt     const char *wchan, struct pool_allocator *palloc, int ipl,
   2041   1.101.2.6      yamt     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
   2042   1.101.2.6      yamt {
   2043   1.101.2.6      yamt 	pool_cache_t pc;
   2044   1.101.2.6      yamt 
   2045   1.101.2.6      yamt 	pc = pool_get(&cache_pool, PR_WAITOK);
   2046   1.101.2.6      yamt 	if (pc == NULL)
   2047   1.101.2.6      yamt 		return NULL;
   2048   1.101.2.6      yamt 
   2049   1.101.2.6      yamt 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
   2050   1.101.2.6      yamt 	   palloc, ipl, ctor, dtor, arg);
   2051   1.101.2.6      yamt 
   2052   1.101.2.6      yamt 	return pc;
   2053   1.101.2.6      yamt }
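
                              /*
                               * Illustrative sketch of pool_cache usage; the "frob" names and
                               * the constructor/destructor are hypothetical:
                               *
                               *	static int  frob_ctor(void *, void *, int);
                               *	static void frob_dtor(void *, void *);
                               *	static pool_cache_t frob_cache;
                               *
                               *	frob_cache = pool_cache_init(sizeof(struct frob), 0, 0, 0,
                               *	    "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
                               *
                               *	f = pool_cache_get(frob_cache, PR_WAITOK);
                               *	...
                               *	pool_cache_put(frob_cache, f);
                               */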
   2054   1.101.2.6      yamt 
   2055   1.101.2.6      yamt /*
   2056   1.101.2.6      yamt  * pool_cache_bootstrap:
   2057        1.43   thorpej  *
   2058   1.101.2.6      yamt  *	Kernel-private version of pool_cache_init().  The caller
   2059   1.101.2.6      yamt  *	provides initial storage.
   2060        1.43   thorpej  */
   2061        1.43   thorpej void
   2062   1.101.2.6      yamt pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
   2063   1.101.2.6      yamt     u_int align_offset, u_int flags, const char *wchan,
   2064   1.101.2.6      yamt     struct pool_allocator *palloc, int ipl,
   2065   1.101.2.6      yamt     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
   2066        1.43   thorpej     void *arg)
   2067        1.43   thorpej {
   2068   1.101.2.6      yamt 	CPU_INFO_ITERATOR cii;
   2069   1.101.2.8      yamt 	pool_cache_t pc1;
   2070   1.101.2.6      yamt 	struct cpu_info *ci;
   2071   1.101.2.6      yamt 	struct pool *pp;
   2072   1.101.2.6      yamt 
   2073   1.101.2.6      yamt 	pp = &pc->pc_pool;
   2074   1.101.2.6      yamt 	if (palloc == NULL && ipl == IPL_NONE)
   2075   1.101.2.6      yamt 		palloc = &pool_allocator_nointr;
   2076   1.101.2.6      yamt 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
   2077        1.43   thorpej 
   2078   1.101.2.7      yamt 	/*
   2079   1.101.2.7      yamt 	 * XXXAD hack to prevent IP input processing from blocking.
   2080   1.101.2.7      yamt 	 */
   2081   1.101.2.7      yamt 	if (ipl == IPL_SOFTNET) {
   2082   1.101.2.7      yamt 		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
   2083   1.101.2.7      yamt 	} else {
   2084   1.101.2.7      yamt 		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
   2085   1.101.2.7      yamt 	}
   2086        1.43   thorpej 
   2087   1.101.2.6      yamt 	if (ctor == NULL) {
   2088   1.101.2.6      yamt 		ctor = (int (*)(void *, void *, int))nullop;
   2089   1.101.2.6      yamt 	}
   2090   1.101.2.6      yamt 	if (dtor == NULL) {
   2091   1.101.2.6      yamt 		dtor = (void (*)(void *, void *))nullop;
   2092   1.101.2.6      yamt 	}
   2093        1.43   thorpej 
   2094   1.101.2.6      yamt 	pc->pc_emptygroups = NULL;
   2095   1.101.2.6      yamt 	pc->pc_fullgroups = NULL;
   2096   1.101.2.6      yamt 	pc->pc_partgroups = NULL;
   2097        1.43   thorpej 	pc->pc_ctor = ctor;
   2098        1.43   thorpej 	pc->pc_dtor = dtor;
   2099        1.43   thorpej 	pc->pc_arg  = arg;
   2100   1.101.2.6      yamt 	pc->pc_hits  = 0;
   2101        1.48   thorpej 	pc->pc_misses = 0;
   2102   1.101.2.6      yamt 	pc->pc_nempty = 0;
   2103   1.101.2.6      yamt 	pc->pc_npart = 0;
   2104   1.101.2.6      yamt 	pc->pc_nfull = 0;
   2105   1.101.2.6      yamt 	pc->pc_contended = 0;
   2106   1.101.2.6      yamt 	pc->pc_refcnt = 0;
   2107   1.101.2.6      yamt 	pc->pc_freecheck = NULL;
   2108   1.101.2.6      yamt 
   2109   1.101.2.8      yamt 	if ((flags & PR_LARGECACHE) != 0) {
   2110   1.101.2.8      yamt 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
   2111   1.101.2.8      yamt 	} else {
   2112   1.101.2.8      yamt 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
   2113   1.101.2.8      yamt 	}
   2114   1.101.2.8      yamt 
   2115   1.101.2.6      yamt 	/* Allocate per-CPU caches. */
   2116   1.101.2.6      yamt 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
   2117   1.101.2.6      yamt 	pc->pc_ncpu = 0;
   2118   1.101.2.8      yamt 	if (ncpu < 2) {
   2119   1.101.2.7      yamt 		/* XXX For sparc: boot CPU is not attached yet. */
   2120   1.101.2.7      yamt 		pool_cache_cpu_init1(curcpu(), pc);
   2121   1.101.2.7      yamt 	} else {
   2122   1.101.2.7      yamt 		for (CPU_INFO_FOREACH(cii, ci)) {
   2123   1.101.2.7      yamt 			pool_cache_cpu_init1(ci, pc);
   2124   1.101.2.7      yamt 		}
   2125   1.101.2.6      yamt 	}
   2126   1.101.2.8      yamt 
   2127   1.101.2.8      yamt 	/* Add to list of all pools. */
   2128   1.101.2.8      yamt 	if (__predict_true(!cold))
   2129   1.101.2.6      yamt 		mutex_enter(&pool_head_lock);
   2130   1.101.2.8      yamt 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
   2131   1.101.2.8      yamt 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
   2132   1.101.2.8      yamt 			break;
   2133   1.101.2.6      yamt 	}
   2134   1.101.2.8      yamt 	if (pc1 == NULL)
   2135   1.101.2.8      yamt 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
   2136   1.101.2.8      yamt 	else
   2137   1.101.2.8      yamt 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
   2138   1.101.2.8      yamt 	if (__predict_true(!cold))
   2139   1.101.2.8      yamt 		mutex_exit(&pool_head_lock);
   2140   1.101.2.8      yamt 
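	/*
	 * Make sure the fully-constructed cache is visible to other
	 * CPUs before it is published via pr_cache below.
	 */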
   2141   1.101.2.8      yamt 	membar_sync();
   2142   1.101.2.8      yamt 	pp->pr_cache = pc;
   2143        1.43   thorpej }
   2144        1.43   thorpej 
   2145        1.43   thorpej /*
   2146        1.43   thorpej  * pool_cache_destroy:
   2147        1.43   thorpej  *
   2148        1.43   thorpej  *	Destroy a pool cache.
   2149        1.43   thorpej  */
   2150        1.43   thorpej void
   2151   1.101.2.6      yamt pool_cache_destroy(pool_cache_t pc)
   2152        1.43   thorpej {
   2153   1.101.2.6      yamt 	struct pool *pp = &pc->pc_pool;
   2154   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   2155   1.101.2.6      yamt 	pcg_t *pcg;
   2156   1.101.2.6      yamt 	int i;
   2157   1.101.2.6      yamt 
   2158   1.101.2.6      yamt 	/* Remove it from the global list. */
   2159   1.101.2.6      yamt 	mutex_enter(&pool_head_lock);
   2160   1.101.2.6      yamt 	while (pc->pc_refcnt != 0)
   2161   1.101.2.6      yamt 		cv_wait(&pool_busy, &pool_head_lock);
   2162   1.101.2.8      yamt 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
   2163   1.101.2.6      yamt 	mutex_exit(&pool_head_lock);
   2164        1.43   thorpej 
   2165        1.43   thorpej 	/* First, invalidate the entire cache. */
   2166        1.43   thorpej 	pool_cache_invalidate(pc);
   2167        1.43   thorpej 
   2168   1.101.2.6      yamt 	/* Disassociate it from the pool. */
   2169   1.101.2.6      yamt 	mutex_enter(&pp->pr_lock);
   2170   1.101.2.6      yamt 	pp->pr_cache = NULL;
   2171   1.101.2.6      yamt 	mutex_exit(&pp->pr_lock);
   2172   1.101.2.6      yamt 
   2173   1.101.2.6      yamt 	/* Destroy per-CPU data */
   2174   1.101.2.6      yamt 	for (i = 0; i < MAXCPUS; i++) {
   2175   1.101.2.6      yamt 		if ((cc = pc->pc_cpus[i]) == NULL)
   2176   1.101.2.6      yamt 			continue;
   2177   1.101.2.6      yamt 		if ((pcg = cc->cc_current) != NULL) {
   2178   1.101.2.6      yamt 			pcg->pcg_next = NULL;
   2179   1.101.2.6      yamt 			pool_cache_invalidate_groups(pc, pcg);
   2180   1.101.2.6      yamt 		}
   2181   1.101.2.6      yamt 		if ((pcg = cc->cc_previous) != NULL) {
   2182   1.101.2.6      yamt 			pcg->pcg_next = NULL;
   2183   1.101.2.6      yamt 			pool_cache_invalidate_groups(pc, pcg);
   2184   1.101.2.6      yamt 		}
   2185   1.101.2.6      yamt 		if (cc != &pc->pc_cpu0)
   2186   1.101.2.6      yamt 			pool_put(&cache_cpu_pool, cc);
   2187   1.101.2.6      yamt 	}
   2188   1.101.2.6      yamt 
   2189   1.101.2.6      yamt 	/* Finally, destroy it. */
   2190   1.101.2.6      yamt 	mutex_destroy(&pc->pc_lock);
   2191   1.101.2.6      yamt 	pool_destroy(pp);
   2192   1.101.2.6      yamt 	pool_put(&cache_pool, pc);
   2193        1.43   thorpej }
   2194        1.43   thorpej 
   2195   1.101.2.6      yamt /*
   2196   1.101.2.6      yamt  * pool_cache_cpu_init1:
   2197   1.101.2.6      yamt  *
   2198   1.101.2.6      yamt  *	Called for each pool_cache whenever a new CPU is attached.
   2199   1.101.2.6      yamt  */
   2200   1.101.2.6      yamt static void
   2201   1.101.2.6      yamt pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
   2202        1.43   thorpej {
   2203   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   2204   1.101.2.7      yamt 	int index;
   2205   1.101.2.7      yamt 
   2206   1.101.2.7      yamt 	index = ci->ci_index;
   2207        1.43   thorpej 
   2208   1.101.2.7      yamt 	KASSERT(index < MAXCPUS);
   2209   1.101.2.6      yamt 	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
   2210        1.43   thorpej 
   2211   1.101.2.7      yamt 	if ((cc = pc->pc_cpus[index]) != NULL) {
   2212   1.101.2.7      yamt 		KASSERT(cc->cc_cpuindex == index);
   2213   1.101.2.6      yamt 		return;
   2214   1.101.2.6      yamt 	}
   2215   1.101.2.6      yamt 
   2216   1.101.2.6      yamt 	/*
   2217   1.101.2.6      yamt 	 * The first CPU is 'free'.  This needs to be the case for
   2218   1.101.2.6      yamt 	 * bootstrap - we may not be able to allocate yet.
   2219   1.101.2.6      yamt 	 */
   2220   1.101.2.6      yamt 	if (pc->pc_ncpu == 0) {
   2221   1.101.2.6      yamt 		cc = &pc->pc_cpu0;
   2222   1.101.2.6      yamt 		pc->pc_ncpu = 1;
   2223   1.101.2.6      yamt 	} else {
   2224   1.101.2.6      yamt 		mutex_enter(&pc->pc_lock);
   2225   1.101.2.6      yamt 		pc->pc_ncpu++;
   2226   1.101.2.6      yamt 		mutex_exit(&pc->pc_lock);
   2227   1.101.2.6      yamt 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
   2228   1.101.2.6      yamt 	}
   2229   1.101.2.6      yamt 
   2230   1.101.2.6      yamt 	cc->cc_ipl = pc->pc_pool.pr_ipl;
   2231   1.101.2.6      yamt 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
   2232   1.101.2.6      yamt 	cc->cc_cache = pc;
   2233   1.101.2.7      yamt 	cc->cc_cpuindex = index;
   2234   1.101.2.6      yamt 	cc->cc_hits = 0;
   2235   1.101.2.6      yamt 	cc->cc_misses = 0;
   2236   1.101.2.6      yamt 	cc->cc_current = NULL;
   2237   1.101.2.6      yamt 	cc->cc_previous = NULL;
   2238   1.101.2.6      yamt 
   2239   1.101.2.7      yamt 	pc->pc_cpus[index] = cc;
   2240        1.43   thorpej }
   2241        1.43   thorpej 
   2242   1.101.2.6      yamt /*
   2243   1.101.2.6      yamt  * pool_cache_cpu_init:
   2244   1.101.2.6      yamt  *
   2245   1.101.2.6      yamt  *	Called whenever a new CPU is attached.
   2246   1.101.2.6      yamt  */
   2247   1.101.2.6      yamt void
   2248   1.101.2.6      yamt pool_cache_cpu_init(struct cpu_info *ci)
   2249        1.43   thorpej {
   2250   1.101.2.6      yamt 	pool_cache_t pc;
   2251        1.43   thorpej 
   2252   1.101.2.6      yamt 	mutex_enter(&pool_head_lock);
   2253   1.101.2.8      yamt 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
   2254   1.101.2.6      yamt 		pc->pc_refcnt++;
   2255   1.101.2.6      yamt 		mutex_exit(&pool_head_lock);
   2256        1.43   thorpej 
   2257   1.101.2.6      yamt 		pool_cache_cpu_init1(ci, pc);
   2258   1.101.2.6      yamt 
   2259   1.101.2.6      yamt 		mutex_enter(&pool_head_lock);
   2260   1.101.2.6      yamt 		pc->pc_refcnt--;
   2261   1.101.2.6      yamt 		cv_broadcast(&pool_busy);
   2262   1.101.2.6      yamt 	}
   2263   1.101.2.6      yamt 	mutex_exit(&pool_head_lock);
   2264   1.101.2.6      yamt }
   2265   1.101.2.6      yamt 
   2266   1.101.2.6      yamt /*
   2267   1.101.2.6      yamt  * pool_cache_reclaim:
   2268   1.101.2.6      yamt  *
   2269   1.101.2.6      yamt  *	Reclaim memory from a pool cache.
   2270   1.101.2.6      yamt  */
   2271   1.101.2.6      yamt bool
   2272   1.101.2.6      yamt pool_cache_reclaim(pool_cache_t pc)
   2273   1.101.2.6      yamt {
   2274   1.101.2.6      yamt 
   2275   1.101.2.6      yamt 	return pool_reclaim(&pc->pc_pool);
   2276        1.43   thorpej }
   2277        1.43   thorpej 
   2278   1.101.2.1      yamt static void
   2279   1.101.2.6      yamt pool_cache_destruct_object1(pool_cache_t pc, void *object)
   2280   1.101.2.1      yamt {
   2281   1.101.2.1      yamt 
   2282   1.101.2.6      yamt 	(*pc->pc_dtor)(pc->pc_arg, object);
   2283   1.101.2.6      yamt 	pool_put(&pc->pc_pool, object);
   2284   1.101.2.1      yamt }
   2285   1.101.2.1      yamt 
   2286        1.43   thorpej /*
   2287   1.101.2.6      yamt  * pool_cache_destruct_object:
   2288        1.43   thorpej  *
   2289   1.101.2.6      yamt  *	Force destruction of an object and its release back into
   2290   1.101.2.6      yamt  *	the pool.
   2291        1.43   thorpej  */
   2292   1.101.2.6      yamt void
   2293   1.101.2.6      yamt pool_cache_destruct_object(pool_cache_t pc, void *object)
   2294        1.43   thorpej {
   2295        1.58   thorpej 
   2296   1.101.2.6      yamt 	FREECHECK_IN(&pc->pc_freecheck, object);
   2297        1.43   thorpej 
   2298   1.101.2.6      yamt 	pool_cache_destruct_object1(pc, object);
   2299   1.101.2.6      yamt }
   2300        1.43   thorpej 
   2301   1.101.2.6      yamt /*
   2302   1.101.2.6      yamt  * pool_cache_invalidate_groups:
   2303   1.101.2.6      yamt  *
   2304   1.101.2.6      yamt  *	Invalidate a chain of groups and destruct all objects.
   2305   1.101.2.6      yamt  */
   2306   1.101.2.6      yamt static void
   2307   1.101.2.6      yamt pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
   2308   1.101.2.6      yamt {
   2309   1.101.2.6      yamt 	void *object;
   2310   1.101.2.6      yamt 	pcg_t *next;
   2311   1.101.2.6      yamt 	int i;
   2312   1.101.2.3      yamt 
   2313   1.101.2.6      yamt 	for (; pcg != NULL; pcg = next) {
   2314   1.101.2.6      yamt 		next = pcg->pcg_next;
   2315        1.43   thorpej 
   2316   1.101.2.6      yamt 		for (i = 0; i < pcg->pcg_avail; i++) {
   2317   1.101.2.6      yamt 			object = pcg->pcg_objects[i].pcgo_va;
   2318   1.101.2.6      yamt 			pool_cache_destruct_object1(pc, object);
   2319   1.101.2.6      yamt 		}
   2320        1.43   thorpej 
   2321   1.101.2.8      yamt 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
   2322   1.101.2.8      yamt 			pool_put(&pcg_large_pool, pcg);
   2323   1.101.2.8      yamt 		} else {
   2324   1.101.2.8      yamt 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
   2325   1.101.2.8      yamt 			pool_put(&pcg_normal_pool, pcg);
   2326   1.101.2.8      yamt 		}
   2327   1.101.2.1      yamt 	}
   2328        1.43   thorpej }
   2329        1.43   thorpej 
   2330        1.43   thorpej /*
   2331   1.101.2.6      yamt  * pool_cache_invalidate:
   2332        1.43   thorpej  *
   2333   1.101.2.6      yamt  *	Invalidate a pool cache (destruct and release all of the
   2334   1.101.2.6      yamt  *	cached objects).  Does not reclaim objects from the pool.
   2335        1.43   thorpej  */
   2336        1.43   thorpej void
   2337   1.101.2.6      yamt pool_cache_invalidate(pool_cache_t pc)
   2338        1.43   thorpej {
   2339   1.101.2.6      yamt 	pcg_t *full, *empty, *part;
   2340        1.43   thorpej 
   2341   1.101.2.6      yamt 	mutex_enter(&pc->pc_lock);
   2342   1.101.2.6      yamt 	full = pc->pc_fullgroups;
   2343   1.101.2.6      yamt 	empty = pc->pc_emptygroups;
   2344   1.101.2.6      yamt 	part = pc->pc_partgroups;
   2345   1.101.2.6      yamt 	pc->pc_fullgroups = NULL;
   2346   1.101.2.6      yamt 	pc->pc_emptygroups = NULL;
   2347   1.101.2.6      yamt 	pc->pc_partgroups = NULL;
   2348   1.101.2.6      yamt 	pc->pc_nfull = 0;
   2349   1.101.2.6      yamt 	pc->pc_nempty = 0;
   2350   1.101.2.6      yamt 	pc->pc_npart = 0;
   2351   1.101.2.6      yamt 	mutex_exit(&pc->pc_lock);
   2352   1.101.2.6      yamt 
   2353   1.101.2.6      yamt 	pool_cache_invalidate_groups(pc, full);
   2354   1.101.2.6      yamt 	pool_cache_invalidate_groups(pc, empty);
   2355   1.101.2.6      yamt 	pool_cache_invalidate_groups(pc, part);
   2356   1.101.2.6      yamt }
   2357   1.101.2.6      yamt 
   2358   1.101.2.6      yamt void
   2359   1.101.2.6      yamt pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
   2360   1.101.2.6      yamt {
   2361   1.101.2.6      yamt 
   2362   1.101.2.6      yamt 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
   2363   1.101.2.6      yamt }
   2364   1.101.2.6      yamt 
   2365   1.101.2.6      yamt void
   2366   1.101.2.6      yamt pool_cache_setlowat(pool_cache_t pc, int n)
   2367   1.101.2.6      yamt {
   2368   1.101.2.6      yamt 
   2369   1.101.2.6      yamt 	pool_setlowat(&pc->pc_pool, n);
   2370   1.101.2.6      yamt }
   2371   1.101.2.6      yamt 
   2372   1.101.2.6      yamt void
   2373   1.101.2.6      yamt pool_cache_sethiwat(pool_cache_t pc, int n)
   2374   1.101.2.6      yamt {
   2375   1.101.2.6      yamt 
   2376   1.101.2.6      yamt 	pool_sethiwat(&pc->pc_pool, n);
   2377   1.101.2.6      yamt }
   2378   1.101.2.6      yamt 
   2379   1.101.2.6      yamt void
   2380   1.101.2.6      yamt pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
   2381   1.101.2.6      yamt {
   2382   1.101.2.6      yamt 
   2383   1.101.2.6      yamt 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
   2384   1.101.2.6      yamt }
   2385   1.101.2.6      yamt 
   2386   1.101.2.6      yamt static inline pool_cache_cpu_t *
   2387   1.101.2.6      yamt pool_cache_cpu_enter(pool_cache_t pc, int *s)
   2388   1.101.2.6      yamt {
   2389   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   2390   1.101.2.6      yamt 
   2391   1.101.2.6      yamt 	/*
   2392   1.101.2.6      yamt 	 * Prevent other users of the cache from accessing our
   2393   1.101.2.6      yamt 	 * CPU-local data.  To avoid touching shared state, we
    2394   1.101.2.6      yamt 	 * pull the necessary information from CPU-local data.
   2395   1.101.2.6      yamt 	 */
   2396   1.101.2.7      yamt 	crit_enter();
   2397   1.101.2.7      yamt 	cc = pc->pc_cpus[curcpu()->ci_index];
   2398   1.101.2.6      yamt 	KASSERT(cc->cc_cache == pc);
   2399   1.101.2.7      yamt 	if (cc->cc_ipl != IPL_NONE) {
   2400   1.101.2.6      yamt 		*s = splraiseipl(cc->cc_iplcookie);
   2401   1.101.2.6      yamt 	}
   2402   1.101.2.6      yamt 	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
   2403        1.43   thorpej 
   2404   1.101.2.6      yamt 	return cc;
   2405   1.101.2.6      yamt }
   2406   1.101.2.6      yamt 
   2407   1.101.2.6      yamt static inline void
   2408   1.101.2.6      yamt pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
   2409   1.101.2.6      yamt {
   2410   1.101.2.6      yamt 
   2411   1.101.2.6      yamt 	/* No longer need exclusive access to the per-CPU data. */
   2412   1.101.2.7      yamt 	if (cc->cc_ipl != IPL_NONE) {
   2413   1.101.2.6      yamt 		splx(*s);
   2414   1.101.2.1      yamt 	}
   2415   1.101.2.7      yamt 	crit_exit();
   2416   1.101.2.6      yamt }
   2417   1.101.2.6      yamt 
   2418   1.101.2.6      yamt #if __GNUC_PREREQ__(3, 0)
   2419   1.101.2.6      yamt __attribute ((noinline))
   2420   1.101.2.6      yamt #endif
   2421   1.101.2.6      yamt pool_cache_cpu_t *
   2422   1.101.2.6      yamt pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
   2423   1.101.2.6      yamt 		    paddr_t *pap, int flags)
   2424   1.101.2.6      yamt {
   2425   1.101.2.6      yamt 	pcg_t *pcg, *cur;
   2426   1.101.2.6      yamt 	uint64_t ncsw;
   2427   1.101.2.6      yamt 	pool_cache_t pc;
   2428   1.101.2.6      yamt 	void *object;
   2429   1.101.2.6      yamt 
   2430   1.101.2.6      yamt 	pc = cc->cc_cache;
   2431   1.101.2.6      yamt 	cc->cc_misses++;
   2432   1.101.2.6      yamt 
   2433   1.101.2.6      yamt 	/*
    2434   1.101.2.6      yamt 	 * Nothing was available locally.  Try to grab a group
   2435   1.101.2.6      yamt 	 * from the cache.
   2436   1.101.2.6      yamt 	 */
   2437   1.101.2.6      yamt 	if (!mutex_tryenter(&pc->pc_lock)) {
   2438   1.101.2.6      yamt 		ncsw = curlwp->l_ncsw;
   2439   1.101.2.6      yamt 		mutex_enter(&pc->pc_lock);
   2440   1.101.2.6      yamt 		pc->pc_contended++;
   2441        1.43   thorpej 
   2442        1.43   thorpej 		/*
   2443   1.101.2.6      yamt 		 * If we context switched while locking, then
   2444   1.101.2.6      yamt 		 * our view of the per-CPU data is invalid:
   2445   1.101.2.6      yamt 		 * retry.
   2446        1.43   thorpej 		 */
   2447   1.101.2.6      yamt 		if (curlwp->l_ncsw != ncsw) {
   2448   1.101.2.6      yamt 			mutex_exit(&pc->pc_lock);
   2449   1.101.2.6      yamt 			pool_cache_cpu_exit(cc, s);
   2450   1.101.2.6      yamt 			return pool_cache_cpu_enter(pc, s);
   2451   1.101.2.6      yamt 		}
   2452   1.101.2.6      yamt 	}
   2453        1.43   thorpej 
   2454   1.101.2.6      yamt 	if ((pcg = pc->pc_fullgroups) != NULL) {
   2455   1.101.2.6      yamt 		/*
   2456   1.101.2.6      yamt 		 * If there's a full group, release our empty
   2457   1.101.2.6      yamt 		 * group back to the cache.  Install the full
   2458   1.101.2.6      yamt 		 * group as cc_current and return.
   2459   1.101.2.6      yamt 		 */
   2460   1.101.2.6      yamt 		if ((cur = cc->cc_current) != NULL) {
   2461   1.101.2.6      yamt 			KASSERT(cur->pcg_avail == 0);
   2462   1.101.2.6      yamt 			cur->pcg_next = pc->pc_emptygroups;
   2463   1.101.2.6      yamt 			pc->pc_emptygroups = cur;
   2464   1.101.2.6      yamt 			pc->pc_nempty++;
   2465   1.101.2.1      yamt 		}
   2466   1.101.2.8      yamt 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
   2467   1.101.2.6      yamt 		cc->cc_current = pcg;
   2468   1.101.2.6      yamt 		pc->pc_fullgroups = pcg->pcg_next;
   2469   1.101.2.6      yamt 		pc->pc_hits++;
   2470   1.101.2.6      yamt 		pc->pc_nfull--;
   2471   1.101.2.6      yamt 		mutex_exit(&pc->pc_lock);
   2472   1.101.2.6      yamt 		return cc;
   2473        1.43   thorpej 	}
   2474        1.43   thorpej 
   2475   1.101.2.6      yamt 	/*
   2476   1.101.2.6      yamt 	 * Nothing available locally or in cache.  Take the slow
   2477   1.101.2.6      yamt 	 * path: fetch a new object from the pool and construct
   2478   1.101.2.6      yamt 	 * it.
   2479   1.101.2.6      yamt 	 */
   2480   1.101.2.6      yamt 	pc->pc_misses++;
   2481   1.101.2.6      yamt 	mutex_exit(&pc->pc_lock);
   2482   1.101.2.6      yamt 	pool_cache_cpu_exit(cc, s);
   2483        1.43   thorpej 
   2484   1.101.2.6      yamt 	object = pool_get(&pc->pc_pool, flags);
   2485   1.101.2.6      yamt 	*objectp = object;
   2486   1.101.2.6      yamt 	if (object == NULL)
   2487   1.101.2.6      yamt 		return NULL;
   2488   1.101.2.6      yamt 
   2489   1.101.2.6      yamt 	if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   2490   1.101.2.6      yamt 		pool_put(&pc->pc_pool, object);
   2491   1.101.2.6      yamt 		*objectp = NULL;
   2492   1.101.2.6      yamt 		return NULL;
   2493   1.101.2.1      yamt 	}
   2494        1.51   thorpej 
   2495   1.101.2.6      yamt 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
   2496   1.101.2.6      yamt 	    (pc->pc_pool.pr_align - 1)) == 0);
   2497   1.101.2.6      yamt 
   2498   1.101.2.6      yamt 	if (pap != NULL) {
   2499   1.101.2.6      yamt #ifdef POOL_VTOPHYS
   2500   1.101.2.6      yamt 		*pap = POOL_VTOPHYS(object);
   2501   1.101.2.6      yamt #else
   2502   1.101.2.6      yamt 		*pap = POOL_PADDR_INVALID;
   2503   1.101.2.6      yamt #endif
   2504   1.101.2.6      yamt 	}
   2505        1.51   thorpej 
   2506   1.101.2.6      yamt 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2507   1.101.2.6      yamt 	return NULL;
   2508        1.43   thorpej }
   2509        1.43   thorpej 
   2510   1.101.2.4      yamt /*
   2511   1.101.2.6      yamt  * pool_cache_get{,_paddr}:
   2512   1.101.2.4      yamt  *
   2513   1.101.2.6      yamt  *	Get an object from a pool cache (optionally returning
   2514   1.101.2.6      yamt  *	the physical address of the object).
   2515   1.101.2.4      yamt  */
   2516   1.101.2.6      yamt void *
   2517   1.101.2.6      yamt pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
   2518   1.101.2.1      yamt {
   2519   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   2520   1.101.2.6      yamt 	pcg_t *pcg;
   2521   1.101.2.1      yamt 	void *object;
   2522   1.101.2.6      yamt 	int s;
   2523   1.101.2.1      yamt 
   2524   1.101.2.6      yamt #ifdef LOCKDEBUG
   2525   1.101.2.6      yamt 	if (flags & PR_WAITOK)
   2526   1.101.2.6      yamt 		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
   2527   1.101.2.6      yamt #endif
   2528   1.101.2.4      yamt 
   2529   1.101.2.6      yamt 	cc = pool_cache_cpu_enter(pc, &s);
   2530   1.101.2.6      yamt 	do {
    2531   1.101.2.6      yamt 		/* Try to allocate an object from the current group. */
   2532   1.101.2.6      yamt 	 	pcg = cc->cc_current;
   2533   1.101.2.6      yamt 		if (pcg != NULL && pcg->pcg_avail > 0) {
   2534   1.101.2.6      yamt 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
   2535   1.101.2.6      yamt 			if (pap != NULL)
   2536   1.101.2.6      yamt 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
   2537   1.101.2.9      yamt #if defined(DIAGNOSTIC)
   2538   1.101.2.6      yamt 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
   2539   1.101.2.9      yamt #endif /* defined(DIAGNOSTIC) */
   2540   1.101.2.8      yamt 			KASSERT(pcg->pcg_avail <= pcg->pcg_size);
   2541   1.101.2.6      yamt 			KASSERT(object != NULL);
   2542   1.101.2.6      yamt 			cc->cc_hits++;
   2543   1.101.2.6      yamt 			pool_cache_cpu_exit(cc, &s);
   2544   1.101.2.6      yamt 			FREECHECK_OUT(&pc->pc_freecheck, object);
   2545   1.101.2.6      yamt 			return object;
   2546   1.101.2.1      yamt 		}
   2547   1.101.2.4      yamt 
   2548   1.101.2.6      yamt 		/*
   2549   1.101.2.6      yamt 		 * That failed.  If the previous group isn't empty, swap
   2550   1.101.2.6      yamt 		 * it with the current group and allocate from there.
   2551   1.101.2.6      yamt 		 */
   2552   1.101.2.6      yamt 		pcg = cc->cc_previous;
   2553   1.101.2.6      yamt 		if (pcg != NULL && pcg->pcg_avail > 0) {
   2554   1.101.2.6      yamt 			cc->cc_previous = cc->cc_current;
   2555   1.101.2.6      yamt 			cc->cc_current = pcg;
   2556   1.101.2.6      yamt 			continue;
   2557   1.101.2.6      yamt 		}
   2558   1.101.2.6      yamt 
   2559   1.101.2.6      yamt 		/*
   2560   1.101.2.6      yamt 		 * Can't allocate from either group: try the slow path.
   2561   1.101.2.6      yamt 		 * If get_slow() allocated an object for us, or if
   2562   1.101.2.6      yamt 		 * no more objects are available, it will return NULL.
   2563   1.101.2.6      yamt 		 * Otherwise, we need to retry.
   2564   1.101.2.6      yamt 		 */
   2565   1.101.2.6      yamt 		cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
   2566   1.101.2.6      yamt 	} while (cc != NULL);
   2567   1.101.2.6      yamt 
   2568   1.101.2.6      yamt 	return object;
   2569   1.101.2.1      yamt }
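
/*
 * Example (illustrative sketch, hypothetical names): allocating from
 * the foo_cache created earlier.  pool_cache_get() is the variant
 * that discards the physical address.  With PR_NOWAIT the caller
 * must handle failure; with PR_WAITOK the get can fail only if the
 * constructor fails.
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(foo_cache, PR_NOWAIT);
 *	if (f == NULL)
 *		return ENOMEM;
 */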
   2570   1.101.2.1      yamt 
   2571   1.101.2.6      yamt #if __GNUC_PREREQ__(3, 0)
   2572   1.101.2.6      yamt __attribute ((noinline))
   2573   1.101.2.6      yamt #endif
   2574   1.101.2.6      yamt pool_cache_cpu_t *
   2575   1.101.2.6      yamt pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
   2576   1.101.2.1      yamt {
   2577   1.101.2.6      yamt 	pcg_t *pcg, *cur;
   2578   1.101.2.6      yamt 	uint64_t ncsw;
   2579   1.101.2.6      yamt 	pool_cache_t pc;
   2580   1.101.2.8      yamt 	u_int nobj;
   2581   1.101.2.1      yamt 
   2582   1.101.2.6      yamt 	pc = cc->cc_cache;
   2583   1.101.2.6      yamt 	cc->cc_misses++;
   2584   1.101.2.1      yamt 
   2585   1.101.2.6      yamt 	/*
   2586   1.101.2.6      yamt 	 * No free slots locally.  Try to grab an empty, unused
   2587   1.101.2.6      yamt 	 * group from the cache.
   2588   1.101.2.6      yamt 	 */
   2589   1.101.2.6      yamt 	if (!mutex_tryenter(&pc->pc_lock)) {
   2590   1.101.2.6      yamt 		ncsw = curlwp->l_ncsw;
   2591   1.101.2.6      yamt 		mutex_enter(&pc->pc_lock);
   2592   1.101.2.6      yamt 		pc->pc_contended++;
   2593   1.101.2.1      yamt 
   2594   1.101.2.6      yamt 		/*
   2595   1.101.2.6      yamt 		 * If we context switched while locking, then
   2596   1.101.2.6      yamt 		 * our view of the per-CPU data is invalid:
   2597   1.101.2.6      yamt 		 * retry.
   2598   1.101.2.6      yamt 		 */
   2599   1.101.2.6      yamt 		if (curlwp->l_ncsw != ncsw) {
   2600   1.101.2.6      yamt 			mutex_exit(&pc->pc_lock);
   2601   1.101.2.6      yamt 			pool_cache_cpu_exit(cc, s);
   2602   1.101.2.6      yamt 			return pool_cache_cpu_enter(pc, s);
   2603   1.101.2.6      yamt 		}
   2604   1.101.2.6      yamt 	}
   2605   1.101.2.6      yamt 
   2606   1.101.2.6      yamt 	if ((pcg = pc->pc_emptygroups) != NULL) {
   2607   1.101.2.6      yamt 		/*
    2608   1.101.2.6      yamt 	 * If there's an empty group, release our full
   2609   1.101.2.6      yamt 		 * group back to the cache.  Install the empty
   2610   1.101.2.8      yamt 		 * group and return.
   2611   1.101.2.6      yamt 		 */
   2612   1.101.2.6      yamt 		KASSERT(pcg->pcg_avail == 0);
   2613   1.101.2.6      yamt 		pc->pc_emptygroups = pcg->pcg_next;
   2614   1.101.2.8      yamt 		if (cc->cc_previous == NULL) {
   2615   1.101.2.8      yamt 			cc->cc_previous = pcg;
   2616   1.101.2.8      yamt 		} else {
   2617   1.101.2.8      yamt 			if ((cur = cc->cc_current) != NULL) {
   2618   1.101.2.8      yamt 				KASSERT(cur->pcg_avail == pcg->pcg_size);
   2619   1.101.2.8      yamt 				cur->pcg_next = pc->pc_fullgroups;
   2620   1.101.2.8      yamt 				pc->pc_fullgroups = cur;
   2621   1.101.2.8      yamt 				pc->pc_nfull++;
   2622   1.101.2.8      yamt 			}
   2623   1.101.2.8      yamt 			cc->cc_current = pcg;
   2624   1.101.2.8      yamt 		}
   2625   1.101.2.6      yamt 		pc->pc_hits++;
   2626   1.101.2.6      yamt 		pc->pc_nempty--;
   2627   1.101.2.6      yamt 		mutex_exit(&pc->pc_lock);
   2628   1.101.2.6      yamt 		return cc;
   2629   1.101.2.6      yamt 	}
   2630   1.101.2.6      yamt 
   2631   1.101.2.6      yamt 	/*
   2632   1.101.2.6      yamt 	 * Nothing available locally or in cache.  Take the
   2633   1.101.2.6      yamt 	 * slow path and try to allocate a new group that we
   2634   1.101.2.6      yamt 	 * can release to.
   2635   1.101.2.6      yamt 	 */
   2636   1.101.2.6      yamt 	pc->pc_misses++;
   2637   1.101.2.6      yamt 	mutex_exit(&pc->pc_lock);
   2638   1.101.2.6      yamt 	pool_cache_cpu_exit(cc, s);
   2639   1.101.2.6      yamt 
   2640   1.101.2.6      yamt 	/*
   2641   1.101.2.6      yamt 	 * If we can't allocate a new group, just throw the
   2642   1.101.2.6      yamt 	 * object away.
   2643   1.101.2.6      yamt 	 */
   2644   1.101.2.8      yamt 	nobj = pc->pc_pcgsize;
   2645   1.101.2.8      yamt 	if (pool_cache_disable) {
   2646   1.101.2.8      yamt 		pcg = NULL;
   2647   1.101.2.8      yamt 	} else if (nobj == PCG_NOBJECTS_LARGE) {
   2648   1.101.2.8      yamt 		pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
   2649   1.101.2.8      yamt 	} else {
   2650   1.101.2.8      yamt 		pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
   2651   1.101.2.8      yamt 	}
   2652   1.101.2.6      yamt 	if (pcg == NULL) {
   2653   1.101.2.6      yamt 		pool_cache_destruct_object(pc, object);
   2654   1.101.2.6      yamt 		return NULL;
   2655   1.101.2.6      yamt 	}
   2656   1.101.2.6      yamt 	pcg->pcg_avail = 0;
   2657   1.101.2.8      yamt 	pcg->pcg_size = nobj;
   2658   1.101.2.6      yamt 
   2659   1.101.2.6      yamt 	/*
   2660   1.101.2.6      yamt 	 * Add the empty group to the cache and try again.
   2661   1.101.2.6      yamt 	 */
   2662   1.101.2.6      yamt 	mutex_enter(&pc->pc_lock);
   2663   1.101.2.6      yamt 	pcg->pcg_next = pc->pc_emptygroups;
   2664   1.101.2.6      yamt 	pc->pc_emptygroups = pcg;
   2665   1.101.2.6      yamt 	pc->pc_nempty++;
   2666   1.101.2.6      yamt 	mutex_exit(&pc->pc_lock);
   2667   1.101.2.6      yamt 
   2668   1.101.2.6      yamt 	return pool_cache_cpu_enter(pc, s);
   2669   1.101.2.6      yamt }
   2670   1.101.2.1      yamt 
   2671        1.43   thorpej /*
   2672   1.101.2.6      yamt  * pool_cache_put{,_paddr}:
   2673        1.43   thorpej  *
   2674   1.101.2.6      yamt  *	Put an object back to the pool cache (optionally caching the
   2675   1.101.2.6      yamt  *	physical address of the object).
   2676        1.43   thorpej  */
   2677       1.101   thorpej void
   2678   1.101.2.6      yamt pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
   2679        1.43   thorpej {
   2680   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   2681   1.101.2.6      yamt 	pcg_t *pcg;
   2682   1.101.2.6      yamt 	int s;
   2683       1.101   thorpej 
   2684   1.101.2.6      yamt 	FREECHECK_IN(&pc->pc_freecheck, object);
   2685        1.43   thorpej 
   2686   1.101.2.6      yamt 	cc = pool_cache_cpu_enter(pc, &s);
   2687   1.101.2.6      yamt 	do {
   2688   1.101.2.6      yamt 		/* If the current group isn't full, release it there. */
   2689   1.101.2.6      yamt 	 	pcg = cc->cc_current;
   2690   1.101.2.8      yamt 		if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) {
   2691   1.101.2.6      yamt 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
   2692   1.101.2.6      yamt 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
   2693   1.101.2.6      yamt 			pcg->pcg_avail++;
   2694   1.101.2.6      yamt 			cc->cc_hits++;
   2695   1.101.2.6      yamt 			pool_cache_cpu_exit(cc, &s);
   2696   1.101.2.6      yamt 			return;
   2697   1.101.2.6      yamt 		}
   2698        1.43   thorpej 
   2699   1.101.2.6      yamt 		/*
   2700   1.101.2.6      yamt 		 * That failed.  If the previous group is empty, swap
   2701   1.101.2.6      yamt 		 * it with the current group and try again.
   2702   1.101.2.6      yamt 		 */
   2703   1.101.2.6      yamt 		pcg = cc->cc_previous;
   2704   1.101.2.6      yamt 		if (pcg != NULL && pcg->pcg_avail == 0) {
   2705   1.101.2.6      yamt 			cc->cc_previous = cc->cc_current;
   2706   1.101.2.6      yamt 			cc->cc_current = pcg;
   2707   1.101.2.6      yamt 			continue;
   2708   1.101.2.6      yamt 		}
   2709        1.43   thorpej 
   2710   1.101.2.6      yamt 		/*
   2711   1.101.2.6      yamt 		 * Can't free to either group: try the slow path.
   2712   1.101.2.6      yamt 		 * If put_slow() releases the object for us, it
   2713   1.101.2.6      yamt 		 * will return NULL.  Otherwise we need to retry.
   2714   1.101.2.6      yamt 		 */
   2715   1.101.2.6      yamt 		cc = pool_cache_put_slow(cc, &s, object, pa);
   2716   1.101.2.6      yamt 	} while (cc != NULL);
   2717        1.43   thorpej }
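
/*
 * Example (illustrative sketch, continuing the hypothetical foo_cache
 * above): pool_cache_put() is the variant used when the caller has no
 * physical address to record.
 *
 *	pool_cache_put(foo_cache, f);
 */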
   2718        1.43   thorpej 
   2719        1.43   thorpej /*
   2720   1.101.2.6      yamt  * pool_cache_xcall:
   2721        1.43   thorpej  *
   2722   1.101.2.6      yamt  *	Transfer objects from the per-CPU cache to the global cache.
   2723   1.101.2.6      yamt  *	Run within a cross-call thread.
   2724        1.43   thorpej  */
   2725        1.43   thorpej static void
   2726   1.101.2.6      yamt pool_cache_xcall(pool_cache_t pc)
   2727        1.43   thorpej {
   2728   1.101.2.6      yamt 	pool_cache_cpu_t *cc;
   2729   1.101.2.6      yamt 	pcg_t *prev, *cur, **list;
   2730   1.101.2.6      yamt 	int s = 0; /* XXXgcc */
   2731   1.101.2.6      yamt 
   2732   1.101.2.6      yamt 	cc = pool_cache_cpu_enter(pc, &s);
   2733   1.101.2.6      yamt 	cur = cc->cc_current;
   2734   1.101.2.6      yamt 	cc->cc_current = NULL;
   2735   1.101.2.6      yamt 	prev = cc->cc_previous;
   2736   1.101.2.6      yamt 	cc->cc_previous = NULL;
   2737   1.101.2.6      yamt 	pool_cache_cpu_exit(cc, &s);
   2738   1.101.2.6      yamt 
   2739   1.101.2.6      yamt 	/*
   2740   1.101.2.6      yamt 	 * XXXSMP Go to splvm to prevent kernel_lock from being taken,
   2741   1.101.2.6      yamt 	 * because locks at IPL_SOFTXXX are still spinlocks.  Does not
   2742   1.101.2.6      yamt 	 * apply to IPL_SOFTBIO.  Cross-call threads do not take the
   2743   1.101.2.6      yamt 	 * kernel_lock.
   2744       1.101   thorpej 	 */
   2745   1.101.2.6      yamt 	s = splvm();
   2746   1.101.2.6      yamt 	mutex_enter(&pc->pc_lock);
   2747   1.101.2.6      yamt 	if (cur != NULL) {
   2748   1.101.2.8      yamt 		if (cur->pcg_avail == cur->pcg_size) {
   2749   1.101.2.6      yamt 			list = &pc->pc_fullgroups;
   2750   1.101.2.6      yamt 			pc->pc_nfull++;
   2751   1.101.2.6      yamt 		} else if (cur->pcg_avail == 0) {
   2752   1.101.2.6      yamt 			list = &pc->pc_emptygroups;
   2753   1.101.2.6      yamt 			pc->pc_nempty++;
   2754   1.101.2.6      yamt 		} else {
   2755   1.101.2.6      yamt 			list = &pc->pc_partgroups;
   2756   1.101.2.6      yamt 			pc->pc_npart++;
   2757   1.101.2.6      yamt 		}
   2758   1.101.2.6      yamt 		cur->pcg_next = *list;
   2759   1.101.2.6      yamt 		*list = cur;
   2760   1.101.2.6      yamt 	}
   2761   1.101.2.6      yamt 	if (prev != NULL) {
   2762   1.101.2.8      yamt 		if (prev->pcg_avail == prev->pcg_size) {
   2763   1.101.2.6      yamt 			list = &pc->pc_fullgroups;
   2764   1.101.2.6      yamt 			pc->pc_nfull++;
   2765   1.101.2.6      yamt 		} else if (prev->pcg_avail == 0) {
   2766   1.101.2.6      yamt 			list = &pc->pc_emptygroups;
   2767   1.101.2.6      yamt 			pc->pc_nempty++;
   2768   1.101.2.6      yamt 		} else {
   2769   1.101.2.6      yamt 			list = &pc->pc_partgroups;
   2770   1.101.2.6      yamt 			pc->pc_npart++;
   2771   1.101.2.6      yamt 		}
   2772   1.101.2.6      yamt 		prev->pcg_next = *list;
   2773   1.101.2.6      yamt 		*list = prev;
   2774   1.101.2.6      yamt 	}
   2775   1.101.2.6      yamt 	mutex_exit(&pc->pc_lock);
   2776   1.101.2.6      yamt 	splx(s);
   2777         1.3        pk }
   2778        1.66   thorpej 
   2779        1.66   thorpej /*
   2780        1.66   thorpej  * Pool backend allocators.
   2781        1.66   thorpej  *
   2782        1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   2783        1.66   thorpej  * and any additional draining that might be needed.
   2784        1.66   thorpej  *
   2785        1.66   thorpej  * We provide two standard allocators:
   2786        1.66   thorpej  *
   2787        1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   2788        1.66   thorpej  *
   2789        1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   2790        1.66   thorpej  *	in interrupt context.
   2791        1.66   thorpej  */
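
/*
 * A pool that must take pages from elsewhere can supply its own
 * allocator.  Illustrative sketch (hypothetical names, not part of
 * this file); a pa_pagesz of 0 selects the platform page size:
 *
 *	static void *my_page_alloc(struct pool *, int);
 *	static void my_page_free(struct pool *, void *);
 *
 *	struct pool_allocator my_allocator = {
 *		my_page_alloc, my_page_free, 0,
 *	};
 */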
   2792        1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   2793        1.66   thorpej void	pool_page_free(struct pool *, void *);
   2794        1.66   thorpej 
   2795   1.101.2.1      yamt #ifdef POOL_SUBPAGE
   2796   1.101.2.1      yamt struct pool_allocator pool_allocator_kmem_fullpage = {
   2797   1.101.2.1      yamt 	pool_page_alloc, pool_page_free, 0,
   2798   1.101.2.1      yamt 	.pa_backingmapptr = &kmem_map,
   2799   1.101.2.1      yamt };
   2800   1.101.2.1      yamt #else
   2801        1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   2802        1.66   thorpej 	pool_page_alloc, pool_page_free, 0,
   2803   1.101.2.1      yamt 	.pa_backingmapptr = &kmem_map,
   2804        1.66   thorpej };
   2805   1.101.2.1      yamt #endif
   2806        1.66   thorpej 
   2807        1.66   thorpej void	*pool_page_alloc_nointr(struct pool *, int);
   2808        1.66   thorpej void	pool_page_free_nointr(struct pool *, void *);
   2809        1.66   thorpej 
   2810   1.101.2.1      yamt #ifdef POOL_SUBPAGE
   2811   1.101.2.1      yamt struct pool_allocator pool_allocator_nointr_fullpage = {
   2812   1.101.2.1      yamt 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2813   1.101.2.1      yamt 	.pa_backingmapptr = &kernel_map,
   2814   1.101.2.1      yamt };
   2815   1.101.2.1      yamt #else
   2816        1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   2817        1.66   thorpej 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2818   1.101.2.1      yamt 	.pa_backingmapptr = &kernel_map,
   2819        1.66   thorpej };
   2820   1.101.2.1      yamt #endif
   2821        1.66   thorpej 
   2822        1.66   thorpej #ifdef POOL_SUBPAGE
   2823        1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   2824        1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   2825        1.66   thorpej 
   2826   1.101.2.1      yamt struct pool_allocator pool_allocator_kmem = {
   2827   1.101.2.1      yamt 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2828   1.101.2.1      yamt 	.pa_backingmapptr = &kmem_map,
   2829   1.101.2.1      yamt };
   2830   1.101.2.1      yamt 
   2831   1.101.2.1      yamt void	*pool_subpage_alloc_nointr(struct pool *, int);
   2832   1.101.2.1      yamt void	pool_subpage_free_nointr(struct pool *, void *);
   2833   1.101.2.1      yamt 
   2834   1.101.2.1      yamt struct pool_allocator pool_allocator_nointr = {
   2835   1.101.2.1      yamt 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2836   1.101.2.1      yamt 	.pa_backingmapptr = &kmem_map,
   2837        1.66   thorpej };
   2838        1.66   thorpej #endif /* POOL_SUBPAGE */
   2839        1.66   thorpej 
   2840   1.101.2.1      yamt static void *
   2841   1.101.2.1      yamt pool_allocator_alloc(struct pool *pp, int flags)
   2842        1.66   thorpej {
   2843   1.101.2.1      yamt 	struct pool_allocator *pa = pp->pr_alloc;
   2844        1.66   thorpej 	void *res;
   2845        1.66   thorpej 
   2846   1.101.2.1      yamt 	res = (*pa->pa_alloc)(pp, flags);
   2847   1.101.2.1      yamt 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2848        1.66   thorpej 		/*
   2849   1.101.2.1      yamt 		 * We only run the drain hook here if PR_NOWAIT.
   2850   1.101.2.1      yamt 		 * In other cases, the hook will be run in
   2851   1.101.2.1      yamt 		 * pool_reclaim().
   2852        1.66   thorpej 		 */
   2853   1.101.2.1      yamt 		if (pp->pr_drain_hook != NULL) {
   2854   1.101.2.1      yamt 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2855   1.101.2.1      yamt 			res = (*pa->pa_alloc)(pp, flags);
   2856        1.66   thorpej 		}
   2857   1.101.2.1      yamt 	}
   2858   1.101.2.1      yamt 	return res;
   2859        1.66   thorpej }
   2860        1.66   thorpej 
   2861   1.101.2.1      yamt static void
   2862        1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2863        1.66   thorpej {
   2864        1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2865        1.66   thorpej 
   2866        1.66   thorpej 	(*pa->pa_free)(pp, v);
   2867        1.66   thorpej }
   2868        1.66   thorpej 
   2869        1.66   thorpej void *
   2870        1.66   thorpej pool_page_alloc(struct pool *pp, int flags)
   2871        1.66   thorpej {
   2872   1.101.2.3      yamt 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2873        1.66   thorpej 
   2874       1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
   2875        1.66   thorpej }
   2876        1.66   thorpej 
   2877        1.66   thorpej void
   2878        1.66   thorpej pool_page_free(struct pool *pp, void *v)
   2879        1.66   thorpej {
   2880        1.66   thorpej 
   2881        1.98      yamt 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2882        1.98      yamt }
   2883        1.98      yamt 
   2884        1.98      yamt static void *
   2885        1.98      yamt pool_page_alloc_meta(struct pool *pp, int flags)
   2886        1.98      yamt {
   2887   1.101.2.3      yamt 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2888        1.98      yamt 
   2889       1.100      yamt 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
   2890        1.98      yamt }
   2891        1.98      yamt 
   2892        1.98      yamt static void
   2893        1.98      yamt pool_page_free_meta(struct pool *pp, void *v)
   2894        1.98      yamt {
   2895        1.98      yamt 
   2896       1.100      yamt 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
   2897        1.66   thorpej }
   2898        1.66   thorpej 
   2899        1.66   thorpej #ifdef POOL_SUBPAGE
   2900        1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2901        1.66   thorpej void *
   2902        1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2903        1.66   thorpej {
   2904   1.101.2.6      yamt 	return pool_get(&psppool, flags);
   2905        1.66   thorpej }
   2906        1.66   thorpej 
   2907        1.66   thorpej void
   2908        1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2909        1.66   thorpej {
   2910        1.66   thorpej 	pool_put(&psppool, v);
   2911        1.66   thorpej }
   2912        1.66   thorpej 
   2913        1.66   thorpej /* We don't provide a real nointr allocator.  Maybe later. */
   2914        1.66   thorpej void *
   2915   1.101.2.1      yamt pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2916        1.66   thorpej {
   2917        1.66   thorpej 
   2918        1.66   thorpej 	return (pool_subpage_alloc(pp, flags));
   2919        1.66   thorpej }
   2920        1.66   thorpej 
   2921        1.66   thorpej void
   2922   1.101.2.1      yamt pool_subpage_free_nointr(struct pool *pp, void *v)
   2923        1.66   thorpej {
   2924        1.66   thorpej 
   2925        1.66   thorpej 	pool_subpage_free(pp, v);
   2926        1.66   thorpej }
   2927   1.101.2.1      yamt #endif /* POOL_SUBPAGE */
   2928        1.66   thorpej void *
   2929        1.66   thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
   2930        1.66   thorpej {
   2931   1.101.2.3      yamt 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2932        1.66   thorpej 
   2933       1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
   2934        1.66   thorpej }
   2935        1.66   thorpej 
   2936        1.66   thorpej void
   2937        1.66   thorpej pool_page_free_nointr(struct pool *pp, void *v)
   2938        1.66   thorpej {
   2939        1.66   thorpej 
   2940        1.98      yamt 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2941        1.66   thorpej }
   2942   1.101.2.8      yamt 
   2943   1.101.2.8      yamt #if defined(DDB)
   2944   1.101.2.8      yamt static bool
   2945   1.101.2.8      yamt pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2946   1.101.2.8      yamt {
   2947   1.101.2.8      yamt 
   2948   1.101.2.8      yamt 	return (uintptr_t)ph->ph_page <= addr &&
   2949   1.101.2.8      yamt 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
   2950   1.101.2.8      yamt }
   2951   1.101.2.8      yamt 
   2952   1.101.2.8      yamt static bool
   2953   1.101.2.8      yamt pool_in_item(struct pool *pp, void *item, uintptr_t addr)
   2954   1.101.2.8      yamt {
   2955   1.101.2.8      yamt 
   2956   1.101.2.8      yamt 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
   2957   1.101.2.8      yamt }
   2958   1.101.2.8      yamt 
   2959   1.101.2.8      yamt static bool
   2960   1.101.2.8      yamt pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
   2961   1.101.2.8      yamt {
   2962   1.101.2.8      yamt 	int i;
   2963   1.101.2.8      yamt 
   2964   1.101.2.8      yamt 	if (pcg == NULL) {
   2965   1.101.2.8      yamt 		return false;
   2966   1.101.2.8      yamt 	}
   2967   1.101.2.8      yamt 	for (i = 0; i < pcg->pcg_avail; i++) {
   2968   1.101.2.8      yamt 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
   2969   1.101.2.8      yamt 			return true;
   2970   1.101.2.8      yamt 		}
   2971   1.101.2.8      yamt 	}
   2972   1.101.2.8      yamt 	return false;
   2973   1.101.2.8      yamt }
   2974   1.101.2.8      yamt 
   2975   1.101.2.8      yamt static bool
   2976   1.101.2.8      yamt pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2977   1.101.2.8      yamt {
   2978   1.101.2.8      yamt 
   2979   1.101.2.8      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
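		/*
		 * PR_NOTOUCH pools track free items in ph_bitmap: a set
		 * bit means the item is free, so the item containing
		 * 'addr' is allocated iff its bit is clear.
		 */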
   2980   1.101.2.8      yamt 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
   2981   1.101.2.8      yamt 		pool_item_bitmap_t *bitmap =
   2982   1.101.2.8      yamt 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
   2983   1.101.2.8      yamt 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
   2984   1.101.2.8      yamt 
   2985   1.101.2.8      yamt 		return (*bitmap & mask) == 0;
   2986   1.101.2.8      yamt 	} else {
   2987   1.101.2.8      yamt 		struct pool_item *pi;
   2988   1.101.2.8      yamt 
   2989   1.101.2.8      yamt 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   2990   1.101.2.8      yamt 			if (pool_in_item(pp, pi, addr)) {
   2991   1.101.2.8      yamt 				return false;
   2992   1.101.2.8      yamt 			}
   2993   1.101.2.8      yamt 		}
   2994   1.101.2.8      yamt 		return true;
   2995   1.101.2.8      yamt 	}
   2996   1.101.2.8      yamt }
   2997   1.101.2.8      yamt 
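/*
 * pool_whatis:
 *
 *	DDB helper: report which pool (if any) the given address belongs
 *	to, and whether the enclosing item is allocated, cached or free.
 */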
   2998   1.101.2.8      yamt void
   2999   1.101.2.8      yamt pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   3000   1.101.2.8      yamt {
   3001   1.101.2.8      yamt 	struct pool *pp;
   3002   1.101.2.8      yamt 
   3003   1.101.2.8      yamt 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   3004   1.101.2.8      yamt 		struct pool_item_header *ph;
   3005   1.101.2.8      yamt 		uintptr_t item;
   3006   1.101.2.8      yamt 		bool allocated = true;
   3007   1.101.2.8      yamt 		bool incache = false;
   3008   1.101.2.8      yamt 		bool incpucache = false;
   3009   1.101.2.8      yamt 		char cpucachestr[32];
   3010   1.101.2.8      yamt 
   3011   1.101.2.8      yamt 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
   3012   1.101.2.8      yamt 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   3013   1.101.2.8      yamt 				if (pool_in_page(pp, ph, addr)) {
   3014   1.101.2.8      yamt 					goto found;
   3015   1.101.2.8      yamt 				}
   3016   1.101.2.8      yamt 			}
   3017   1.101.2.8      yamt 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   3018   1.101.2.8      yamt 				if (pool_in_page(pp, ph, addr)) {
   3019   1.101.2.8      yamt 					allocated =
   3020   1.101.2.8      yamt 					    pool_allocated(pp, ph, addr);
   3021   1.101.2.8      yamt 					goto found;
   3022   1.101.2.8      yamt 				}
   3023   1.101.2.8      yamt 			}
   3024   1.101.2.8      yamt 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   3025   1.101.2.8      yamt 				if (pool_in_page(pp, ph, addr)) {
   3026   1.101.2.8      yamt 					allocated = false;
   3027   1.101.2.8      yamt 					goto found;
   3028   1.101.2.8      yamt 				}
   3029   1.101.2.8      yamt 			}
   3030   1.101.2.8      yamt 			continue;
   3031   1.101.2.8      yamt 		} else {
   3032   1.101.2.8      yamt 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
   3033   1.101.2.8      yamt 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
   3034   1.101.2.8      yamt 				continue;
   3035   1.101.2.8      yamt 			}
   3036   1.101.2.8      yamt 			allocated = pool_allocated(pp, ph, addr);
   3037   1.101.2.8      yamt 		}
   3038   1.101.2.8      yamt found:
   3039   1.101.2.8      yamt 		if (allocated && pp->pr_cache) {
   3040   1.101.2.8      yamt 			pool_cache_t pc = pp->pr_cache;
   3041   1.101.2.8      yamt 			struct pool_cache_group *pcg;
   3042   1.101.2.8      yamt 			int i;
   3043   1.101.2.8      yamt 
   3044   1.101.2.8      yamt 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   3045   1.101.2.8      yamt 			    pcg = pcg->pcg_next) {
   3046   1.101.2.8      yamt 				if (pool_in_cg(pp, pcg, addr)) {
   3047   1.101.2.8      yamt 					incache = true;
   3048   1.101.2.8      yamt 					goto print;
   3049   1.101.2.8      yamt 				}
   3050   1.101.2.8      yamt 			}
   3051   1.101.2.8      yamt 			for (i = 0; i < MAXCPUS; i++) {
   3052   1.101.2.8      yamt 				pool_cache_cpu_t *cc;
   3053   1.101.2.8      yamt 
   3054   1.101.2.8      yamt 				if ((cc = pc->pc_cpus[i]) == NULL) {
   3055   1.101.2.8      yamt 					continue;
   3056   1.101.2.8      yamt 				}
   3057   1.101.2.8      yamt 				if (pool_in_cg(pp, cc->cc_current, addr) ||
   3058   1.101.2.8      yamt 				    pool_in_cg(pp, cc->cc_previous, addr)) {
   3059   1.101.2.8      yamt 					struct cpu_info *ci =
   3060   1.101.2.8      yamt 					    cpu_lookup_byindex(i);
   3061   1.101.2.8      yamt 
   3062   1.101.2.8      yamt 					incpucache = true;
   3063   1.101.2.8      yamt 					snprintf(cpucachestr,
   3064   1.101.2.8      yamt 					    sizeof(cpucachestr),
   3065   1.101.2.8      yamt 					    "cached by CPU %u",
   3066   1.101.2.8      yamt 					    (u_int)ci->ci_cpuid);
   3067   1.101.2.8      yamt 					goto print;
   3068   1.101.2.8      yamt 				}
   3069   1.101.2.8      yamt 			}
   3070   1.101.2.8      yamt 		}
   3071   1.101.2.8      yamt print:
   3072   1.101.2.8      yamt 		item = (uintptr_t)ph->ph_page + ph->ph_off;
   3073   1.101.2.8      yamt 		item = item + rounddown(addr - item, pp->pr_size);
   3074   1.101.2.8      yamt 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
    3075   1.101.2.8      yamt 		    (void *)addr, (void *)item, (size_t)(addr - item),
   3076   1.101.2.8      yamt 		    pp->pr_wchan,
   3077   1.101.2.8      yamt 		    incpucache ? cpucachestr :
   3078   1.101.2.8      yamt 		    incache ? "cached" : allocated ? "allocated" : "free");
   3079   1.101.2.8      yamt 	}
   3080   1.101.2.8      yamt }
   3081   1.101.2.8      yamt #endif /* defined(DDB) */
   3082