subr_pool.c revision 1.181
      1 /*	$NetBSD: subr_pool.c,v 1.181 2010/01/03 09:42:22 mlelstv Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9  * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.181 2010/01/03 09:42:22 mlelstv Exp $");
     35 
     36 #include "opt_ddb.h"
     37 #include "opt_pool.h"
     38 #include "opt_poollog.h"
     39 #include "opt_lockdebug.h"
     40 
     41 #include <sys/param.h>
     42 #include <sys/systm.h>
     43 #include <sys/bitops.h>
     44 #include <sys/proc.h>
     45 #include <sys/errno.h>
     46 #include <sys/kernel.h>
     47 #include <sys/malloc.h>
     48 #include <sys/pool.h>
     49 #include <sys/syslog.h>
     50 #include <sys/debug.h>
     51 #include <sys/lockdebug.h>
     52 #include <sys/xcall.h>
     53 #include <sys/cpu.h>
     54 #include <sys/atomic.h>
     55 
     56 #include <uvm/uvm.h>
     57 
     58 /*
     59  * Pool resource management utility.
     60  *
     61  * Memory is allocated in pages which are split into pieces according to
     62  * the pool item size. Each page is kept on one of three lists in the
     63  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
     64  * for empty, full and partially-full pages respectively. The individual
     65  * pool items are on a linked list headed by `ph_itemlist' in each page
     66  * header. The memory for building the page list is either taken from
     67  * the allocated pages themselves (for small pool items) or taken from
     68  * an internal pool of page headers (`phpool').
     69  */
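         /*
          * A minimal usage sketch of this interface (the struct foo/foo_pool
          * names are hypothetical):
          *
          *	static struct pool foo_pool;
          *
          *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
          *	    NULL, IPL_NONE);
          *	...
          *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
          *	...
          *	pool_put(&foo_pool, f);
          *	...
          *	pool_destroy(&foo_pool);
          *
          * Passing a NULL allocator selects pool_allocator_kmem (see
          * pool_init() below).
          */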
     70 
     71 /* List of all pools */
     72 static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     73 
     74 /* Private pool for page header structures */
     75 #define	PHPOOL_MAX	8
     76 static struct pool phpool[PHPOOL_MAX];
     77 #define	PHPOOL_FREELIST_NELEM(idx) \
     78 	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
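         /*
          * For example, with 32-bit bitmap words (BITMAP_SIZE == 32) the
          * freelists grow geometrically: index 1 covers up to 64 items per
          * page, index 2 up to 128, ..., index 7 (PHPOOL_MAX - 1) up to 4096.
          */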
     79 
     80 #ifdef POOL_SUBPAGE
     81 /* Pool of subpages for use by normal pools. */
     82 static struct pool psppool;
     83 #endif
     84 
     85 static SLIST_HEAD(, pool_allocator) pa_deferinitq =
     86     SLIST_HEAD_INITIALIZER(pa_deferinitq);
     87 
     88 static void *pool_page_alloc_meta(struct pool *, int);
     89 static void pool_page_free_meta(struct pool *, void *);
     90 
     91 /* allocator for pool metadata */
     92 struct pool_allocator pool_allocator_meta = {
     93 	pool_page_alloc_meta, pool_page_free_meta,
     94 	.pa_backingmapptr = &kmem_map,
     95 };
     96 
     97 /* # of seconds to retain page after last use */
     98 int pool_inactive_time = 10;
     99 
     100 /* Next candidate for drainage (see pool_drain_start()) */
    101 static struct pool	*drainpp;
    102 
    103 /* This lock protects both pool_head and drainpp. */
    104 static kmutex_t pool_head_lock;
    105 static kcondvar_t pool_busy;
    106 
    107 /* This lock protects initialization of a potentially shared pool allocator */
    108 static kmutex_t pool_allocator_lock;
    109 
    110 typedef uint32_t pool_item_bitmap_t;
    111 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
    112 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
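         /*
          * For example, item index 37 lives in bitmap word 37 / BITMAP_SIZE == 1
          * at bit 37 & BITMAP_MASK == 5, i.e. mask 1 << 5 (see
          * pr_item_notouch_put() and pr_item_notouch_get() below).
          */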
    113 
    114 struct pool_item_header {
    115 	/* Page headers */
    116 	LIST_ENTRY(pool_item_header)
    117 				ph_pagelist;	/* pool page list */
    118 	SPLAY_ENTRY(pool_item_header)
    119 				ph_node;	/* Off-page page headers */
    120 	void *			ph_page;	/* this page's address */
    121 	uint32_t		ph_time;	/* last referenced */
    122 	uint16_t		ph_nmissing;	/* # of chunks in use */
    123 	uint16_t		ph_off;		/* start offset in page */
    124 	union {
    125 		/* !PR_NOTOUCH */
    126 		struct {
    127 			LIST_HEAD(, pool_item)
    128 				phu_itemlist;	/* chunk list for this page */
    129 		} phu_normal;
    130 		/* PR_NOTOUCH */
    131 		struct {
    132 			pool_item_bitmap_t phu_bitmap[1];
    133 		} phu_notouch;
    134 	} ph_u;
    135 };
    136 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
    137 #define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
    138 
    139 struct pool_item {
    140 #ifdef DIAGNOSTIC
    141 	u_int pi_magic;
    142 #endif
    143 #define	PI_MAGIC 0xdeaddeadU
    144 	/* Other entries use only this list entry */
    145 	LIST_ENTRY(pool_item)	pi_list;
    146 };
    147 
    148 #define	POOL_NEEDS_CATCHUP(pp)						\
    149 	((pp)->pr_nitems < (pp)->pr_minitems)
    150 
    151 /*
    152  * Pool cache management.
    153  *
    154  * Pool caches provide a way for constructed objects to be cached by the
    155  * pool subsystem.  This can lead to performance improvements by avoiding
     156  * needless object construction/destruction, since these operations are
     157  * deferred until absolutely necessary.
    158  *
    159  * Caches are grouped into cache groups.  Each cache group references up
    160  * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
    161  * object from the pool, it calls the object's constructor and places it
    162  * into a cache group.  When a cache group frees an object back to the
    163  * pool, it first calls the object's destructor.  This allows the object
    164  * to persist in constructed form while freed to the cache.
    165  *
    166  * The pool references each cache, so that when a pool is drained by the
    167  * pagedaemon, it can drain each individual cache as well.  Each time a
    168  * cache is drained, the most idle cache group is freed to the pool in
    169  * its entirety.
    170  *
     171  * Pool caches are layered on top of pools.  By layering them, we can avoid
    172  * the complexity of cache management for pools which would not benefit
    173  * from it.
    174  */
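         /*
          * A minimal pool_cache usage sketch (the foo_* names, constructor and
          * destructor are hypothetical):
          *
          *	pool_cache_t foo_cache;
          *
          *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
          *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
          *	...
          *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
          *	...
          *	pool_cache_put(foo_cache, f);
          */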
    175 
    176 static struct pool pcg_normal_pool;
    177 static struct pool pcg_large_pool;
    178 static struct pool cache_pool;
    179 static struct pool cache_cpu_pool;
    180 
    181 /* List of all caches. */
    182 TAILQ_HEAD(,pool_cache) pool_cache_head =
    183     TAILQ_HEAD_INITIALIZER(pool_cache_head);
    184 
    185 int pool_cache_disable;		/* global disable for caching */
    186 static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
    187 
    188 static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
    189 				    void *);
    190 static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
    191 				    void **, paddr_t *, int);
    192 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
    193 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
    194 static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
    195 static void	pool_cache_xcall(pool_cache_t);
    196 
    197 static int	pool_catchup(struct pool *);
    198 static void	pool_prime_page(struct pool *, void *,
    199 		    struct pool_item_header *);
    200 static void	pool_update_curpage(struct pool *);
    201 
    202 static int	pool_grow(struct pool *, int);
    203 static void	*pool_allocator_alloc(struct pool *, int);
    204 static void	pool_allocator_free(struct pool *, void *);
    205 
    206 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
    207 	void (*)(const char *, ...));
    208 static void pool_print1(struct pool *, const char *,
    209 	void (*)(const char *, ...));
    210 
    211 static int pool_chk_page(struct pool *, const char *,
    212 			 struct pool_item_header *);
    213 
    214 /*
     215  * Pool log entry.  An array of these is allocated lazily by pr_log().
    216  */
    217 struct pool_log {
    218 	const char	*pl_file;
    219 	long		pl_line;
    220 	int		pl_action;
    221 #define	PRLOG_GET	1
    222 #define	PRLOG_PUT	2
    223 	void		*pl_addr;
    224 };
    225 
    226 #ifdef POOL_DIAGNOSTIC
    227 /* Number of entries in pool log buffers */
    228 #ifndef POOL_LOGSIZE
    229 #define	POOL_LOGSIZE	10
    230 #endif
    231 
    232 int pool_logsize = POOL_LOGSIZE;
    233 
    234 static inline void
    235 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    236 {
    237 	int n;
    238 	struct pool_log *pl;
    239 
    240 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    241 		return;
    242 
    243 	if (pp->pr_log == NULL) {
    244 		if (kmem_map != NULL)
    245 			pp->pr_log = malloc(
    246 				pool_logsize * sizeof(struct pool_log),
    247 				M_TEMP, M_NOWAIT | M_ZERO);
    248 		if (pp->pr_log == NULL)
    249 			return;
    250 		pp->pr_curlogentry = 0;
    251 		pp->pr_logsize = pool_logsize;
    252 	}
    253 
    254 	/*
    255 	 * Fill in the current entry. Wrap around and overwrite
    256 	 * the oldest entry if necessary.
    257 	 */
    258 	n = pp->pr_curlogentry;
    259 	pl = &pp->pr_log[n];
    260 	pl->pl_file = file;
    261 	pl->pl_line = line;
    262 	pl->pl_action = action;
    263 	pl->pl_addr = v;
    264 	if (++n >= pp->pr_logsize)
    265 		n = 0;
    266 	pp->pr_curlogentry = n;
    267 }
    268 
    269 static void
    270 pr_printlog(struct pool *pp, struct pool_item *pi,
    271     void (*pr)(const char *, ...))
    272 {
    273 	int i = pp->pr_logsize;
    274 	int n = pp->pr_curlogentry;
    275 
    276 	if (pp->pr_log == NULL)
    277 		return;
    278 
    279 	/*
    280 	 * Print all entries in this pool's log.
    281 	 */
    282 	while (i-- > 0) {
    283 		struct pool_log *pl = &pp->pr_log[n];
    284 		if (pl->pl_action != 0) {
    285 			if (pi == NULL || pi == pl->pl_addr) {
    286 				(*pr)("\tlog entry %d:\n", i);
    287 				(*pr)("\t\taction = %s, addr = %p\n",
    288 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    289 				    pl->pl_addr);
    290 				(*pr)("\t\tfile: %s at line %lu\n",
    291 				    pl->pl_file, pl->pl_line);
    292 			}
    293 		}
    294 		if (++n >= pp->pr_logsize)
    295 			n = 0;
    296 	}
    297 }
    298 
    299 static inline void
    300 pr_enter(struct pool *pp, const char *file, long line)
    301 {
    302 
    303 	if (__predict_false(pp->pr_entered_file != NULL)) {
    304 		printf("pool %s: reentrancy at file %s line %ld\n",
    305 		    pp->pr_wchan, file, line);
    306 		printf("         previous entry at file %s line %ld\n",
    307 		    pp->pr_entered_file, pp->pr_entered_line);
    308 		panic("pr_enter");
    309 	}
    310 
    311 	pp->pr_entered_file = file;
    312 	pp->pr_entered_line = line;
    313 }
    314 
    315 static inline void
    316 pr_leave(struct pool *pp)
    317 {
    318 
    319 	if (__predict_false(pp->pr_entered_file == NULL)) {
    320 		printf("pool %s not entered?\n", pp->pr_wchan);
    321 		panic("pr_leave");
    322 	}
    323 
    324 	pp->pr_entered_file = NULL;
    325 	pp->pr_entered_line = 0;
    326 }
    327 
    328 static inline void
    329 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    330 {
    331 
    332 	if (pp->pr_entered_file != NULL)
    333 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    334 		    pp->pr_entered_file, pp->pr_entered_line);
    335 }
    336 #else
    337 #define	pr_log(pp, v, action, file, line)
    338 #define	pr_printlog(pp, pi, pr)
    339 #define	pr_enter(pp, file, line)
    340 #define	pr_leave(pp)
    341 #define	pr_enter_check(pp, pr)
    342 #endif /* POOL_DIAGNOSTIC */
    343 
    344 static inline unsigned int
    345 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    346     const void *v)
    347 {
    348 	const char *cp = v;
    349 	unsigned int idx;
    350 
    351 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
    352 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
    353 	KASSERT(idx < pp->pr_itemsperpage);
    354 	return idx;
    355 }
    356 
    357 static inline void
    358 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    359     void *obj)
    360 {
    361 	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
    362 	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
    363 	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
    364 
    365 	KASSERT((*bitmap & mask) == 0);
    366 	*bitmap |= mask;
    367 }
    368 
    369 static inline void *
    370 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
    371 {
    372 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
    373 	unsigned int idx;
    374 	int i;
    375 
    376 	for (i = 0; ; i++) {
    377 		int bit;
    378 
    379 		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
    380 		bit = ffs32(bitmap[i]);
    381 		if (bit) {
    382 			pool_item_bitmap_t mask;
    383 
    384 			bit--;
    385 			idx = (i * BITMAP_SIZE) + bit;
    386 			mask = 1 << bit;
    387 			KASSERT((bitmap[i] & mask) != 0);
    388 			bitmap[i] &= ~mask;
    389 			break;
    390 		}
    391 	}
    392 	KASSERT(idx < pp->pr_itemsperpage);
    393 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
    394 }
    395 
    396 static inline void
    397 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
    398 {
    399 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
    400 	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
    401 	int i;
    402 
    403 	for (i = 0; i < n; i++) {
    404 		bitmap[i] = (pool_item_bitmap_t)-1;
    405 	}
    406 }
    407 
    408 static inline int
    409 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
    410 {
    411 
    412 	/*
     413 	 * We consider a pool_item_header with a smaller ph_page to be bigger.
     414 	 * (This unnatural ordering is for the benefit of pr_find_pagehead.)
    415 	 */
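         	/*
         	 * With this ordering, pr_find_pagehead_noalign() can locate, for
         	 * any item address, the page header with the highest ph_page that
         	 * is not above that address.
         	 */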
    416 
    417 	if (a->ph_page < b->ph_page)
    418 		return (1);
    419 	else if (a->ph_page > b->ph_page)
    420 		return (-1);
    421 	else
    422 		return (0);
    423 }
    424 
    425 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
    426 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
    427 
    428 static inline struct pool_item_header *
    429 pr_find_pagehead_noalign(struct pool *pp, void *v)
    430 {
    431 	struct pool_item_header *ph, tmp;
    432 
    433 	tmp.ph_page = (void *)(uintptr_t)v;
    434 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    435 	if (ph == NULL) {
    436 		ph = SPLAY_ROOT(&pp->pr_phtree);
    437 		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
    438 			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
    439 		}
    440 		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
    441 	}
    442 
    443 	return ph;
    444 }
    445 
    446 /*
    447  * Return the pool page header based on item address.
    448  */
    449 static inline struct pool_item_header *
    450 pr_find_pagehead(struct pool *pp, void *v)
    451 {
    452 	struct pool_item_header *ph, tmp;
    453 
    454 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
    455 		ph = pr_find_pagehead_noalign(pp, v);
    456 	} else {
    457 		void *page =
    458 		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
    459 
    460 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
    461 			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
    462 		} else {
    463 			tmp.ph_page = page;
    464 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    465 		}
    466 	}
    467 
    468 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
    469 	    ((char *)ph->ph_page <= (char *)v &&
    470 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
    471 	return ph;
    472 }
    473 
    474 static void
    475 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
    476 {
    477 	struct pool_item_header *ph;
    478 
    479 	while ((ph = LIST_FIRST(pq)) != NULL) {
    480 		LIST_REMOVE(ph, ph_pagelist);
    481 		pool_allocator_free(pp, ph->ph_page);
    482 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    483 			pool_put(pp->pr_phpool, ph);
    484 	}
    485 }
    486 
    487 /*
    488  * Remove a page from the pool.
    489  */
    490 static inline void
    491 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    492      struct pool_pagelist *pq)
    493 {
    494 
    495 	KASSERT(mutex_owned(&pp->pr_lock));
    496 
    497 	/*
    498 	 * If the page was idle, decrement the idle page count.
    499 	 */
    500 	if (ph->ph_nmissing == 0) {
    501 #ifdef DIAGNOSTIC
    502 		if (pp->pr_nidle == 0)
    503 			panic("pr_rmpage: nidle inconsistent");
    504 		if (pp->pr_nitems < pp->pr_itemsperpage)
    505 			panic("pr_rmpage: nitems inconsistent");
    506 #endif
    507 		pp->pr_nidle--;
    508 	}
    509 
    510 	pp->pr_nitems -= pp->pr_itemsperpage;
    511 
    512 	/*
    513 	 * Unlink the page from the pool and queue it for release.
    514 	 */
    515 	LIST_REMOVE(ph, ph_pagelist);
    516 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    517 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
    518 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
    519 
    520 	pp->pr_npages--;
    521 	pp->pr_npagefree++;
    522 
    523 	pool_update_curpage(pp);
    524 }
    525 
    526 static bool
    527 pa_starved_p(struct pool_allocator *pa)
    528 {
    529 
    530 	if (pa->pa_backingmap != NULL) {
    531 		return vm_map_starved_p(pa->pa_backingmap);
    532 	}
    533 	return false;
    534 }
    535 
    536 static int
    537 pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
    538 {
    539 	struct pool *pp = obj;
    540 	struct pool_allocator *pa = pp->pr_alloc;
    541 
    542 	KASSERT(&pp->pr_reclaimerentry == ce);
    543 	pool_reclaim(pp);
    544 	if (!pa_starved_p(pa)) {
    545 		return CALLBACK_CHAIN_ABORT;
    546 	}
    547 	return CALLBACK_CHAIN_CONTINUE;
    548 }
    549 
    550 static void
    551 pool_reclaim_register(struct pool *pp)
    552 {
    553 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    554 	int s;
    555 
    556 	if (map == NULL) {
    557 		return;
    558 	}
    559 
    560 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    561 	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    562 	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
    563 	splx(s);
    564 }
    565 
    566 static void
    567 pool_reclaim_unregister(struct pool *pp)
    568 {
    569 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    570 	int s;
    571 
    572 	if (map == NULL) {
    573 		return;
    574 	}
    575 
    576 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    577 	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    578 	    &pp->pr_reclaimerentry);
    579 	splx(s);
    580 }
    581 
    582 static void
    583 pa_reclaim_register(struct pool_allocator *pa)
    584 {
    585 	struct vm_map *map = *pa->pa_backingmapptr;
    586 	struct pool *pp;
    587 
    588 	KASSERT(pa->pa_backingmap == NULL);
    589 	if (map == NULL) {
    590 		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
    591 		return;
    592 	}
    593 	pa->pa_backingmap = map;
    594 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
    595 		pool_reclaim_register(pp);
    596 	}
    597 }
    598 
    599 /*
     600  * Initialize the pool subsystem.
    601  */
    602 void
    603 pool_subsystem_init(void)
    604 {
    605 	struct pool_allocator *pa;
    606 
    607 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
    608 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
    609 	cv_init(&pool_busy, "poolbusy");
    610 
    611 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
    612 		KASSERT(pa->pa_backingmapptr != NULL);
    613 		KASSERT(*pa->pa_backingmapptr != NULL);
    614 		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
    615 		pa_reclaim_register(pa);
    616 	}
    617 
    618 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
    619 	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
    620 
    621 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
    622 	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
    623 }
    624 
    625 /*
    626  * Initialize the given pool resource structure.
    627  *
    628  * We export this routine to allow other kernel parts to declare
    629  * static pools that must be initialized before malloc() is available.
    630  */
    631 void
    632 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    633     const char *wchan, struct pool_allocator *palloc, int ipl)
    634 {
    635 	struct pool *pp1;
    636 	size_t trysize, phsize;
    637 	int off, slack;
    638 
    639 #ifdef DEBUG
    640 	/*
    641 	 * Check that the pool hasn't already been initialised and
    642 	 * added to the list of all pools.
    643 	 */
    644 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
    645 		if (pp == pp1)
    646 			panic("pool_init: pool %s already initialised",
    647 			    wchan);
    648 	}
    649 #endif
    650 
    651 #ifdef POOL_DIAGNOSTIC
    652 	/*
    653 	 * Always log if POOL_DIAGNOSTIC is defined.
    654 	 */
    655 	if (pool_logsize != 0)
    656 		flags |= PR_LOGGING;
    657 #endif
    658 
    659 	if (palloc == NULL)
    660 		palloc = &pool_allocator_kmem;
    661 #ifdef POOL_SUBPAGE
    662 	if (size > palloc->pa_pagesz) {
    663 		if (palloc == &pool_allocator_kmem)
    664 			palloc = &pool_allocator_kmem_fullpage;
    665 		else if (palloc == &pool_allocator_nointr)
    666 			palloc = &pool_allocator_nointr_fullpage;
    667 	}
    668 #endif /* POOL_SUBPAGE */
    669 	if (!cold)
    670 		mutex_enter(&pool_allocator_lock);
    671 	if (palloc->pa_refcnt++ == 0) {
    672 		if (palloc->pa_pagesz == 0)
    673 			palloc->pa_pagesz = PAGE_SIZE;
    674 
    675 		TAILQ_INIT(&palloc->pa_list);
    676 
    677 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
    678 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    679 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    680 
    681 		if (palloc->pa_backingmapptr != NULL) {
    682 			pa_reclaim_register(palloc);
    683 		}
    684 	}
    685 	if (!cold)
    686 		mutex_exit(&pool_allocator_lock);
    687 
    688 	if (align == 0)
    689 		align = ALIGN(1);
    690 
    691 	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
    692 		size = sizeof(struct pool_item);
    693 
    694 	size = roundup(size, align);
    695 #ifdef DIAGNOSTIC
    696 	if (size > palloc->pa_pagesz)
    697 		panic("pool_init: pool item size (%zu) too large", size);
    698 #endif
    699 
    700 	/*
    701 	 * Initialize the pool structure.
    702 	 */
    703 	LIST_INIT(&pp->pr_emptypages);
    704 	LIST_INIT(&pp->pr_fullpages);
    705 	LIST_INIT(&pp->pr_partpages);
    706 	pp->pr_cache = NULL;
    707 	pp->pr_curpage = NULL;
    708 	pp->pr_npages = 0;
    709 	pp->pr_minitems = 0;
    710 	pp->pr_minpages = 0;
    711 	pp->pr_maxpages = UINT_MAX;
    712 	pp->pr_roflags = flags;
    713 	pp->pr_flags = 0;
    714 	pp->pr_size = size;
    715 	pp->pr_align = align;
    716 	pp->pr_wchan = wchan;
    717 	pp->pr_alloc = palloc;
    718 	pp->pr_nitems = 0;
    719 	pp->pr_nout = 0;
    720 	pp->pr_hardlimit = UINT_MAX;
    721 	pp->pr_hardlimit_warning = NULL;
    722 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    723 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    724 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    725 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    726 	pp->pr_drain_hook = NULL;
    727 	pp->pr_drain_hook_arg = NULL;
    728 	pp->pr_freecheck = NULL;
    729 
    730 	/*
     731 	 * Decide whether to put the page header off page, either to avoid
     732 	 * wasting too large a part of the page or because the item is too big.
     733 	 * Off-page page headers go into a splay tree, so we can match
     734 	 * a returned item with its header based on the page address.
     735 	 * We use 1/16 of the page size and about 8 times the item
     736 	 * size as the threshold.  (XXX: tune)
    737 	 *
    738 	 * However, we'll put the header into the page if we can put
    739 	 * it without wasting any items.
    740 	 *
    741 	 * Silently enforce `0 <= ioff < align'.
    742 	 */
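         	/*
         	 * For example (a sketch assuming 4 KB pages and a header of
         	 * roughly 64 bytes): 128-byte items keep the header in the page,
         	 * since 128 < MIN(4096 / 16, 64 << 3); 512-byte items push it
         	 * off page, since reserving the header would cost an item
         	 * (8 vs. 7 items); 1000-byte items keep it in the page, since
         	 * 4 items fit either way.
         	 */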
    743 	pp->pr_itemoffset = ioff %= align;
    744 	/* See the comment below about reserved bytes. */
    745 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
    746 	phsize = ALIGN(sizeof(struct pool_item_header));
    747 	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
    748 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
    749 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
    750 		/* Use the end of the page for the page header */
    751 		pp->pr_roflags |= PR_PHINPAGE;
    752 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
    753 	} else {
    754 		/* The page header will be taken from our page header pool */
    755 		pp->pr_phoffset = 0;
    756 		off = palloc->pa_pagesz;
    757 		SPLAY_INIT(&pp->pr_phtree);
    758 	}
    759 
    760 	/*
    761 	 * Alignment is to take place at `ioff' within the item. This means
    762 	 * we must reserve up to `align - 1' bytes on the page to allow
    763 	 * appropriate positioning of each item.
    764 	 */
    765 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    766 	KASSERT(pp->pr_itemsperpage != 0);
    767 	if ((pp->pr_roflags & PR_NOTOUCH)) {
    768 		int idx;
    769 
    770 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
    771 		    idx++) {
    772 			/* nothing */
    773 		}
    774 		if (idx >= PHPOOL_MAX) {
    775 			/*
     776 			 * If you see this panic, consider tweaking
    777 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
    778 			 */
    779 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
    780 			    pp->pr_wchan, pp->pr_itemsperpage);
    781 		}
    782 		pp->pr_phpool = &phpool[idx];
    783 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    784 		pp->pr_phpool = &phpool[0];
    785 	}
    786 #if defined(DIAGNOSTIC)
    787 	else {
    788 		pp->pr_phpool = NULL;
    789 	}
    790 #endif
    791 
    792 	/*
    793 	 * Use the slack between the chunks and the page header
    794 	 * for "cache coloring".
    795 	 */
    796 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    797 	pp->pr_maxcolor = (slack / align) * align;
    798 	pp->pr_curcolor = 0;
    799 
    800 	pp->pr_nget = 0;
    801 	pp->pr_nfail = 0;
    802 	pp->pr_nput = 0;
    803 	pp->pr_npagealloc = 0;
    804 	pp->pr_npagefree = 0;
    805 	pp->pr_hiwat = 0;
    806 	pp->pr_nidle = 0;
    807 	pp->pr_refcnt = 0;
    808 
    809 	pp->pr_log = NULL;
    810 
    811 	pp->pr_entered_file = NULL;
    812 	pp->pr_entered_line = 0;
    813 
    814 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
    815 	cv_init(&pp->pr_cv, wchan);
    816 	pp->pr_ipl = ipl;
    817 
    818 	/*
    819 	 * Initialize private page header pool and cache magazine pool if we
    820 	 * haven't done so yet.
    821 	 * XXX LOCKING.
    822 	 */
    823 	if (phpool[0].pr_size == 0) {
    824 		int idx;
    825 		for (idx = 0; idx < PHPOOL_MAX; idx++) {
    826 			static char phpool_names[PHPOOL_MAX][6+1+6+1];
    827 			int nelem;
    828 			size_t sz;
    829 
    830 			nelem = PHPOOL_FREELIST_NELEM(idx);
    831 			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
    832 			    "phpool-%d", nelem);
    833 			sz = sizeof(struct pool_item_header);
    834 			if (nelem) {
    835 				sz = offsetof(struct pool_item_header,
    836 				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
    837 			}
    838 			pool_init(&phpool[idx], sz, 0, 0, 0,
    839 			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
    840 		}
    841 #ifdef POOL_SUBPAGE
    842 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    843 		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
    844 #endif
    845 
    846 		size = sizeof(pcg_t) +
    847 		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
    848 		pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
    849 		    "pcgnormal", &pool_allocator_meta, IPL_VM);
    850 
    851 		size = sizeof(pcg_t) +
    852 		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
    853 		pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
    854 		    "pcglarge", &pool_allocator_meta, IPL_VM);
    855 	}
    856 
    857 	/* Insert into the list of all pools. */
    858 	if (!cold)
    859 		mutex_enter(&pool_head_lock);
    860 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
    861 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
    862 			break;
    863 	}
    864 	if (pp1 == NULL)
    865 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    866 	else
    867 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
    868 	if (!cold)
    869 		mutex_exit(&pool_head_lock);
    870 
    871 	/* Insert this into the list of pools using this allocator. */
    872 	if (!cold)
    873 		mutex_enter(&palloc->pa_lock);
    874 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    875 	if (!cold)
    876 		mutex_exit(&palloc->pa_lock);
    877 
    878 	pool_reclaim_register(pp);
    879 }
    880 
    881 /*
     882  * Decommission a pool resource.
    883  */
    884 void
    885 pool_destroy(struct pool *pp)
    886 {
    887 	struct pool_pagelist pq;
    888 	struct pool_item_header *ph;
    889 
    890 	/* Remove from global pool list */
    891 	mutex_enter(&pool_head_lock);
    892 	while (pp->pr_refcnt != 0)
    893 		cv_wait(&pool_busy, &pool_head_lock);
    894 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    895 	if (drainpp == pp)
    896 		drainpp = NULL;
    897 	mutex_exit(&pool_head_lock);
    898 
    899 	/* Remove this pool from its allocator's list of pools. */
    900 	pool_reclaim_unregister(pp);
    901 	mutex_enter(&pp->pr_alloc->pa_lock);
    902 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    903 	mutex_exit(&pp->pr_alloc->pa_lock);
    904 
    905 	mutex_enter(&pool_allocator_lock);
    906 	if (--pp->pr_alloc->pa_refcnt == 0)
    907 		mutex_destroy(&pp->pr_alloc->pa_lock);
    908 	mutex_exit(&pool_allocator_lock);
    909 
    910 	mutex_enter(&pp->pr_lock);
    911 
    912 	KASSERT(pp->pr_cache == NULL);
    913 
    914 #ifdef DIAGNOSTIC
    915 	if (pp->pr_nout != 0) {
    916 		pr_printlog(pp, NULL, printf);
    917 		panic("pool_destroy: pool busy: still out: %u",
    918 		    pp->pr_nout);
    919 	}
    920 #endif
    921 
    922 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
    923 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
    924 
    925 	/* Remove all pages */
    926 	LIST_INIT(&pq);
    927 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
    928 		pr_rmpage(pp, ph, &pq);
    929 
    930 	mutex_exit(&pp->pr_lock);
    931 
    932 	pr_pagelist_free(pp, &pq);
    933 
    934 #ifdef POOL_DIAGNOSTIC
    935 	if (pp->pr_log != NULL) {
    936 		free(pp->pr_log, M_TEMP);
    937 		pp->pr_log = NULL;
    938 	}
    939 #endif
    940 
    941 	cv_destroy(&pp->pr_cv);
    942 	mutex_destroy(&pp->pr_lock);
    943 }
    944 
    945 void
    946 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    947 {
    948 
    949 	/* XXX no locking -- must be used just after pool_init() */
    950 #ifdef DIAGNOSTIC
    951 	if (pp->pr_drain_hook != NULL)
    952 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
    953 #endif
    954 	pp->pr_drain_hook = fn;
    955 	pp->pr_drain_hook_arg = arg;
    956 }
    957 
    958 static struct pool_item_header *
    959 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
    960 {
    961 	struct pool_item_header *ph;
    962 
    963 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    964 		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
    965 	else
    966 		ph = pool_get(pp->pr_phpool, flags);
    967 
    968 	return (ph);
    969 }
    970 
    971 /*
    972  * Grab an item from the pool.
    973  */
    974 void *
    975 #ifdef POOL_DIAGNOSTIC
    976 _pool_get(struct pool *pp, int flags, const char *file, long line)
    977 #else
    978 pool_get(struct pool *pp, int flags)
    979 #endif
    980 {
    981 	struct pool_item *pi;
    982 	struct pool_item_header *ph;
    983 	void *v;
    984 
    985 #ifdef DIAGNOSTIC
    986 	if (__predict_false(pp->pr_itemsperpage == 0))
    987 		panic("pool_get: pool %p: pr_itemsperpage is zero, "
    988 		    "pool not initialized?", pp);
    989 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
    990 			    (flags & PR_WAITOK) != 0))
    991 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
    992 
    993 #endif /* DIAGNOSTIC */
    994 #ifdef LOCKDEBUG
    995 	if (flags & PR_WAITOK) {
    996 		ASSERT_SLEEPABLE();
    997 	}
    998 #endif
    999 
   1000 	mutex_enter(&pp->pr_lock);
   1001 	pr_enter(pp, file, line);
   1002 
   1003  startover:
   1004 	/*
   1005 	 * Check to see if we've reached the hard limit.  If we have,
   1006 	 * and we can wait, then wait until an item has been returned to
   1007 	 * the pool.
   1008 	 */
   1009 #ifdef DIAGNOSTIC
   1010 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
   1011 		pr_leave(pp);
   1012 		mutex_exit(&pp->pr_lock);
   1013 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
   1014 	}
   1015 #endif
   1016 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
   1017 		if (pp->pr_drain_hook != NULL) {
   1018 			/*
   1019 			 * Since the drain hook is going to free things
   1020 			 * back to the pool, unlock, call the hook, re-lock,
   1021 			 * and check the hardlimit condition again.
   1022 			 */
   1023 			pr_leave(pp);
   1024 			mutex_exit(&pp->pr_lock);
   1025 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   1026 			mutex_enter(&pp->pr_lock);
   1027 			pr_enter(pp, file, line);
   1028 			if (pp->pr_nout < pp->pr_hardlimit)
   1029 				goto startover;
   1030 		}
   1031 
   1032 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
   1033 			/*
   1034 			 * XXX: A warning isn't logged in this case.  Should
   1035 			 * it be?
   1036 			 */
   1037 			pp->pr_flags |= PR_WANTED;
   1038 			pr_leave(pp);
   1039 			cv_wait(&pp->pr_cv, &pp->pr_lock);
   1040 			pr_enter(pp, file, line);
   1041 			goto startover;
   1042 		}
   1043 
   1044 		/*
   1045 		 * Log a message that the hard limit has been hit.
   1046 		 */
   1047 		if (pp->pr_hardlimit_warning != NULL &&
   1048 		    ratecheck(&pp->pr_hardlimit_warning_last,
   1049 			      &pp->pr_hardlimit_ratecap))
   1050 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
   1051 
   1052 		pp->pr_nfail++;
   1053 
   1054 		pr_leave(pp);
   1055 		mutex_exit(&pp->pr_lock);
   1056 		return (NULL);
   1057 	}
   1058 
   1059 	/*
   1060 	 * The convention we use is that if `curpage' is not NULL, then
   1061 	 * it points at a non-empty bucket. In particular, `curpage'
   1062 	 * never points at a page header which has PR_PHINPAGE set and
   1063 	 * has no items in its bucket.
   1064 	 */
   1065 	if ((ph = pp->pr_curpage) == NULL) {
   1066 		int error;
   1067 
   1068 #ifdef DIAGNOSTIC
   1069 		if (pp->pr_nitems != 0) {
   1070 			mutex_exit(&pp->pr_lock);
   1071 			printf("pool_get: %s: curpage NULL, nitems %u\n",
   1072 			    pp->pr_wchan, pp->pr_nitems);
   1073 			panic("pool_get: nitems inconsistent");
   1074 		}
   1075 #endif
   1076 
   1077 		/*
   1078 		 * Call the back-end page allocator for more memory.
   1079 		 * Release the pool lock, as the back-end page allocator
   1080 		 * may block.
   1081 		 */
   1082 		pr_leave(pp);
   1083 		error = pool_grow(pp, flags);
   1084 		pr_enter(pp, file, line);
   1085 		if (error != 0) {
   1086 			/*
   1087 			 * We were unable to allocate a page or item
   1088 			 * header, but we released the lock during
   1089 			 * allocation, so perhaps items were freed
   1090 			 * back to the pool.  Check for this case.
   1091 			 */
   1092 			if (pp->pr_curpage != NULL)
   1093 				goto startover;
   1094 
   1095 			pp->pr_nfail++;
   1096 			pr_leave(pp);
   1097 			mutex_exit(&pp->pr_lock);
   1098 			return (NULL);
   1099 		}
   1100 
   1101 		/* Start the allocation process over. */
   1102 		goto startover;
   1103 	}
   1104 	if (pp->pr_roflags & PR_NOTOUCH) {
   1105 #ifdef DIAGNOSTIC
   1106 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
   1107 			pr_leave(pp);
   1108 			mutex_exit(&pp->pr_lock);
   1109 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1110 		}
   1111 #endif
   1112 		v = pr_item_notouch_get(pp, ph);
   1113 #ifdef POOL_DIAGNOSTIC
   1114 		pr_log(pp, v, PRLOG_GET, file, line);
   1115 #endif
   1116 	} else {
   1117 		v = pi = LIST_FIRST(&ph->ph_itemlist);
   1118 		if (__predict_false(v == NULL)) {
   1119 			pr_leave(pp);
   1120 			mutex_exit(&pp->pr_lock);
   1121 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1122 		}
   1123 #ifdef DIAGNOSTIC
   1124 		if (__predict_false(pp->pr_nitems == 0)) {
   1125 			pr_leave(pp);
   1126 			mutex_exit(&pp->pr_lock);
   1127 			printf("pool_get: %s: items on itemlist, nitems %u\n",
   1128 			    pp->pr_wchan, pp->pr_nitems);
   1129 			panic("pool_get: nitems inconsistent");
   1130 		}
   1131 #endif
   1132 
   1133 #ifdef POOL_DIAGNOSTIC
   1134 		pr_log(pp, v, PRLOG_GET, file, line);
   1135 #endif
   1136 
   1137 #ifdef DIAGNOSTIC
   1138 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
   1139 			pr_printlog(pp, pi, printf);
   1140 			panic("pool_get(%s): free list modified: "
   1141 			    "magic=%x; page %p; item addr %p\n",
   1142 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
   1143 		}
   1144 #endif
   1145 
   1146 		/*
   1147 		 * Remove from item list.
   1148 		 */
   1149 		LIST_REMOVE(pi, pi_list);
   1150 	}
   1151 	pp->pr_nitems--;
   1152 	pp->pr_nout++;
   1153 	if (ph->ph_nmissing == 0) {
   1154 #ifdef DIAGNOSTIC
   1155 		if (__predict_false(pp->pr_nidle == 0))
   1156 			panic("pool_get: nidle inconsistent");
   1157 #endif
   1158 		pp->pr_nidle--;
   1159 
   1160 		/*
   1161 		 * This page was previously empty.  Move it to the list of
   1162 		 * partially-full pages.  This page is already curpage.
   1163 		 */
   1164 		LIST_REMOVE(ph, ph_pagelist);
   1165 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1166 	}
   1167 	ph->ph_nmissing++;
   1168 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
   1169 #ifdef DIAGNOSTIC
   1170 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
   1171 		    !LIST_EMPTY(&ph->ph_itemlist))) {
   1172 			pr_leave(pp);
   1173 			mutex_exit(&pp->pr_lock);
   1174 			panic("pool_get: %s: nmissing inconsistent",
   1175 			    pp->pr_wchan);
   1176 		}
   1177 #endif
   1178 		/*
   1179 		 * This page is now full.  Move it to the full list
   1180 		 * and select a new current page.
   1181 		 */
   1182 		LIST_REMOVE(ph, ph_pagelist);
   1183 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
   1184 		pool_update_curpage(pp);
   1185 	}
   1186 
   1187 	pp->pr_nget++;
   1188 	pr_leave(pp);
   1189 
   1190 	/*
   1191 	 * If we have a low water mark and we are now below that low
   1192 	 * water mark, add more items to the pool.
   1193 	 */
   1194 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1195 		/*
   1196 		 * XXX: Should we log a warning?  Should we set up a timeout
   1197 		 * to try again in a second or so?  The latter could break
   1198 		 * a caller's assumptions about interrupt protection, etc.
   1199 		 */
   1200 	}
   1201 
   1202 	mutex_exit(&pp->pr_lock);
   1203 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
   1204 	FREECHECK_OUT(&pp->pr_freecheck, v);
   1205 	return (v);
   1206 }
   1207 
   1208 /*
   1209  * Internal version of pool_put().  Pool is already locked/entered.
   1210  */
   1211 static void
   1212 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
   1213 {
   1214 	struct pool_item *pi = v;
   1215 	struct pool_item_header *ph;
   1216 
   1217 	KASSERT(mutex_owned(&pp->pr_lock));
   1218 	FREECHECK_IN(&pp->pr_freecheck, v);
   1219 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
   1220 
   1221 #ifdef DIAGNOSTIC
   1222 	if (__predict_false(pp->pr_nout == 0)) {
   1223 		printf("pool %s: putting with none out\n",
   1224 		    pp->pr_wchan);
   1225 		panic("pool_put");
   1226 	}
   1227 #endif
   1228 
   1229 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
   1230 		pr_printlog(pp, NULL, printf);
   1231 		panic("pool_put: %s: page header missing", pp->pr_wchan);
   1232 	}
   1233 
   1234 	/*
   1235 	 * Return to item list.
   1236 	 */
   1237 	if (pp->pr_roflags & PR_NOTOUCH) {
   1238 		pr_item_notouch_put(pp, ph, v);
   1239 	} else {
   1240 #ifdef DIAGNOSTIC
   1241 		pi->pi_magic = PI_MAGIC;
   1242 #endif
   1243 #ifdef DEBUG
   1244 		{
   1245 			int i, *ip = v;
   1246 
   1247 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
   1248 				*ip++ = PI_MAGIC;
   1249 			}
   1250 		}
   1251 #endif
   1252 
   1253 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1254 	}
   1255 	KDASSERT(ph->ph_nmissing != 0);
   1256 	ph->ph_nmissing--;
   1257 	pp->pr_nput++;
   1258 	pp->pr_nitems++;
   1259 	pp->pr_nout--;
   1260 
   1261 	/* Cancel "pool empty" condition if it exists */
   1262 	if (pp->pr_curpage == NULL)
   1263 		pp->pr_curpage = ph;
   1264 
   1265 	if (pp->pr_flags & PR_WANTED) {
   1266 		pp->pr_flags &= ~PR_WANTED;
   1267 		cv_broadcast(&pp->pr_cv);
   1268 	}
   1269 
   1270 	/*
   1271 	 * If this page is now empty, do one of two things:
   1272 	 *
   1273 	 *	(1) If we have more pages than the page high water mark,
   1274 	 *	    free the page back to the system.  ONLY CONSIDER
   1275 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
   1276 	 *	    CLAIM.
   1277 	 *
   1278 	 *	(2) Otherwise, move the page to the empty page list.
   1279 	 *
   1280 	 * Either way, select a new current page (so we use a partially-full
   1281 	 * page if one is available).
   1282 	 */
   1283 	if (ph->ph_nmissing == 0) {
   1284 		pp->pr_nidle++;
   1285 		if (pp->pr_npages > pp->pr_minpages &&
   1286 		    pp->pr_npages > pp->pr_maxpages) {
   1287 			pr_rmpage(pp, ph, pq);
   1288 		} else {
   1289 			LIST_REMOVE(ph, ph_pagelist);
   1290 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1291 
   1292 			/*
   1293 			 * Update the timestamp on the page.  A page must
   1294 			 * be idle for some period of time before it can
   1295 			 * be reclaimed by the pagedaemon.  This minimizes
   1296 			 * ping-pong'ing for memory.
   1297 			 *
   1298 			 * note for 64-bit time_t: truncating to 32-bit is not
   1299 			 * a problem for our usage.
   1300 			 */
   1301 			ph->ph_time = time_uptime;
   1302 		}
   1303 		pool_update_curpage(pp);
   1304 	}
   1305 
   1306 	/*
   1307 	 * If the page was previously completely full, move it to the
   1308 	 * partially-full list and make it the current page.  The next
   1309 	 * allocation will get the item from this page, instead of
   1310 	 * further fragmenting the pool.
   1311 	 */
   1312 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1313 		LIST_REMOVE(ph, ph_pagelist);
   1314 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1315 		pp->pr_curpage = ph;
   1316 	}
   1317 }
   1318 
   1319 /*
   1320  * Return resource to the pool.
   1321  */
   1322 #ifdef POOL_DIAGNOSTIC
   1323 void
   1324 _pool_put(struct pool *pp, void *v, const char *file, long line)
   1325 {
   1326 	struct pool_pagelist pq;
   1327 
   1328 	LIST_INIT(&pq);
   1329 
   1330 	mutex_enter(&pp->pr_lock);
   1331 	pr_enter(pp, file, line);
   1332 
   1333 	pr_log(pp, v, PRLOG_PUT, file, line);
   1334 
   1335 	pool_do_put(pp, v, &pq);
   1336 
   1337 	pr_leave(pp);
   1338 	mutex_exit(&pp->pr_lock);
   1339 
   1340 	pr_pagelist_free(pp, &pq);
   1341 }
   1342 #undef pool_put
   1343 #endif /* POOL_DIAGNOSTIC */
   1344 
   1345 void
   1346 pool_put(struct pool *pp, void *v)
   1347 {
   1348 	struct pool_pagelist pq;
   1349 
   1350 	LIST_INIT(&pq);
   1351 
   1352 	mutex_enter(&pp->pr_lock);
   1353 	pool_do_put(pp, v, &pq);
   1354 	mutex_exit(&pp->pr_lock);
   1355 
   1356 	pr_pagelist_free(pp, &pq);
   1357 }
   1358 
   1359 #ifdef POOL_DIAGNOSTIC
   1360 #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1361 #endif
   1362 
   1363 /*
   1364  * pool_grow: grow a pool by a page.
   1365  *
   1366  * => called with pool locked.
   1367  * => unlock and relock the pool.
   1368  * => return with pool locked.
   1369  */
   1370 
   1371 static int
   1372 pool_grow(struct pool *pp, int flags)
   1373 {
   1374 	struct pool_item_header *ph = NULL;
   1375 	char *cp;
   1376 
   1377 	mutex_exit(&pp->pr_lock);
   1378 	cp = pool_allocator_alloc(pp, flags);
   1379 	if (__predict_true(cp != NULL)) {
   1380 		ph = pool_alloc_item_header(pp, cp, flags);
   1381 	}
   1382 	if (__predict_false(cp == NULL || ph == NULL)) {
   1383 		if (cp != NULL) {
   1384 			pool_allocator_free(pp, cp);
   1385 		}
   1386 		mutex_enter(&pp->pr_lock);
   1387 		return ENOMEM;
   1388 	}
   1389 
   1390 	mutex_enter(&pp->pr_lock);
   1391 	pool_prime_page(pp, cp, ph);
   1392 	pp->pr_npagealloc++;
   1393 	return 0;
   1394 }
   1395 
   1396 /*
   1397  * Add N items to the pool.
   1398  */
   1399 int
   1400 pool_prime(struct pool *pp, int n)
   1401 {
   1402 	int newpages;
   1403 	int error = 0;
   1404 
   1405 	mutex_enter(&pp->pr_lock);
   1406 
   1407 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1408 
   1409 	while (newpages-- > 0) {
   1410 		error = pool_grow(pp, PR_NOWAIT);
   1411 		if (error) {
   1412 			break;
   1413 		}
   1414 		pp->pr_minpages++;
   1415 	}
   1416 
   1417 	if (pp->pr_minpages >= pp->pr_maxpages)
   1418 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1419 
   1420 	mutex_exit(&pp->pr_lock);
   1421 	return error;
   1422 }
   1423 
   1424 /*
   1425  * Add a page worth of items to the pool.
   1426  *
   1427  * Note, we must be called with the pool descriptor LOCKED.
   1428  */
   1429 static void
   1430 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1431 {
   1432 	struct pool_item *pi;
   1433 	void *cp = storage;
   1434 	const unsigned int align = pp->pr_align;
   1435 	const unsigned int ioff = pp->pr_itemoffset;
   1436 	int n;
   1437 
   1438 	KASSERT(mutex_owned(&pp->pr_lock));
   1439 
   1440 #ifdef DIAGNOSTIC
   1441 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
   1442 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1443 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1444 #endif
   1445 
   1446 	/*
   1447 	 * Insert page header.
   1448 	 */
   1449 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1450 	LIST_INIT(&ph->ph_itemlist);
   1451 	ph->ph_page = storage;
   1452 	ph->ph_nmissing = 0;
   1453 	ph->ph_time = time_uptime;
   1454 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1455 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1456 
   1457 	pp->pr_nidle++;
   1458 
   1459 	/*
   1460 	 * Color this page.
   1461 	 */
   1462 	ph->ph_off = pp->pr_curcolor;
   1463 	cp = (char *)cp + ph->ph_off;
   1464 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1465 		pp->pr_curcolor = 0;
   1466 
   1467 	/*
    1468 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1469 	 */
   1470 	if (ioff != 0)
   1471 		cp = (char *)cp + align - ioff;
   1472 
   1473 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1474 
   1475 	/*
   1476 	 * Insert remaining chunks on the bucket list.
   1477 	 */
   1478 	n = pp->pr_itemsperpage;
   1479 	pp->pr_nitems += n;
   1480 
   1481 	if (pp->pr_roflags & PR_NOTOUCH) {
   1482 		pr_item_notouch_init(pp, ph);
   1483 	} else {
   1484 		while (n--) {
   1485 			pi = (struct pool_item *)cp;
   1486 
   1487 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1488 
   1489 			/* Insert on page list */
   1490 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1491 #ifdef DIAGNOSTIC
   1492 			pi->pi_magic = PI_MAGIC;
   1493 #endif
   1494 			cp = (char *)cp + pp->pr_size;
   1495 
   1496 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1497 		}
   1498 	}
   1499 
   1500 	/*
   1501 	 * If the pool was depleted, point at the new page.
   1502 	 */
   1503 	if (pp->pr_curpage == NULL)
   1504 		pp->pr_curpage = ph;
   1505 
   1506 	if (++pp->pr_npages > pp->pr_hiwat)
   1507 		pp->pr_hiwat = pp->pr_npages;
   1508 }
   1509 
   1510 /*
   1511  * Used by pool_get() when nitems drops below the low water mark.  This
    1512  * brings pr_nitems back up to the low water mark.
    1513  *
    1514  * Note 1: we never wait for memory here; we let the caller decide what to do.
    1515  *
    1516  * Note 2: we must be called with the pool already locked, and we return
   1517  * with it locked.
   1518  */
   1519 static int
   1520 pool_catchup(struct pool *pp)
   1521 {
   1522 	int error = 0;
   1523 
   1524 	while (POOL_NEEDS_CATCHUP(pp)) {
   1525 		error = pool_grow(pp, PR_NOWAIT);
   1526 		if (error) {
   1527 			break;
   1528 		}
   1529 	}
   1530 	return error;
   1531 }
   1532 
   1533 static void
   1534 pool_update_curpage(struct pool *pp)
   1535 {
   1536 
   1537 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1538 	if (pp->pr_curpage == NULL) {
   1539 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1540 	}
   1541 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
   1542 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
   1543 }
   1544 
   1545 void
   1546 pool_setlowat(struct pool *pp, int n)
   1547 {
   1548 
   1549 	mutex_enter(&pp->pr_lock);
   1550 
   1551 	pp->pr_minitems = n;
   1552 	pp->pr_minpages = (n == 0)
   1553 		? 0
   1554 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1555 
   1556 	/* Make sure we're caught up with the newly-set low water mark. */
   1557 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1558 		/*
   1559 		 * XXX: Should we log a warning?  Should we set up a timeout
   1560 		 * to try again in a second or so?  The latter could break
   1561 		 * a caller's assumptions about interrupt protection, etc.
   1562 		 */
   1563 	}
   1564 
   1565 	mutex_exit(&pp->pr_lock);
   1566 }
   1567 
   1568 void
   1569 pool_sethiwat(struct pool *pp, int n)
   1570 {
   1571 
   1572 	mutex_enter(&pp->pr_lock);
   1573 
   1574 	pp->pr_maxpages = (n == 0)
   1575 		? 0
   1576 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1577 
   1578 	mutex_exit(&pp->pr_lock);
   1579 }
   1580 
   1581 void
   1582 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1583 {
   1584 
   1585 	mutex_enter(&pp->pr_lock);
   1586 
   1587 	pp->pr_hardlimit = n;
   1588 	pp->pr_hardlimit_warning = warnmess;
   1589 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1590 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1591 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1592 
   1593 	/*
   1594 	 * In-line version of pool_sethiwat(), because we don't want to
   1595 	 * release the lock.
   1596 	 */
   1597 	pp->pr_maxpages = (n == 0)
   1598 		? 0
   1599 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1600 
   1601 	mutex_exit(&pp->pr_lock);
   1602 }
   1603 
   1604 /*
   1605  * Release all complete pages that have not been used recently.
   1606  */
   1607 int
   1608 #ifdef POOL_DIAGNOSTIC
   1609 _pool_reclaim(struct pool *pp, const char *file, long line)
   1610 #else
   1611 pool_reclaim(struct pool *pp)
   1612 #endif
   1613 {
   1614 	struct pool_item_header *ph, *phnext;
   1615 	struct pool_pagelist pq;
   1616 	uint32_t curtime;
   1617 	bool klock;
   1618 	int rv;
   1619 
   1620 	if (pp->pr_drain_hook != NULL) {
   1621 		/*
   1622 		 * The drain hook must be called with the pool unlocked.
   1623 		 */
   1624 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1625 	}
   1626 
   1627 	/*
   1628 	 * XXXSMP Because we do not want to cause non-MPSAFE code
   1629 	 * to block.
   1630 	 */
   1631 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
   1632 	    pp->pr_ipl == IPL_SOFTSERIAL) {
   1633 		KERNEL_LOCK(1, NULL);
   1634 		klock = true;
   1635 	} else
   1636 		klock = false;
   1637 
   1638 	/* Reclaim items from the pool's cache (if any). */
   1639 	if (pp->pr_cache != NULL)
   1640 		pool_cache_invalidate(pp->pr_cache);
   1641 
   1642 	if (mutex_tryenter(&pp->pr_lock) == 0) {
   1643 		if (klock) {
   1644 			KERNEL_UNLOCK_ONE(NULL);
   1645 		}
   1646 		return (0);
   1647 	}
   1648 	pr_enter(pp, file, line);
   1649 
   1650 	LIST_INIT(&pq);
   1651 
   1652 	curtime = time_uptime;
   1653 
   1654 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1655 		phnext = LIST_NEXT(ph, ph_pagelist);
   1656 
   1657 		/* Check our minimum page claim */
   1658 		if (pp->pr_npages <= pp->pr_minpages)
   1659 			break;
   1660 
   1661 		KASSERT(ph->ph_nmissing == 0);
   1662 		if (curtime - ph->ph_time < pool_inactive_time
   1663 		    && !pa_starved_p(pp->pr_alloc))
   1664 			continue;
   1665 
   1666 		/*
   1667 		 * If freeing this page would put us below
   1668 		 * the low water mark, stop now.
   1669 		 */
   1670 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1671 		    pp->pr_minitems)
   1672 			break;
   1673 
   1674 		pr_rmpage(pp, ph, &pq);
   1675 	}
   1676 
   1677 	pr_leave(pp);
   1678 	mutex_exit(&pp->pr_lock);
   1679 
   1680 	if (LIST_EMPTY(&pq))
   1681 		rv = 0;
   1682 	else {
   1683 		pr_pagelist_free(pp, &pq);
   1684 		rv = 1;
   1685 	}
   1686 
   1687 	if (klock) {
   1688 		KERNEL_UNLOCK_ONE(NULL);
   1689 	}
   1690 
   1691 	return (rv);
   1692 }
   1693 
   1694 /*
    1695  * Drain pools, one at a time.  This is a two-stage process;
   1696  * drain_start kicks off a cross call to drain CPU-level caches
   1697  * if the pool has an associated pool_cache.  drain_end waits
   1698  * for those cross calls to finish, and then drains the cache
   1699  * (if any) and pool.
   1700  *
   1701  * Note, must never be called from interrupt context.
   1702  */
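         /*
          * A minimal sketch of how a caller pairs the two stages (the work
          * done in between is hypothetical):
          *
          *	struct pool *pp;
          *	uint64_t where;
          *
          *	pool_drain_start(&pp, &where);
          *	...	(release other resources while the cross calls run)
          *	pool_drain_end(pp, where);
          */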
   1703 void
   1704 pool_drain_start(struct pool **ppp, uint64_t *wp)
   1705 {
   1706 	struct pool *pp;
   1707 
   1708 	KASSERT(!TAILQ_EMPTY(&pool_head));
   1709 
   1710 	pp = NULL;
   1711 
   1712 	/* Find next pool to drain, and add a reference. */
   1713 	mutex_enter(&pool_head_lock);
   1714 	do {
   1715 		if (drainpp == NULL) {
   1716 			drainpp = TAILQ_FIRST(&pool_head);
   1717 		}
   1718 		if (drainpp != NULL) {
   1719 			pp = drainpp;
   1720 			drainpp = TAILQ_NEXT(pp, pr_poollist);
   1721 		}
   1722 		/*
   1723 		 * Skip completely idle pools.  We depend on at least
   1724 		 * one pool in the system being active.
   1725 		 */
   1726 	} while (pp == NULL || pp->pr_npages == 0);
   1727 	pp->pr_refcnt++;
   1728 	mutex_exit(&pool_head_lock);
   1729 
   1730 	/* If there is a pool_cache, drain CPU level caches. */
   1731 	*ppp = pp;
   1732 	if (pp->pr_cache != NULL) {
   1733 		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
   1734 		    pp->pr_cache, NULL);
   1735 	}
   1736 }
   1737 
   1738 void
   1739 pool_drain_end(struct pool *pp, uint64_t where)
   1740 {
   1741 
   1742 	if (pp == NULL)
   1743 		return;
   1744 
   1745 	KASSERT(pp->pr_refcnt > 0);
   1746 
   1747 	/* Wait for remote draining to complete. */
   1748 	if (pp->pr_cache != NULL)
   1749 		xc_wait(where);
   1750 
    1751 	/* Drain the cache (if any) and the pool. */
   1752 	pool_reclaim(pp);
   1753 
   1754 	/* Finally, unlock the pool. */
   1755 	mutex_enter(&pool_head_lock);
   1756 	pp->pr_refcnt--;
   1757 	cv_broadcast(&pool_busy);
   1758 	mutex_exit(&pool_head_lock);
   1759 }
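
/*
 * Illustrative sketch of the two-stage drain protocol described above.
 * This is not part of this file and only shows the intended calling
 * sequence (the usual caller is the pagedaemon); it must run in thread
 * context:
 *
 *	struct pool *pp;
 *	uint64_t where;
 *
 *	pool_drain_start(&pp, &where);
 *	... do any other reclamation work while the cross calls run ...
 *	pool_drain_end(pp, where);
 */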
   1760 
   1761 /*
   1762  * Diagnostic helpers.
   1763  */
   1764 void
   1765 pool_print(struct pool *pp, const char *modif)
   1766 {
   1767 
   1768 	pool_print1(pp, modif, printf);
   1769 }
   1770 
   1771 void
   1772 pool_printall(const char *modif, void (*pr)(const char *, ...))
   1773 {
   1774 	struct pool *pp;
   1775 
   1776 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1777 		pool_printit(pp, modif, pr);
   1778 	}
   1779 }
   1780 
   1781 void
   1782 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1783 {
   1784 
   1785 	if (pp == NULL) {
   1786 		(*pr)("Must specify a pool to print.\n");
   1787 		return;
   1788 	}
   1789 
   1790 	pool_print1(pp, modif, pr);
   1791 }
   1792 
   1793 static void
   1794 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1795     void (*pr)(const char *, ...))
   1796 {
   1797 	struct pool_item_header *ph;
   1798 #ifdef DIAGNOSTIC
   1799 	struct pool_item *pi;
   1800 #endif
   1801 
   1802 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1803 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
   1804 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
   1805 #ifdef DIAGNOSTIC
   1806 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1807 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1808 				if (pi->pi_magic != PI_MAGIC) {
   1809 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1810 					    pi, pi->pi_magic);
   1811 				}
   1812 			}
   1813 		}
   1814 #endif
   1815 	}
   1816 }
   1817 
   1818 static void
   1819 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1820 {
   1821 	struct pool_item_header *ph;
   1822 	pool_cache_t pc;
   1823 	pcg_t *pcg;
   1824 	pool_cache_cpu_t *cc;
   1825 	uint64_t cpuhit, cpumiss;
   1826 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1827 	char c;
   1828 
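	/* Modifiers: 'l' = pool log, 'p' = page lists, 'c' = cache groups. */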
   1829 	while ((c = *modif++) != '\0') {
   1830 		if (c == 'l')
   1831 			print_log = 1;
   1832 		if (c == 'p')
   1833 			print_pagelist = 1;
   1834 		if (c == 'c')
   1835 			print_cache = 1;
   1836 	}
   1837 
   1838 	if ((pc = pp->pr_cache) != NULL) {
   1839 		(*pr)("POOL CACHE");
   1840 	} else {
   1841 		(*pr)("POOL");
   1842 	}
   1843 
   1844 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1845 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1846 	    pp->pr_roflags);
   1847 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1848 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1849 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1850 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1851 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1852 
   1853 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
   1854 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1855 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1856 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1857 
   1858 	if (print_pagelist == 0)
   1859 		goto skip_pagelist;
   1860 
   1861 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1862 		(*pr)("\n\tempty page list:\n");
   1863 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1864 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1865 		(*pr)("\n\tfull page list:\n");
   1866 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1867 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1868 		(*pr)("\n\tpartial-page list:\n");
   1869 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1870 
   1871 	if (pp->pr_curpage == NULL)
   1872 		(*pr)("\tno current page\n");
   1873 	else
   1874 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1875 
   1876  skip_pagelist:
   1877 	if (print_log == 0)
   1878 		goto skip_log;
   1879 
   1880 	(*pr)("\n");
   1881 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1882 		(*pr)("\tno log\n");
   1883 	else {
   1884 		pr_printlog(pp, NULL, pr);
   1885 	}
   1886 
   1887  skip_log:
   1888 
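/*
 * PR_GROUPLIST: print one cache group, i.e. how many objects it has
 * available and, for each object, its virtual address plus the
 * physical address when one was recorded.
 */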
   1889 #define PR_GROUPLIST(pcg)						\
   1890 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1891 	for (i = 0; i < pcg->pcg_size; i++) {				\
   1892 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1893 		    POOL_PADDR_INVALID) {				\
   1894 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1895 			    pcg->pcg_objects[i].pcgo_va,		\
   1896 			    (unsigned long long)			\
   1897 			    pcg->pcg_objects[i].pcgo_pa);		\
   1898 		} else {						\
   1899 			(*pr)("\t\t\t%p\n",				\
   1900 			    pcg->pcg_objects[i].pcgo_va);		\
   1901 		}							\
   1902 	}
   1903 
   1904 	if (pc != NULL) {
   1905 		cpuhit = 0;
   1906 		cpumiss = 0;
   1907 		for (i = 0; i < MAXCPUS; i++) {
   1908 			if ((cc = pc->pc_cpus[i]) == NULL)
   1909 				continue;
   1910 			cpuhit += cc->cc_hits;
   1911 			cpumiss += cc->cc_misses;
   1912 		}
   1913 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
   1914 		(*pr)("\tcache layer hits %llu misses %llu\n",
   1915 		    pc->pc_hits, pc->pc_misses);
   1916 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
   1917 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
   1918 		    pc->pc_contended);
   1919 		(*pr)("\tcache layer empty groups %u full groups %u\n",
   1920 		    pc->pc_nempty, pc->pc_nfull);
   1921 		if (print_cache) {
   1922 			(*pr)("\tfull cache groups:\n");
   1923 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   1924 			    pcg = pcg->pcg_next) {
   1925 				PR_GROUPLIST(pcg);
   1926 			}
   1927 			(*pr)("\tempty cache groups:\n");
   1928 			for (pcg = pc->pc_emptygroups; pcg != NULL;
   1929 			    pcg = pcg->pcg_next) {
   1930 				PR_GROUPLIST(pcg);
   1931 			}
   1932 		}
   1933 	}
   1934 #undef PR_GROUPLIST
   1935 
   1936 	pr_enter_check(pp, pr);
   1937 }
   1938 
   1939 static int
   1940 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1941 {
   1942 	struct pool_item *pi;
   1943 	void *page;
   1944 	int n;
   1945 
   1946 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1947 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1948 		if (page != ph->ph_page &&
   1949 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1950 			if (label != NULL)
   1951 				printf("%s: ", label);
   1952 			printf("pool(%p:%s): page inconsistency: page %p;"
   1953 			       " at page head addr %p (p %p)\n", pp,
   1954 				pp->pr_wchan, ph->ph_page,
   1955 				ph, page);
   1956 			return 1;
   1957 		}
   1958 	}
   1959 
   1960 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1961 		return 0;
   1962 
   1963 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1964 	     pi != NULL;
   1965 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1966 
   1967 #ifdef DIAGNOSTIC
   1968 		if (pi->pi_magic != PI_MAGIC) {
   1969 			if (label != NULL)
   1970 				printf("%s: ", label);
   1971 			printf("pool(%s): free list modified: magic=%x;"
   1972 			       " page %p; item ordinal %d; addr %p\n",
   1973 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1974 				n, pi);
   1975 			panic("pool");
   1976 		}
   1977 #endif
   1978 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1979 			continue;
   1980 		}
   1981 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1982 		if (page == ph->ph_page)
   1983 			continue;
   1984 
   1985 		if (label != NULL)
   1986 			printf("%s: ", label);
   1987 		printf("pool(%p:%s): page inconsistency: page %p;"
   1988 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1989 			pp->pr_wchan, ph->ph_page,
   1990 			n, pi, page);
   1991 		return 1;
   1992 	}
   1993 	return 0;
   1994 }
   1995 
   1996 
   1997 int
   1998 pool_chk(struct pool *pp, const char *label)
   1999 {
   2000 	struct pool_item_header *ph;
   2001 	int r = 0;
   2002 
   2003 	mutex_enter(&pp->pr_lock);
   2004 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   2005 		r = pool_chk_page(pp, label, ph);
   2006 		if (r) {
   2007 			goto out;
   2008 		}
   2009 	}
   2010 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   2011 		r = pool_chk_page(pp, label, ph);
   2012 		if (r) {
   2013 			goto out;
   2014 		}
   2015 	}
   2016 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   2017 		r = pool_chk_page(pp, label, ph);
   2018 		if (r) {
   2019 			goto out;
   2020 		}
   2021 	}
   2022 
   2023 out:
   2024 	mutex_exit(&pp->pr_lock);
   2025 	return (r);
   2026 }
   2027 
   2028 /*
   2029  * pool_cache_init:
   2030  *
   2031  *	Initialize a pool cache.
   2032  */
   2033 pool_cache_t
   2034 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
   2035     const char *wchan, struct pool_allocator *palloc, int ipl,
   2036     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
   2037 {
   2038 	pool_cache_t pc;
   2039 
   2040 	pc = pool_get(&cache_pool, PR_WAITOK);
   2041 	if (pc == NULL)
   2042 		return NULL;
   2043 
   2044 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
   2045 	   palloc, ipl, ctor, dtor, arg);
   2046 
   2047 	return pc;
   2048 }
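
/*
 * Illustrative sketch of pool_cache_init() usage.  The "frobbuf"
 * structure, constructor and destructor are hypothetical and exist only
 * to show the expected signatures; passing a NULL allocator with
 * IPL_NONE selects pool_allocator_nointr (see pool_cache_bootstrap()):
 *
 *	static pool_cache_t frobbuf_cache;
 *
 *	static int
 *	frobbuf_ctor(void *arg, void *obj, int flags)
 *	{
 *		memset(obj, 0, sizeof(struct frobbuf));
 *		return 0;
 *	}
 *
 *	static void
 *	frobbuf_dtor(void *arg, void *obj)
 *	{
 *		... tear down anything the constructor set up ...
 *	}
 *
 *	frobbuf_cache = pool_cache_init(sizeof(struct frobbuf), 0, 0, 0,
 *	    "frobbuf", NULL, IPL_NONE, frobbuf_ctor, frobbuf_dtor, NULL);
 */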
   2049 
   2050 /*
   2051  * pool_cache_bootstrap:
   2052  *
   2053  *	Kernel-private version of pool_cache_init().  The caller
   2054  *	provides initial storage.
   2055  */
   2056 void
   2057 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
   2058     u_int align_offset, u_int flags, const char *wchan,
   2059     struct pool_allocator *palloc, int ipl,
   2060     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
   2061     void *arg)
   2062 {
   2063 	CPU_INFO_ITERATOR cii;
   2064 	pool_cache_t pc1;
   2065 	struct cpu_info *ci;
   2066 	struct pool *pp;
   2067 
   2068 	pp = &pc->pc_pool;
   2069 	if (palloc == NULL && ipl == IPL_NONE)
   2070 		palloc = &pool_allocator_nointr;
   2071 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
   2072 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
   2073 
   2074 	if (ctor == NULL) {
   2075 		ctor = (int (*)(void *, void *, int))nullop;
   2076 	}
   2077 	if (dtor == NULL) {
   2078 		dtor = (void (*)(void *, void *))nullop;
   2079 	}
   2080 
   2081 	pc->pc_emptygroups = NULL;
   2082 	pc->pc_fullgroups = NULL;
   2083 	pc->pc_partgroups = NULL;
   2084 	pc->pc_ctor = ctor;
   2085 	pc->pc_dtor = dtor;
   2086 	pc->pc_arg  = arg;
   2087 	pc->pc_hits  = 0;
   2088 	pc->pc_misses = 0;
   2089 	pc->pc_nempty = 0;
   2090 	pc->pc_npart = 0;
   2091 	pc->pc_nfull = 0;
   2092 	pc->pc_contended = 0;
   2093 	pc->pc_refcnt = 0;
   2094 	pc->pc_freecheck = NULL;
   2095 
   2096 	if ((flags & PR_LARGECACHE) != 0) {
   2097 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
   2098 		pc->pc_pcgpool = &pcg_large_pool;
   2099 	} else {
   2100 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
   2101 		pc->pc_pcgpool = &pcg_normal_pool;
   2102 	}
   2103 
   2104 	/* Allocate per-CPU caches. */
   2105 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
   2106 	pc->pc_ncpu = 0;
   2107 	if (ncpu < 2) {
   2108 		/* XXX For sparc: boot CPU is not attached yet. */
   2109 		pool_cache_cpu_init1(curcpu(), pc);
   2110 	} else {
   2111 		for (CPU_INFO_FOREACH(cii, ci)) {
   2112 			pool_cache_cpu_init1(ci, pc);
   2113 		}
   2114 	}
   2115 
   2116 	/* Add to list of all pools. */
   2117 	if (__predict_true(!cold))
   2118 		mutex_enter(&pool_head_lock);
   2119 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
   2120 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
   2121 			break;
   2122 	}
   2123 	if (pc1 == NULL)
   2124 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
   2125 	else
   2126 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
   2127 	if (__predict_true(!cold))
   2128 		mutex_exit(&pool_head_lock);
   2129 
   2130 	membar_sync();
   2131 	pp->pr_cache = pc;
   2132 }
   2133 
   2134 /*
   2135  * pool_cache_destroy:
   2136  *
   2137  *	Destroy a pool cache.
   2138  */
   2139 void
   2140 pool_cache_destroy(pool_cache_t pc)
   2141 {
   2142 	struct pool *pp = &pc->pc_pool;
   2143 	u_int i;
   2144 
   2145 	/* Remove it from the global list. */
   2146 	mutex_enter(&pool_head_lock);
   2147 	while (pc->pc_refcnt != 0)
   2148 		cv_wait(&pool_busy, &pool_head_lock);
   2149 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
   2150 	mutex_exit(&pool_head_lock);
   2151 
   2152 	/* First, invalidate the entire cache. */
   2153 	pool_cache_invalidate(pc);
   2154 
   2155 	/* Disassociate it from the pool. */
   2156 	mutex_enter(&pp->pr_lock);
   2157 	pp->pr_cache = NULL;
   2158 	mutex_exit(&pp->pr_lock);
   2159 
   2160 	/* Destroy per-CPU data */
   2161 	for (i = 0; i < MAXCPUS; i++)
   2162 		pool_cache_invalidate_cpu(pc, i);
   2163 
   2164 	/* Finally, destroy it. */
   2165 	mutex_destroy(&pc->pc_lock);
   2166 	pool_destroy(pp);
   2167 	pool_put(&cache_pool, pc);
   2168 }
   2169 
   2170 /*
   2171  * pool_cache_cpu_init1:
   2172  *
   2173  *	Called for each pool_cache whenever a new CPU is attached.
   2174  */
   2175 static void
   2176 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
   2177 {
   2178 	pool_cache_cpu_t *cc;
   2179 	int index;
   2180 
   2181 	index = ci->ci_index;
   2182 
   2183 	KASSERT(index < MAXCPUS);
   2184 
   2185 	if ((cc = pc->pc_cpus[index]) != NULL) {
   2186 		KASSERT(cc->cc_cpuindex == index);
   2187 		return;
   2188 	}
   2189 
   2190 	/*
   2191 	 * The first CPU is 'free'.  This needs to be the case for
   2192 	 * bootstrap - we may not be able to allocate yet.
   2193 	 */
   2194 	if (pc->pc_ncpu == 0) {
   2195 		cc = &pc->pc_cpu0;
   2196 		pc->pc_ncpu = 1;
   2197 	} else {
   2198 		mutex_enter(&pc->pc_lock);
   2199 		pc->pc_ncpu++;
   2200 		mutex_exit(&pc->pc_lock);
   2201 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
   2202 	}
   2203 
   2204 	cc->cc_ipl = pc->pc_pool.pr_ipl;
   2205 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
   2206 	cc->cc_cache = pc;
   2207 	cc->cc_cpuindex = index;
   2208 	cc->cc_hits = 0;
   2209 	cc->cc_misses = 0;
   2210 	cc->cc_current = __UNCONST(&pcg_dummy);
   2211 	cc->cc_previous = __UNCONST(&pcg_dummy);
   2212 
   2213 	pc->pc_cpus[index] = cc;
   2214 }
   2215 
   2216 /*
   2217  * pool_cache_cpu_init:
   2218  *
   2219  *	Called whenever a new CPU is attached.
   2220  */
   2221 void
   2222 pool_cache_cpu_init(struct cpu_info *ci)
   2223 {
   2224 	pool_cache_t pc;
   2225 
   2226 	mutex_enter(&pool_head_lock);
   2227 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
   2228 		pc->pc_refcnt++;
   2229 		mutex_exit(&pool_head_lock);
   2230 
   2231 		pool_cache_cpu_init1(ci, pc);
   2232 
   2233 		mutex_enter(&pool_head_lock);
   2234 		pc->pc_refcnt--;
   2235 		cv_broadcast(&pool_busy);
   2236 	}
   2237 	mutex_exit(&pool_head_lock);
   2238 }
   2239 
   2240 /*
   2241  * pool_cache_reclaim:
   2242  *
   2243  *	Reclaim memory from a pool cache.
   2244  */
   2245 bool
   2246 pool_cache_reclaim(pool_cache_t pc)
   2247 {
   2248 
   2249 	return pool_reclaim(&pc->pc_pool);
   2250 }
   2251 
   2252 static void
   2253 pool_cache_destruct_object1(pool_cache_t pc, void *object)
   2254 {
   2255 
   2256 	(*pc->pc_dtor)(pc->pc_arg, object);
   2257 	pool_put(&pc->pc_pool, object);
   2258 }
   2259 
   2260 /*
   2261  * pool_cache_destruct_object:
   2262  *
   2263  *	Force destruction of an object and its release back into
   2264  *	the pool.
   2265  */
   2266 void
   2267 pool_cache_destruct_object(pool_cache_t pc, void *object)
   2268 {
   2269 
   2270 	FREECHECK_IN(&pc->pc_freecheck, object);
   2271 
   2272 	pool_cache_destruct_object1(pc, object);
   2273 }
   2274 
   2275 /*
   2276  * pool_cache_invalidate_groups:
   2277  *
   2278  *	Invalidate a chain of groups and destruct all objects.
   2279  */
   2280 static void
   2281 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
   2282 {
   2283 	void *object;
   2284 	pcg_t *next;
   2285 	int i;
   2286 
   2287 	for (; pcg != NULL; pcg = next) {
   2288 		next = pcg->pcg_next;
   2289 
   2290 		for (i = 0; i < pcg->pcg_avail; i++) {
   2291 			object = pcg->pcg_objects[i].pcgo_va;
   2292 			pool_cache_destruct_object1(pc, object);
   2293 		}
   2294 
   2295 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
   2296 			pool_put(&pcg_large_pool, pcg);
   2297 		} else {
   2298 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
   2299 			pool_put(&pcg_normal_pool, pcg);
   2300 		}
   2301 	}
   2302 }
   2303 
   2304 /*
   2305  * pool_cache_invalidate:
   2306  *
   2307  *	Invalidate a pool cache (destruct and release all of the
   2308  *	cached objects).  Does not reclaim objects from the pool.
   2309  *
    2310  *	Note: for pool caches that provide constructed objects, it is
    2311  *	assumed that some other level of synchronization exists between
    2312  *	the constructor's input state and the cache invalidation.
   2313  */
   2314 void
   2315 pool_cache_invalidate(pool_cache_t pc)
   2316 {
   2317 	pcg_t *full, *empty, *part;
   2318 	uint64_t where;
   2319 
   2320 	if (ncpu < 2 || !mp_online) {
   2321 		/*
   2322 		 * We might be called early enough in the boot process
   2323 		 * for the CPU data structures to not be fully initialized.
   2324 		 * In this case, simply gather the local CPU's cache now
   2325 		 * since it will be the only one running.
   2326 		 */
   2327 		pool_cache_xcall(pc);
   2328 	} else {
   2329 		/*
   2330 		 * Gather all of the CPU-specific caches into the
   2331 		 * global cache.
   2332 		 */
   2333 		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
   2334 		xc_wait(where);
   2335 	}
   2336 
   2337 	mutex_enter(&pc->pc_lock);
   2338 	full = pc->pc_fullgroups;
   2339 	empty = pc->pc_emptygroups;
   2340 	part = pc->pc_partgroups;
   2341 	pc->pc_fullgroups = NULL;
   2342 	pc->pc_emptygroups = NULL;
   2343 	pc->pc_partgroups = NULL;
   2344 	pc->pc_nfull = 0;
   2345 	pc->pc_nempty = 0;
   2346 	pc->pc_npart = 0;
   2347 	mutex_exit(&pc->pc_lock);
   2348 
   2349 	pool_cache_invalidate_groups(pc, full);
   2350 	pool_cache_invalidate_groups(pc, empty);
   2351 	pool_cache_invalidate_groups(pc, part);
   2352 }
   2353 
   2354 /*
   2355  * pool_cache_invalidate_cpu:
   2356  *
   2357  *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
   2358  *	identified by its associated index.
    2359  *	It is the caller's responsibility to ensure that no operation is
    2360  *	taking place on this pool cache while doing this invalidation.
    2361  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
    2362  *	pool-cached objects from a CPU other than the one currently
    2363  *	running may result in undefined behaviour.
   2364  */
   2365 static void
   2366 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
   2367 {
   2368 
   2369 	pool_cache_cpu_t *cc;
   2370 	pcg_t *pcg;
   2371 
   2372 	if ((cc = pc->pc_cpus[index]) == NULL)
   2373 		return;
   2374 
   2375 	if ((pcg = cc->cc_current) != &pcg_dummy) {
   2376 		pcg->pcg_next = NULL;
   2377 		pool_cache_invalidate_groups(pc, pcg);
   2378 	}
   2379 	if ((pcg = cc->cc_previous) != &pcg_dummy) {
   2380 		pcg->pcg_next = NULL;
   2381 		pool_cache_invalidate_groups(pc, pcg);
   2382 	}
   2383 	if (cc != &pc->pc_cpu0)
   2384 		pool_put(&cache_cpu_pool, cc);
   2385 
   2386 }
   2387 
   2388 void
   2389 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
   2390 {
   2391 
   2392 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
   2393 }
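
/*
 * Illustrative sketch of a cache drain hook.  The hook is invoked with
 * the pool unlocked when memory is scarce; "frobbuf_drain" and its
 * behaviour are hypothetical, but the signature matches the one
 * expected here:
 *
 *	static void
 *	frobbuf_drain(void *arg, int flags)
 *	{
 *		... release privately held objects back to the cache;
 *		    sleep only if (flags & PR_WAITOK) is set ...
 *	}
 *
 *	pool_cache_set_drain_hook(frobbuf_cache, frobbuf_drain, NULL);
 */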
   2394 
   2395 void
   2396 pool_cache_setlowat(pool_cache_t pc, int n)
   2397 {
   2398 
   2399 	pool_setlowat(&pc->pc_pool, n);
   2400 }
   2401 
   2402 void
   2403 pool_cache_sethiwat(pool_cache_t pc, int n)
   2404 {
   2405 
   2406 	pool_sethiwat(&pc->pc_pool, n);
   2407 }
   2408 
   2409 void
   2410 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
   2411 {
   2412 
   2413 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
   2414 }
   2415 
   2416 static bool __noinline
   2417 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
   2418 		    paddr_t *pap, int flags)
   2419 {
   2420 	pcg_t *pcg, *cur;
   2421 	uint64_t ncsw;
   2422 	pool_cache_t pc;
   2423 	void *object;
   2424 
   2425 	KASSERT(cc->cc_current->pcg_avail == 0);
   2426 	KASSERT(cc->cc_previous->pcg_avail == 0);
   2427 
   2428 	pc = cc->cc_cache;
   2429 	cc->cc_misses++;
   2430 
   2431 	/*
   2432 	 * Nothing was available locally.  Try and grab a group
   2433 	 * from the cache.
   2434 	 */
   2435 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2436 		ncsw = curlwp->l_ncsw;
   2437 		mutex_enter(&pc->pc_lock);
   2438 		pc->pc_contended++;
   2439 
   2440 		/*
   2441 		 * If we context switched while locking, then
   2442 		 * our view of the per-CPU data is invalid:
   2443 		 * retry.
   2444 		 */
   2445 		if (curlwp->l_ncsw != ncsw) {
   2446 			mutex_exit(&pc->pc_lock);
   2447 			return true;
   2448 		}
   2449 	}
   2450 
   2451 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
   2452 		/*
   2453 		 * If there's a full group, release our empty
   2454 		 * group back to the cache.  Install the full
   2455 		 * group as cc_current and return.
   2456 		 */
   2457 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
   2458 			KASSERT(cur->pcg_avail == 0);
   2459 			cur->pcg_next = pc->pc_emptygroups;
   2460 			pc->pc_emptygroups = cur;
   2461 			pc->pc_nempty++;
   2462 		}
   2463 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
   2464 		cc->cc_current = pcg;
   2465 		pc->pc_fullgroups = pcg->pcg_next;
   2466 		pc->pc_hits++;
   2467 		pc->pc_nfull--;
   2468 		mutex_exit(&pc->pc_lock);
   2469 		return true;
   2470 	}
   2471 
   2472 	/*
   2473 	 * Nothing available locally or in cache.  Take the slow
   2474 	 * path: fetch a new object from the pool and construct
   2475 	 * it.
   2476 	 */
   2477 	pc->pc_misses++;
   2478 	mutex_exit(&pc->pc_lock);
   2479 	splx(s);
   2480 
   2481 	object = pool_get(&pc->pc_pool, flags);
   2482 	*objectp = object;
   2483 	if (__predict_false(object == NULL))
   2484 		return false;
   2485 
   2486 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
   2487 		pool_put(&pc->pc_pool, object);
   2488 		*objectp = NULL;
   2489 		return false;
   2490 	}
   2491 
   2492 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
   2493 	    (pc->pc_pool.pr_align - 1)) == 0);
   2494 
   2495 	if (pap != NULL) {
   2496 #ifdef POOL_VTOPHYS
   2497 		*pap = POOL_VTOPHYS(object);
   2498 #else
   2499 		*pap = POOL_PADDR_INVALID;
   2500 #endif
   2501 	}
   2502 
   2503 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2504 	return false;
   2505 }
   2506 
   2507 /*
   2508  * pool_cache_get{,_paddr}:
   2509  *
   2510  *	Get an object from a pool cache (optionally returning
   2511  *	the physical address of the object).
   2512  */
   2513 void *
   2514 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
   2515 {
   2516 	pool_cache_cpu_t *cc;
   2517 	pcg_t *pcg;
   2518 	void *object;
   2519 	int s;
   2520 
   2521 #ifdef LOCKDEBUG
   2522 	if (flags & PR_WAITOK) {
   2523 		ASSERT_SLEEPABLE();
   2524 	}
   2525 #endif
   2526 
   2527 	/* Lock out interrupts and disable preemption. */
   2528 	s = splvm();
   2529 	while (/* CONSTCOND */ true) {
   2530 		/* Try and allocate an object from the current group. */
   2531 		cc = pc->pc_cpus[curcpu()->ci_index];
   2532 		KASSERT(cc->cc_cache == pc);
   2533 	 	pcg = cc->cc_current;
   2534 		if (__predict_true(pcg->pcg_avail > 0)) {
   2535 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
   2536 			if (__predict_false(pap != NULL))
   2537 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
   2538 #if defined(DIAGNOSTIC)
   2539 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
   2540 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
   2541 			KASSERT(object != NULL);
   2542 #endif
   2543 			cc->cc_hits++;
   2544 			splx(s);
   2545 			FREECHECK_OUT(&pc->pc_freecheck, object);
   2546 			return object;
   2547 		}
   2548 
   2549 		/*
   2550 		 * That failed.  If the previous group isn't empty, swap
   2551 		 * it with the current group and allocate from there.
   2552 		 */
   2553 		pcg = cc->cc_previous;
   2554 		if (__predict_true(pcg->pcg_avail > 0)) {
   2555 			cc->cc_previous = cc->cc_current;
   2556 			cc->cc_current = pcg;
   2557 			continue;
   2558 		}
   2559 
   2560 		/*
   2561 		 * Can't allocate from either group: try the slow path.
   2562 		 * If get_slow() allocated an object for us, or if
   2563 		 * no more objects are available, it will return false.
   2564 		 * Otherwise, we need to retry.
   2565 		 */
   2566 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
   2567 			break;
   2568 	}
   2569 
   2570 	return object;
   2571 }
   2572 
   2573 static bool __noinline
   2574 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
   2575 {
   2576 	pcg_t *pcg, *cur;
   2577 	uint64_t ncsw;
   2578 	pool_cache_t pc;
   2579 
   2580 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
   2581 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
   2582 
   2583 	pc = cc->cc_cache;
   2584 	pcg = NULL;
   2585 	cc->cc_misses++;
   2586 
   2587 	/*
   2588 	 * If there are no empty groups in the cache then allocate one
   2589 	 * while still unlocked.
   2590 	 */
   2591 	if (__predict_false(pc->pc_emptygroups == NULL)) {
   2592 		if (__predict_true(!pool_cache_disable)) {
   2593 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
   2594 		}
   2595 		if (__predict_true(pcg != NULL)) {
   2596 			pcg->pcg_avail = 0;
   2597 			pcg->pcg_size = pc->pc_pcgsize;
   2598 		}
   2599 	}
   2600 
   2601 	/* Lock the cache. */
   2602 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2603 		ncsw = curlwp->l_ncsw;
   2604 		mutex_enter(&pc->pc_lock);
   2605 		pc->pc_contended++;
   2606 
   2607 		/*
   2608 		 * If we context switched while locking, then our view of
   2609 		 * the per-CPU data is invalid: retry.
   2610 		 */
   2611 		if (__predict_false(curlwp->l_ncsw != ncsw)) {
   2612 			mutex_exit(&pc->pc_lock);
   2613 			if (pcg != NULL) {
   2614 				pool_put(pc->pc_pcgpool, pcg);
   2615 			}
   2616 			return true;
   2617 		}
   2618 	}
   2619 
   2620 	/* If there are no empty groups in the cache then allocate one. */
   2621 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
   2622 		pcg = pc->pc_emptygroups;
   2623 		pc->pc_emptygroups = pcg->pcg_next;
   2624 		pc->pc_nempty--;
   2625 	}
   2626 
   2627 	/*
    2628 	 * If there's an empty group, release our full group back
   2629 	 * to the cache.  Install the empty group to the local CPU
   2630 	 * and return.
   2631 	 */
   2632 	if (pcg != NULL) {
   2633 		KASSERT(pcg->pcg_avail == 0);
   2634 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
   2635 			cc->cc_previous = pcg;
   2636 		} else {
   2637 			cur = cc->cc_current;
   2638 			if (__predict_true(cur != &pcg_dummy)) {
   2639 				KASSERT(cur->pcg_avail == cur->pcg_size);
   2640 				cur->pcg_next = pc->pc_fullgroups;
   2641 				pc->pc_fullgroups = cur;
   2642 				pc->pc_nfull++;
   2643 			}
   2644 			cc->cc_current = pcg;
   2645 		}
   2646 		pc->pc_hits++;
   2647 		mutex_exit(&pc->pc_lock);
   2648 		return true;
   2649 	}
   2650 
   2651 	/*
   2652 	 * Nothing available locally or in cache, and we didn't
   2653 	 * allocate an empty group.  Take the slow path and destroy
   2654 	 * the object here and now.
   2655 	 */
   2656 	pc->pc_misses++;
   2657 	mutex_exit(&pc->pc_lock);
   2658 	splx(s);
   2659 	pool_cache_destruct_object(pc, object);
   2660 
   2661 	return false;
   2662 }
   2663 
   2664 /*
   2665  * pool_cache_put{,_paddr}:
   2666  *
   2667  *	Put an object back to the pool cache (optionally caching the
   2668  *	physical address of the object).
   2669  */
   2670 void
   2671 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
   2672 {
   2673 	pool_cache_cpu_t *cc;
   2674 	pcg_t *pcg;
   2675 	int s;
   2676 
   2677 	KASSERT(object != NULL);
   2678 	FREECHECK_IN(&pc->pc_freecheck, object);
   2679 
   2680 	/* Lock out interrupts and disable preemption. */
   2681 	s = splvm();
   2682 	while (/* CONSTCOND */ true) {
   2683 		/* If the current group isn't full, release it there. */
   2684 		cc = pc->pc_cpus[curcpu()->ci_index];
   2685 		KASSERT(cc->cc_cache == pc);
   2686 	 	pcg = cc->cc_current;
   2687 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2688 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
   2689 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
   2690 			pcg->pcg_avail++;
   2691 			cc->cc_hits++;
   2692 			splx(s);
   2693 			return;
   2694 		}
   2695 
   2696 		/*
   2697 		 * That failed.  If the previous group isn't full, swap
   2698 		 * it with the current group and try again.
   2699 		 */
   2700 		pcg = cc->cc_previous;
   2701 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2702 			cc->cc_previous = cc->cc_current;
   2703 			cc->cc_current = pcg;
   2704 			continue;
   2705 		}
   2706 
   2707 		/*
   2708 		 * Can't free to either group: try the slow path.
   2709 		 * If put_slow() releases the object for us, it
   2710 		 * will return false.  Otherwise we need to retry.
   2711 		 */
   2712 		if (!pool_cache_put_slow(cc, s, object))
   2713 			break;
   2714 	}
   2715 }
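
/*
 * Illustrative sketch of a get/put cycle on a pool cache; frobbuf_cache
 * is the hypothetical cache from the pool_cache_init() sketch above:
 *
 *	struct frobbuf *fb;
 *
 *	fb = pool_cache_get(frobbuf_cache, PR_WAITOK);
 *	... use the constructed object ...
 *	pool_cache_put(frobbuf_cache, fb);
 */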
   2716 
   2717 /*
   2718  * pool_cache_xcall:
   2719  *
   2720  *	Transfer objects from the per-CPU cache to the global cache.
   2721  *	Run within a cross-call thread.
   2722  */
   2723 static void
   2724 pool_cache_xcall(pool_cache_t pc)
   2725 {
   2726 	pool_cache_cpu_t *cc;
   2727 	pcg_t *prev, *cur, **list;
   2728 	int s;
   2729 
   2730 	s = splvm();
   2731 	mutex_enter(&pc->pc_lock);
   2732 	cc = pc->pc_cpus[curcpu()->ci_index];
   2733 	cur = cc->cc_current;
   2734 	cc->cc_current = __UNCONST(&pcg_dummy);
   2735 	prev = cc->cc_previous;
   2736 	cc->cc_previous = __UNCONST(&pcg_dummy);
   2737 	if (cur != &pcg_dummy) {
   2738 		if (cur->pcg_avail == cur->pcg_size) {
   2739 			list = &pc->pc_fullgroups;
   2740 			pc->pc_nfull++;
   2741 		} else if (cur->pcg_avail == 0) {
   2742 			list = &pc->pc_emptygroups;
   2743 			pc->pc_nempty++;
   2744 		} else {
   2745 			list = &pc->pc_partgroups;
   2746 			pc->pc_npart++;
   2747 		}
   2748 		cur->pcg_next = *list;
   2749 		*list = cur;
   2750 	}
   2751 	if (prev != &pcg_dummy) {
   2752 		if (prev->pcg_avail == prev->pcg_size) {
   2753 			list = &pc->pc_fullgroups;
   2754 			pc->pc_nfull++;
   2755 		} else if (prev->pcg_avail == 0) {
   2756 			list = &pc->pc_emptygroups;
   2757 			pc->pc_nempty++;
   2758 		} else {
   2759 			list = &pc->pc_partgroups;
   2760 			pc->pc_npart++;
   2761 		}
   2762 		prev->pcg_next = *list;
   2763 		*list = prev;
   2764 	}
   2765 	mutex_exit(&pc->pc_lock);
   2766 	splx(s);
   2767 }
   2768 
   2769 /*
   2770  * Pool backend allocators.
   2771  *
   2772  * Each pool has a backend allocator that handles allocation, deallocation,
   2773  * and any additional draining that might be needed.
   2774  *
   2775  * We provide two standard allocators:
   2776  *
   2777  *	pool_allocator_kmem - the default when no allocator is specified
   2778  *
   2779  *	pool_allocator_nointr - used for pools that will not be accessed
   2780  *	in interrupt context.
   2781  */
   2782 void	*pool_page_alloc(struct pool *, int);
   2783 void	pool_page_free(struct pool *, void *);
   2784 
   2785 #ifdef POOL_SUBPAGE
   2786 struct pool_allocator pool_allocator_kmem_fullpage = {
   2787 	pool_page_alloc, pool_page_free, 0,
   2788 	.pa_backingmapptr = &kmem_map,
   2789 };
   2790 #else
   2791 struct pool_allocator pool_allocator_kmem = {
   2792 	pool_page_alloc, pool_page_free, 0,
   2793 	.pa_backingmapptr = &kmem_map,
   2794 };
   2795 #endif
   2796 
   2797 void	*pool_page_alloc_nointr(struct pool *, int);
   2798 void	pool_page_free_nointr(struct pool *, void *);
   2799 
   2800 #ifdef POOL_SUBPAGE
   2801 struct pool_allocator pool_allocator_nointr_fullpage = {
   2802 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2803 	.pa_backingmapptr = &kernel_map,
   2804 };
   2805 #else
   2806 struct pool_allocator pool_allocator_nointr = {
   2807 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2808 	.pa_backingmapptr = &kernel_map,
   2809 };
   2810 #endif
   2811 
   2812 #ifdef POOL_SUBPAGE
   2813 void	*pool_subpage_alloc(struct pool *, int);
   2814 void	pool_subpage_free(struct pool *, void *);
   2815 
   2816 struct pool_allocator pool_allocator_kmem = {
   2817 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2818 	.pa_backingmapptr = &kmem_map,
   2819 };
   2820 
   2821 void	*pool_subpage_alloc_nointr(struct pool *, int);
   2822 void	pool_subpage_free_nointr(struct pool *, void *);
   2823 
   2824 struct pool_allocator pool_allocator_nointr = {
   2825 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2826 	.pa_backingmapptr = &kmem_map,
   2827 };
   2828 #endif /* POOL_SUBPAGE */
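
/*
 * Illustrative sketch of a custom backend allocator.  Following the
 * standard allocators above, the positional fields are the alloc and
 * free functions and the page size (0 selects the default); the
 * "mydev" names are hypothetical:
 *
 *	static void	*mydev_page_alloc(struct pool *, int);
 *	static void	mydev_page_free(struct pool *, void *);
 *
 *	static struct pool_allocator mydev_allocator = {
 *		mydev_page_alloc, mydev_page_free, 0,
 *	};
 *
 * Passing &mydev_allocator to pool_init() or pool_cache_init() makes
 * the pool draw its pages from it.
 */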
   2829 
   2830 static void *
   2831 pool_allocator_alloc(struct pool *pp, int flags)
   2832 {
   2833 	struct pool_allocator *pa = pp->pr_alloc;
   2834 	void *res;
   2835 
   2836 	res = (*pa->pa_alloc)(pp, flags);
   2837 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2838 		/*
   2839 		 * We only run the drain hook here if PR_NOWAIT.
   2840 		 * In other cases, the hook will be run in
   2841 		 * pool_reclaim().
   2842 		 */
   2843 		if (pp->pr_drain_hook != NULL) {
   2844 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2845 			res = (*pa->pa_alloc)(pp, flags);
   2846 		}
   2847 	}
   2848 	return res;
   2849 }
   2850 
   2851 static void
   2852 pool_allocator_free(struct pool *pp, void *v)
   2853 {
   2854 	struct pool_allocator *pa = pp->pr_alloc;
   2855 
   2856 	(*pa->pa_free)(pp, v);
   2857 }
   2858 
   2859 void *
   2860 pool_page_alloc(struct pool *pp, int flags)
   2861 {
   2862 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2863 
   2864 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
   2865 }
   2866 
   2867 void
   2868 pool_page_free(struct pool *pp, void *v)
   2869 {
   2870 
   2871 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2872 }
   2873 
   2874 static void *
   2875 pool_page_alloc_meta(struct pool *pp, int flags)
   2876 {
   2877 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2878 
   2879 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
   2880 }
   2881 
   2882 static void
   2883 pool_page_free_meta(struct pool *pp, void *v)
   2884 {
   2885 
   2886 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
   2887 }
   2888 
   2889 #ifdef POOL_SUBPAGE
   2890 /* Sub-page allocator, for machines with large hardware pages. */
   2891 void *
   2892 pool_subpage_alloc(struct pool *pp, int flags)
   2893 {
   2894 	return pool_get(&psppool, flags);
   2895 }
   2896 
   2897 void
   2898 pool_subpage_free(struct pool *pp, void *v)
   2899 {
   2900 	pool_put(&psppool, v);
   2901 }
   2902 
   2903 /* We don't provide a real nointr allocator.  Maybe later. */
   2904 void *
   2905 pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2906 {
   2907 
   2908 	return (pool_subpage_alloc(pp, flags));
   2909 }
   2910 
   2911 void
   2912 pool_subpage_free_nointr(struct pool *pp, void *v)
   2913 {
   2914 
   2915 	pool_subpage_free(pp, v);
   2916 }
   2917 #endif /* POOL_SUBPAGE */
   2918 void *
   2919 pool_page_alloc_nointr(struct pool *pp, int flags)
   2920 {
   2921 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2922 
   2923 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
   2924 }
   2925 
   2926 void
   2927 pool_page_free_nointr(struct pool *pp, void *v)
   2928 {
   2929 
   2930 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2931 }
   2932 
   2933 #if defined(DDB)
   2934 static bool
   2935 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2936 {
   2937 
   2938 	return (uintptr_t)ph->ph_page <= addr &&
   2939 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
   2940 }
   2941 
   2942 static bool
   2943 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
   2944 {
   2945 
   2946 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
   2947 }
   2948 
   2949 static bool
   2950 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
   2951 {
   2952 	int i;
   2953 
   2954 	if (pcg == NULL) {
   2955 		return false;
   2956 	}
   2957 	for (i = 0; i < pcg->pcg_avail; i++) {
   2958 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
   2959 			return true;
   2960 		}
   2961 	}
   2962 	return false;
   2963 }
   2964 
   2965 static bool
   2966 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2967 {
   2968 
   2969 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
   2970 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
   2971 		pool_item_bitmap_t *bitmap =
   2972 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
   2973 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
   2974 
   2975 		return (*bitmap & mask) == 0;
   2976 	} else {
   2977 		struct pool_item *pi;
   2978 
   2979 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   2980 			if (pool_in_item(pp, pi, addr)) {
   2981 				return false;
   2982 			}
   2983 		}
   2984 		return true;
   2985 	}
   2986 }
   2987 
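/*
 * pool_whatis:
 *
 *	DDB helper: report which pool (if any) the given address falls
 *	into, and whether the containing item is currently allocated,
 *	cached (globally or per-CPU) or free.
 */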
   2988 void
   2989 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   2990 {
   2991 	struct pool *pp;
   2992 
   2993 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   2994 		struct pool_item_header *ph;
   2995 		uintptr_t item;
   2996 		bool allocated = true;
   2997 		bool incache = false;
   2998 		bool incpucache = false;
   2999 		char cpucachestr[32];
   3000 
   3001 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
   3002 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   3003 				if (pool_in_page(pp, ph, addr)) {
   3004 					goto found;
   3005 				}
   3006 			}
   3007 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   3008 				if (pool_in_page(pp, ph, addr)) {
   3009 					allocated =
   3010 					    pool_allocated(pp, ph, addr);
   3011 					goto found;
   3012 				}
   3013 			}
   3014 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   3015 				if (pool_in_page(pp, ph, addr)) {
   3016 					allocated = false;
   3017 					goto found;
   3018 				}
   3019 			}
   3020 			continue;
   3021 		} else {
   3022 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
   3023 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
   3024 				continue;
   3025 			}
   3026 			allocated = pool_allocated(pp, ph, addr);
   3027 		}
   3028 found:
   3029 		if (allocated && pp->pr_cache) {
   3030 			pool_cache_t pc = pp->pr_cache;
   3031 			struct pool_cache_group *pcg;
   3032 			int i;
   3033 
   3034 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   3035 			    pcg = pcg->pcg_next) {
   3036 				if (pool_in_cg(pp, pcg, addr)) {
   3037 					incache = true;
   3038 					goto print;
   3039 				}
   3040 			}
   3041 			for (i = 0; i < MAXCPUS; i++) {
   3042 				pool_cache_cpu_t *cc;
   3043 
   3044 				if ((cc = pc->pc_cpus[i]) == NULL) {
   3045 					continue;
   3046 				}
   3047 				if (pool_in_cg(pp, cc->cc_current, addr) ||
   3048 				    pool_in_cg(pp, cc->cc_previous, addr)) {
   3049 					struct cpu_info *ci =
   3050 					    cpu_lookup(i);
   3051 
   3052 					incpucache = true;
   3053 					snprintf(cpucachestr,
   3054 					    sizeof(cpucachestr),
   3055 					    "cached by CPU %u",
   3056 					    ci->ci_index);
   3057 					goto print;
   3058 				}
   3059 			}
   3060 		}
   3061 print:
   3062 		item = (uintptr_t)ph->ph_page + ph->ph_off;
   3063 		item = item + rounddown(addr - item, pp->pr_size);
   3064 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
   3065 		    (void *)addr, item, (size_t)(addr - item),
   3066 		    pp->pr_wchan,
   3067 		    incpucache ? cpucachestr :
   3068 		    incache ? "cached" : allocated ? "allocated" : "free");
   3069 	}
   3070 }
   3071 #endif /* defined(DDB) */
   3072