      1 /*	$NetBSD: subr_pool.c,v 1.232 2019/02/10 17:13:33 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
      5  *     The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * This code is derived from software contributed to The NetBSD Foundation
      9  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
     10  * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
     11  * Maxime Villard.
     12  *
     13  * Redistribution and use in source and binary forms, with or without
     14  * modification, are permitted provided that the following conditions
     15  * are met:
     16  * 1. Redistributions of source code must retain the above copyright
     17  *    notice, this list of conditions and the following disclaimer.
     18  * 2. Redistributions in binary form must reproduce the above copyright
     19  *    notice, this list of conditions and the following disclaimer in the
     20  *    documentation and/or other materials provided with the distribution.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32  * POSSIBILITY OF SUCH DAMAGE.
     33  */
     34 
     35 #include <sys/cdefs.h>
     36 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.232 2019/02/10 17:13:33 christos Exp $");
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_ddb.h"
     40 #include "opt_lockdebug.h"
     41 #include "opt_kleak.h"
     42 #endif
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/sysctl.h>
     47 #include <sys/bitops.h>
     48 #include <sys/proc.h>
     49 #include <sys/errno.h>
     50 #include <sys/kernel.h>
     51 #include <sys/vmem.h>
     52 #include <sys/pool.h>
     53 #include <sys/syslog.h>
     54 #include <sys/debug.h>
     55 #include <sys/lockdebug.h>
     56 #include <sys/xcall.h>
     57 #include <sys/cpu.h>
     58 #include <sys/atomic.h>
     59 #include <sys/asan.h>
     60 
     61 #include <uvm/uvm_extern.h>
     62 
     63 /*
     64  * Pool resource management utility.
     65  *
     66  * Memory is allocated in pages which are split into pieces according to
     67  * the pool item size. Each page is kept on one of three lists in the
     68  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
     69  * for empty, full and partially-full pages respectively. The individual
     70  * pool items are on a linked list headed by `ph_itemlist' in each page
     71  * header. The memory for building the page list is either taken from
     72  * the allocated pages themselves (for small pool items) or taken from
     73  * an internal pool of page headers (`phpool').
     74  */
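/*
 * Illustrative sketch (not part of this file): a typical pool consumer
 * declares a pool, initializes it once, and then allocates and frees
 * fixed-size items through it.  "struct foo" and "foopool" are
 * hypothetical names; the calls match the pool_init(), pool_get(),
 * pool_put() and pool_destroy() routines defined below.
 *
 *	static struct pool foopool;
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    NULL, IPL_NONE);		(NULL selects pool_allocator_kmem)
 *
 *	struct foo *f = pool_get(&foopool, PR_WAITOK);
 *	...use f...
 *	pool_put(&foopool, f);
 *
 *	pool_destroy(&foopool);
 */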
     75 
      76 /* List of all pools. Not static, as it is needed by 'vmstat -m' */
     77 TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     78 
     79 /* Private pool for page header structures */
     80 #define	PHPOOL_MAX	8
     81 static struct pool phpool[PHPOOL_MAX];
     82 #define	PHPOOL_FREELIST_NELEM(idx) \
     83 	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
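/*
 * Worked example (informational): with the 32-bit pool_item_bitmap_t
 * defined below, BITMAP_SIZE is 32, so the PR_NOTOUCH page header pools
 * hold bitmaps for idx 1 -> 64, idx 2 -> 128, ... idx 7 -> 4096 items
 * per page; idx 0 -> 0 is phpool[0], which serves pools that keep a
 * per-page item free list instead of a bitmap.
 */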
     84 
     85 #ifdef POOL_SUBPAGE
     86 /* Pool of subpages for use by normal pools. */
     87 static struct pool psppool;
     88 #endif
     89 
     90 #if defined(KASAN)
     91 #define POOL_REDZONE
     92 #endif
     93 
     94 #ifdef POOL_REDZONE
     95 # ifdef KASAN
     96 #  define POOL_REDZONE_SIZE 8
     97 # else
     98 #  define POOL_REDZONE_SIZE 2
     99 # endif
    100 static void pool_redzone_init(struct pool *, size_t);
    101 static void pool_redzone_fill(struct pool *, void *);
    102 static void pool_redzone_check(struct pool *, void *);
    103 static void pool_cache_redzone_check(pool_cache_t, void *);
    104 #else
    105 # define pool_redzone_init(pp, sz)		__nothing
    106 # define pool_redzone_fill(pp, ptr)		__nothing
    107 # define pool_redzone_check(pp, ptr)		__nothing
    108 # define pool_cache_redzone_check(pc, ptr)	__nothing
    109 #endif
    110 
    111 #ifdef KLEAK
    112 static void pool_kleak_fill(struct pool *, void *);
    113 static void pool_cache_kleak_fill(pool_cache_t, void *);
    114 #else
    115 #define pool_kleak_fill(pp, ptr)	__nothing
    116 #define pool_cache_kleak_fill(pc, ptr)	__nothing
    117 #endif
    118 
    119 #define pc_has_ctor(pc) \
    120 	(pc->pc_ctor != (int (*)(void *, void *, int))nullop)
    121 #define pc_has_dtor(pc) \
    122 	(pc->pc_dtor != (void (*)(void *, void *))nullop)
    123 
    124 static void *pool_page_alloc_meta(struct pool *, int);
    125 static void pool_page_free_meta(struct pool *, void *);
    126 
    127 /* allocator for pool metadata */
    128 struct pool_allocator pool_allocator_meta = {
    129 	.pa_alloc = pool_page_alloc_meta,
    130 	.pa_free = pool_page_free_meta,
    131 	.pa_pagesz = 0
    132 };
    133 
    134 #define POOL_ALLOCATOR_BIG_BASE 13
    135 extern struct pool_allocator pool_allocator_big[];
    136 static int pool_bigidx(size_t);
    137 
    138 /* # of seconds to retain page after last use */
    139 int pool_inactive_time = 10;
    140 
    141 /* Next candidate for drainage (see pool_drain()) */
    142 static struct pool	*drainpp;
    143 
    144 /* This lock protects both pool_head and drainpp. */
    145 static kmutex_t pool_head_lock;
    146 static kcondvar_t pool_busy;
    147 
    148 /* This lock protects initialization of a potentially shared pool allocator */
    149 static kmutex_t pool_allocator_lock;
    150 
    151 typedef uint32_t pool_item_bitmap_t;
    152 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
    153 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
    154 
    155 struct pool_item_header {
    156 	/* Page headers */
    157 	LIST_ENTRY(pool_item_header)
    158 				ph_pagelist;	/* pool page list */
    159 	SPLAY_ENTRY(pool_item_header)
    160 				ph_node;	/* Off-page page headers */
    161 	void *			ph_page;	/* this page's address */
    162 	uint32_t		ph_time;	/* last referenced */
    163 	uint16_t		ph_nmissing;	/* # of chunks in use */
    164 	uint16_t		ph_off;		/* start offset in page */
    165 	union {
    166 		/* !PR_NOTOUCH */
    167 		struct {
    168 			LIST_HEAD(, pool_item)
    169 				phu_itemlist;	/* chunk list for this page */
    170 		} phu_normal;
    171 		/* PR_NOTOUCH */
    172 		struct {
    173 			pool_item_bitmap_t phu_bitmap[1];
    174 		} phu_notouch;
    175 	} ph_u;
    176 };
    177 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
    178 #define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
    179 
    180 #if defined(DIAGNOSTIC) && !defined(KASAN)
    181 #define POOL_CHECK_MAGIC
    182 #endif
    183 
    184 struct pool_item {
    185 #ifdef POOL_CHECK_MAGIC
    186 	u_int pi_magic;
    187 #endif
    188 #define	PI_MAGIC 0xdeaddeadU
    189 	/* Other entries use only this list entry */
    190 	LIST_ENTRY(pool_item)	pi_list;
    191 };
    192 
    193 #define	POOL_NEEDS_CATCHUP(pp)						\
    194 	((pp)->pr_nitems < (pp)->pr_minitems)
    195 
    196 /*
    197  * Pool cache management.
    198  *
    199  * Pool caches provide a way for constructed objects to be cached by the
    200  * pool subsystem.  This can lead to performance improvements by avoiding
     201  * needless object construction/destruction; destruction is deferred
     202  * until absolutely necessary.
    203  *
    204  * Caches are grouped into cache groups.  Each cache group references up
    205  * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
    206  * object from the pool, it calls the object's constructor and places it
    207  * into a cache group.  When a cache group frees an object back to the
    208  * pool, it first calls the object's destructor.  This allows the object
    209  * to persist in constructed form while freed to the cache.
    210  *
    211  * The pool references each cache, so that when a pool is drained by the
    212  * pagedaemon, it can drain each individual cache as well.  Each time a
    213  * cache is drained, the most idle cache group is freed to the pool in
    214  * its entirety.
    215  *
     216  * Pool caches are laid on top of pools.  By layering them, we can avoid
    217  * the complexity of cache management for pools which would not benefit
    218  * from it.
    219  */
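/*
 * Illustrative sketch (not part of this file): a pool cache is created
 * with pool_cache_init() and then used much like a bare pool, except
 * that objects may come back from the CPU and group layers already
 * constructed.  "struct foo", "foo_ctor", "foo_dtor" and "foocache"
 * are hypothetical names; see pool_cache(9) for the exact prototypes.
 *
 *	static int foo_ctor(void *, void *, int);
 *	static void foo_dtor(void *, void *);
 *	static pool_cache_t foocache;
 *
 *	foocache = pool_cache_init(sizeof(struct foo), coherency_unit, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foocache, PR_WAITOK);
 *	...use f...
 *	pool_cache_put(foocache, f);
 *
 *	pool_cache_destroy(foocache);
 */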
    220 
    221 static struct pool pcg_normal_pool;
    222 static struct pool pcg_large_pool;
    223 static struct pool cache_pool;
    224 static struct pool cache_cpu_pool;
    225 
    226 pool_cache_t pnbuf_cache;	/* pathname buffer cache */
    227 
    228 /* List of all caches. */
    229 TAILQ_HEAD(,pool_cache) pool_cache_head =
    230     TAILQ_HEAD_INITIALIZER(pool_cache_head);
    231 
    232 int pool_cache_disable;		/* global disable for caching */
    233 static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
    234 
    235 static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
    236 				    void *);
    237 static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
    238 				    void **, paddr_t *, int);
    239 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
    240 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
    241 static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
    242 static void	pool_cache_transfer(pool_cache_t);
    243 
    244 static int	pool_catchup(struct pool *);
    245 static void	pool_prime_page(struct pool *, void *,
    246 		    struct pool_item_header *);
    247 static void	pool_update_curpage(struct pool *);
    248 
    249 static int	pool_grow(struct pool *, int);
    250 static void	*pool_allocator_alloc(struct pool *, int);
    251 static void	pool_allocator_free(struct pool *, void *);
    252 
    253 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
    254 	void (*)(const char *, ...) __printflike(1, 2));
    255 static void pool_print1(struct pool *, const char *,
    256 	void (*)(const char *, ...) __printflike(1, 2));
    257 
    258 static int pool_chk_page(struct pool *, const char *,
    259 			 struct pool_item_header *);
    260 
    261 static inline unsigned int
    262 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    263     const void *v)
    264 {
    265 	const char *cp = v;
    266 	unsigned int idx;
    267 
    268 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
    269 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
    270 	KASSERT(idx < pp->pr_itemsperpage);
    271 	return idx;
    272 }
    273 
    274 static inline void
    275 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    276     void *obj)
    277 {
    278 	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
    279 	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
    280 	pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK);
    281 
    282 	KASSERT((*bitmap & mask) == 0);
    283 	*bitmap |= mask;
    284 }
    285 
    286 static inline void *
    287 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
    288 {
    289 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
    290 	unsigned int idx;
    291 	int i;
    292 
    293 	for (i = 0; ; i++) {
    294 		int bit;
    295 
    296 		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
    297 		bit = ffs32(bitmap[i]);
    298 		if (bit) {
    299 			pool_item_bitmap_t mask;
    300 
    301 			bit--;
    302 			idx = (i * BITMAP_SIZE) + bit;
    303 			mask = 1U << bit;
    304 			KASSERT((bitmap[i] & mask) != 0);
    305 			bitmap[i] &= ~mask;
    306 			break;
    307 		}
    308 	}
    309 	KASSERT(idx < pp->pr_itemsperpage);
    310 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
    311 }
    312 
    313 static inline void
    314 pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
    315 {
    316 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
    317 	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
    318 	int i;
    319 
    320 	for (i = 0; i < n; i++) {
    321 		bitmap[i] = (pool_item_bitmap_t)-1;
    322 	}
    323 }
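/*
 * Worked example (informational): for a PR_NOTOUCH pool with
 * pr_size == 64 and ph_off == 0, the item at ph_page + 0x940 has
 * idx = 0x940 / 64 = 37, which is bit 37 & 31 = 5 of bitmap word
 * 37 / 32 = 1.  A set bit means the item is free: pr_item_notouch_init()
 * sets all bits, _get() clears one, and _put() sets it again.
 */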
    324 
    325 static inline int
    326 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
    327 {
    328 
    329 	/*
     330 	 * We consider a pool_item_header with a smaller ph_page to be bigger.
     331 	 * (This unnatural ordering is for the benefit of pr_find_pagehead.)
    332 	 */
    333 
    334 	if (a->ph_page < b->ph_page)
    335 		return (1);
    336 	else if (a->ph_page > b->ph_page)
    337 		return (-1);
    338 	else
    339 		return (0);
    340 }
    341 
    342 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
    343 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
    344 
    345 static inline struct pool_item_header *
    346 pr_find_pagehead_noalign(struct pool *pp, void *v)
    347 {
    348 	struct pool_item_header *ph, tmp;
    349 
    350 	tmp.ph_page = (void *)(uintptr_t)v;
    351 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    352 	if (ph == NULL) {
    353 		ph = SPLAY_ROOT(&pp->pr_phtree);
    354 		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
    355 			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
    356 		}
    357 		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
    358 	}
    359 
    360 	return ph;
    361 }
    362 
    363 /*
    364  * Return the pool page header based on item address.
    365  */
    366 static inline struct pool_item_header *
    367 pr_find_pagehead(struct pool *pp, void *v)
    368 {
    369 	struct pool_item_header *ph, tmp;
    370 
    371 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
    372 		ph = pr_find_pagehead_noalign(pp, v);
    373 	} else {
    374 		void *page =
    375 		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
    376 
    377 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
    378 			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
    379 		} else {
    380 			tmp.ph_page = page;
    381 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    382 		}
    383 	}
    384 
    385 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
    386 	    ((char *)ph->ph_page <= (char *)v &&
    387 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
    388 	return ph;
    389 }
    390 
    391 static void
    392 pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
    393 {
    394 	struct pool_item_header *ph;
    395 
    396 	while ((ph = LIST_FIRST(pq)) != NULL) {
    397 		LIST_REMOVE(ph, ph_pagelist);
    398 		pool_allocator_free(pp, ph->ph_page);
    399 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    400 			pool_put(pp->pr_phpool, ph);
    401 	}
    402 }
    403 
    404 /*
    405  * Remove a page from the pool.
    406  */
    407 static inline void
    408 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    409      struct pool_pagelist *pq)
    410 {
    411 
    412 	KASSERT(mutex_owned(&pp->pr_lock));
    413 
    414 	/*
    415 	 * If the page was idle, decrement the idle page count.
    416 	 */
    417 	if (ph->ph_nmissing == 0) {
    418 		KASSERT(pp->pr_nidle != 0);
    419 		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
    420 		    "nitems=%u < itemsperpage=%u",
    421 		    pp->pr_nitems, pp->pr_itemsperpage);
    422 		pp->pr_nidle--;
    423 	}
    424 
    425 	pp->pr_nitems -= pp->pr_itemsperpage;
    426 
    427 	/*
    428 	 * Unlink the page from the pool and queue it for release.
    429 	 */
    430 	LIST_REMOVE(ph, ph_pagelist);
    431 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    432 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
    433 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
    434 
    435 	pp->pr_npages--;
    436 	pp->pr_npagefree++;
    437 
    438 	pool_update_curpage(pp);
    439 }
    440 
    441 /*
    442  * Initialize all the pools listed in the "pools" link set.
    443  */
    444 void
    445 pool_subsystem_init(void)
    446 {
    447 	size_t size;
    448 	int idx;
    449 
    450 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
    451 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
    452 	cv_init(&pool_busy, "poolbusy");
    453 
    454 	/*
    455 	 * Initialize private page header pool and cache magazine pool if we
    456 	 * haven't done so yet.
    457 	 */
    458 	for (idx = 0; idx < PHPOOL_MAX; idx++) {
    459 		static char phpool_names[PHPOOL_MAX][6+1+6+1];
    460 		int nelem;
    461 		size_t sz;
    462 
    463 		nelem = PHPOOL_FREELIST_NELEM(idx);
    464 		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
    465 		    "phpool-%d", nelem);
    466 		sz = sizeof(struct pool_item_header);
    467 		if (nelem) {
    468 			sz = offsetof(struct pool_item_header,
    469 			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
    470 		}
    471 		pool_init(&phpool[idx], sz, 0, 0, 0,
    472 		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
    473 	}
    474 #ifdef POOL_SUBPAGE
    475 	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    476 	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
    477 #endif
    478 
    479 	size = sizeof(pcg_t) +
    480 	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
    481 	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
    482 	    "pcgnormal", &pool_allocator_meta, IPL_VM);
    483 
    484 	size = sizeof(pcg_t) +
    485 	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
    486 	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
    487 	    "pcglarge", &pool_allocator_meta, IPL_VM);
    488 
    489 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
    490 	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
    491 
    492 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
    493 	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
    494 }
    495 
    496 /*
    497  * Initialize the given pool resource structure.
    498  *
    499  * We export this routine to allow other kernel parts to declare
    500  * static pools that must be initialized before kmem(9) is available.
    501  */
    502 void
    503 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    504     const char *wchan, struct pool_allocator *palloc, int ipl)
    505 {
    506 	struct pool *pp1;
    507 	size_t trysize, phsize, prsize;
    508 	int off, slack;
    509 
    510 #ifdef DEBUG
    511 	if (__predict_true(!cold))
    512 		mutex_enter(&pool_head_lock);
    513 	/*
    514 	 * Check that the pool hasn't already been initialised and
    515 	 * added to the list of all pools.
    516 	 */
    517 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
    518 		if (pp == pp1)
    519 			panic("%s: [%s] already initialised", __func__,
    520 			    wchan);
    521 	}
    522 	if (__predict_true(!cold))
    523 		mutex_exit(&pool_head_lock);
    524 #endif
    525 
    526 	if (palloc == NULL)
    527 		palloc = &pool_allocator_kmem;
    528 #ifdef POOL_SUBPAGE
    529 	if (size > palloc->pa_pagesz) {
    530 		if (palloc == &pool_allocator_kmem)
    531 			palloc = &pool_allocator_kmem_fullpage;
    532 		else if (palloc == &pool_allocator_nointr)
    533 			palloc = &pool_allocator_nointr_fullpage;
    534 	}
    535 #endif /* POOL_SUBPAGE */
    536 	if (!cold)
    537 		mutex_enter(&pool_allocator_lock);
    538 	if (palloc->pa_refcnt++ == 0) {
    539 		if (palloc->pa_pagesz == 0)
    540 			palloc->pa_pagesz = PAGE_SIZE;
    541 
    542 		TAILQ_INIT(&palloc->pa_list);
    543 
    544 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
    545 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    546 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    547 	}
    548 	if (!cold)
    549 		mutex_exit(&pool_allocator_lock);
    550 
    551 	if (align == 0)
    552 		align = ALIGN(1);
    553 
    554 	prsize = size;
    555 	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
    556 		prsize = sizeof(struct pool_item);
    557 
    558 	prsize = roundup(prsize, align);
    559 	KASSERTMSG((prsize <= palloc->pa_pagesz),
    560 	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
    561 	    __func__, wchan, prsize, palloc->pa_pagesz);
    562 
    563 	/*
    564 	 * Initialize the pool structure.
    565 	 */
    566 	LIST_INIT(&pp->pr_emptypages);
    567 	LIST_INIT(&pp->pr_fullpages);
    568 	LIST_INIT(&pp->pr_partpages);
    569 	pp->pr_cache = NULL;
    570 	pp->pr_curpage = NULL;
    571 	pp->pr_npages = 0;
    572 	pp->pr_minitems = 0;
    573 	pp->pr_minpages = 0;
    574 	pp->pr_maxpages = UINT_MAX;
    575 	pp->pr_roflags = flags;
    576 	pp->pr_flags = 0;
    577 	pp->pr_size = prsize;
    578 	pp->pr_align = align;
    579 	pp->pr_wchan = wchan;
    580 	pp->pr_alloc = palloc;
    581 	pp->pr_nitems = 0;
    582 	pp->pr_nout = 0;
    583 	pp->pr_hardlimit = UINT_MAX;
    584 	pp->pr_hardlimit_warning = NULL;
    585 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    586 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    587 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    588 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    589 	pp->pr_drain_hook = NULL;
    590 	pp->pr_drain_hook_arg = NULL;
    591 	pp->pr_freecheck = NULL;
    592 	pool_redzone_init(pp, size);
    593 
    594 	/*
     595 	 * Decide whether to put the page header off-page, to avoid
     596 	 * wasting too large a part of the page or having too big an item.
     597 	 * Off-page page headers are kept in a splay tree, so we can match
     598 	 * a returned item with its header based on the page address.
     599 	 * We use 1/16 of the page size and about 8 times the page header
     600 	 * size as the threshold (XXX: tune).
    601 	 *
    602 	 * However, we'll put the header into the page if we can put
    603 	 * it without wasting any items.
    604 	 *
    605 	 * Silently enforce `0 <= ioff < align'.
    606 	 */
    607 	pp->pr_itemoffset = ioff %= align;
    608 	/* See the comment below about reserved bytes. */
    609 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
    610 	phsize = ALIGN(sizeof(struct pool_item_header));
    611 	if (pp->pr_roflags & PR_PHINPAGE ||
    612 	    ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
    613 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
    614 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
    615 		/* Use the end of the page for the page header */
    616 		pp->pr_roflags |= PR_PHINPAGE;
    617 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
    618 	} else {
    619 		/* The page header will be taken from our page header pool */
    620 		pp->pr_phoffset = 0;
    621 		off = palloc->pa_pagesz;
    622 		SPLAY_INIT(&pp->pr_phtree);
    623 	}
    624 
    625 	/*
    626 	 * Alignment is to take place at `ioff' within the item. This means
    627 	 * we must reserve up to `align - 1' bytes on the page to allow
    628 	 * appropriate positioning of each item.
    629 	 */
    630 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    631 	KASSERT(pp->pr_itemsperpage != 0);
    632 	if ((pp->pr_roflags & PR_NOTOUCH)) {
    633 		int idx;
    634 
    635 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
    636 		    idx++) {
    637 			/* nothing */
    638 		}
    639 		if (idx >= PHPOOL_MAX) {
    640 			/*
     641 			 * If you see this panic, consider tweaking
    642 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
    643 			 */
    644 			panic("%s: [%s] too large itemsperpage(%d) for "
    645 			    "PR_NOTOUCH", __func__,
    646 			    pp->pr_wchan, pp->pr_itemsperpage);
    647 		}
    648 		pp->pr_phpool = &phpool[idx];
    649 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    650 		pp->pr_phpool = &phpool[0];
    651 	}
    652 #if defined(DIAGNOSTIC)
    653 	else {
    654 		pp->pr_phpool = NULL;
    655 	}
    656 #endif
    657 
    658 	/*
    659 	 * Use the slack between the chunks and the page header
    660 	 * for "cache coloring".
    661 	 */
    662 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    663 	pp->pr_maxcolor = (slack / align) * align;
    664 	pp->pr_curcolor = 0;
    665 
    666 	pp->pr_nget = 0;
    667 	pp->pr_nfail = 0;
    668 	pp->pr_nput = 0;
    669 	pp->pr_npagealloc = 0;
    670 	pp->pr_npagefree = 0;
    671 	pp->pr_hiwat = 0;
    672 	pp->pr_nidle = 0;
    673 	pp->pr_refcnt = 0;
    674 
    675 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
    676 	cv_init(&pp->pr_cv, wchan);
    677 	pp->pr_ipl = ipl;
    678 
    679 	/* Insert into the list of all pools. */
    680 	if (!cold)
    681 		mutex_enter(&pool_head_lock);
    682 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
    683 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
    684 			break;
    685 	}
    686 	if (pp1 == NULL)
    687 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    688 	else
    689 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
    690 	if (!cold)
    691 		mutex_exit(&pool_head_lock);
    692 
    693 	/* Insert this into the list of pools using this allocator. */
    694 	if (!cold)
    695 		mutex_enter(&palloc->pa_lock);
    696 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    697 	if (!cold)
    698 		mutex_exit(&palloc->pa_lock);
    699 }
    700 
    701 /*
     702  * De-commission a pool resource.
    703  */
    704 void
    705 pool_destroy(struct pool *pp)
    706 {
    707 	struct pool_pagelist pq;
    708 	struct pool_item_header *ph;
    709 
    710 	/* Remove from global pool list */
    711 	mutex_enter(&pool_head_lock);
    712 	while (pp->pr_refcnt != 0)
    713 		cv_wait(&pool_busy, &pool_head_lock);
    714 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    715 	if (drainpp == pp)
    716 		drainpp = NULL;
    717 	mutex_exit(&pool_head_lock);
    718 
    719 	/* Remove this pool from its allocator's list of pools. */
    720 	mutex_enter(&pp->pr_alloc->pa_lock);
    721 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    722 	mutex_exit(&pp->pr_alloc->pa_lock);
    723 
    724 	mutex_enter(&pool_allocator_lock);
    725 	if (--pp->pr_alloc->pa_refcnt == 0)
    726 		mutex_destroy(&pp->pr_alloc->pa_lock);
    727 	mutex_exit(&pool_allocator_lock);
    728 
    729 	mutex_enter(&pp->pr_lock);
    730 
    731 	KASSERT(pp->pr_cache == NULL);
    732 	KASSERTMSG((pp->pr_nout == 0),
    733 	    "%s: pool busy: still out: %u", __func__, pp->pr_nout);
    734 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
    735 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
    736 
    737 	/* Remove all pages */
    738 	LIST_INIT(&pq);
    739 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
    740 		pr_rmpage(pp, ph, &pq);
    741 
    742 	mutex_exit(&pp->pr_lock);
    743 
    744 	pr_pagelist_free(pp, &pq);
    745 	cv_destroy(&pp->pr_cv);
    746 	mutex_destroy(&pp->pr_lock);
    747 }
    748 
    749 void
    750 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    751 {
    752 
    753 	/* XXX no locking -- must be used just after pool_init() */
    754 	KASSERTMSG((pp->pr_drain_hook == NULL),
    755 	    "%s: [%s] already set", __func__, pp->pr_wchan);
    756 	pp->pr_drain_hook = fn;
    757 	pp->pr_drain_hook_arg = arg;
    758 }
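/*
 * Illustrative sketch (not part of this file): a subsystem that can
 * release cached memory on demand registers a drain hook right after
 * pool_init(); the hook is later invoked with the pool unlocked when
 * the hard limit is hit or when pool_reclaim() runs.  "foopool" and
 * "foo_drain" are hypothetical names.
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		...release cached objects back to foopool...
 *	}
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    NULL, IPL_NONE);
 *	pool_set_drain_hook(&foopool, foo_drain, NULL);
 */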
    759 
    760 static struct pool_item_header *
    761 pool_alloc_item_header(struct pool *pp, void *storage, int flags)
    762 {
    763 	struct pool_item_header *ph;
    764 
    765 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    766 		ph = (void *)((char *)storage + pp->pr_phoffset);
    767 	else
    768 		ph = pool_get(pp->pr_phpool, flags);
    769 
    770 	return (ph);
    771 }
    772 
    773 /*
    774  * Grab an item from the pool.
    775  */
    776 void *
    777 pool_get(struct pool *pp, int flags)
    778 {
    779 	struct pool_item *pi;
    780 	struct pool_item_header *ph;
    781 	void *v;
    782 
    783 	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
    784 	KASSERTMSG((pp->pr_itemsperpage != 0),
    785 	    "%s: [%s] pr_itemsperpage is zero, "
    786 	    "pool not initialized?", __func__, pp->pr_wchan);
    787 	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
    788 		|| pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
    789 	    "%s: [%s] is IPL_NONE, but called from interrupt context",
    790 	    __func__, pp->pr_wchan);
    791 	if (flags & PR_WAITOK) {
    792 		ASSERT_SLEEPABLE();
    793 	}
    794 
    795 	mutex_enter(&pp->pr_lock);
    796  startover:
    797 	/*
    798 	 * Check to see if we've reached the hard limit.  If we have,
    799 	 * and we can wait, then wait until an item has been returned to
    800 	 * the pool.
    801 	 */
    802 	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
    803 	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
    804 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    805 		if (pp->pr_drain_hook != NULL) {
    806 			/*
    807 			 * Since the drain hook is going to free things
    808 			 * back to the pool, unlock, call the hook, re-lock,
    809 			 * and check the hardlimit condition again.
    810 			 */
    811 			mutex_exit(&pp->pr_lock);
    812 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
    813 			mutex_enter(&pp->pr_lock);
    814 			if (pp->pr_nout < pp->pr_hardlimit)
    815 				goto startover;
    816 		}
    817 
    818 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    819 			/*
    820 			 * XXX: A warning isn't logged in this case.  Should
    821 			 * it be?
    822 			 */
    823 			pp->pr_flags |= PR_WANTED;
    824 			do {
    825 				cv_wait(&pp->pr_cv, &pp->pr_lock);
    826 			} while (pp->pr_flags & PR_WANTED);
    827 			goto startover;
    828 		}
    829 
    830 		/*
    831 		 * Log a message that the hard limit has been hit.
    832 		 */
    833 		if (pp->pr_hardlimit_warning != NULL &&
    834 		    ratecheck(&pp->pr_hardlimit_warning_last,
    835 			      &pp->pr_hardlimit_ratecap))
    836 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    837 
    838 		pp->pr_nfail++;
    839 
    840 		mutex_exit(&pp->pr_lock);
    841 		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
    842 		return (NULL);
    843 	}
    844 
    845 	/*
    846 	 * The convention we use is that if `curpage' is not NULL, then
    847 	 * it points at a non-empty bucket. In particular, `curpage'
    848 	 * never points at a page header which has PR_PHINPAGE set and
    849 	 * has no items in its bucket.
    850 	 */
    851 	if ((ph = pp->pr_curpage) == NULL) {
    852 		int error;
    853 
    854 		KASSERTMSG((pp->pr_nitems == 0),
    855 		    "%s: [%s] curpage NULL, inconsistent nitems %u",
    856 		    __func__, pp->pr_wchan, pp->pr_nitems);
    857 
    858 		/*
    859 		 * Call the back-end page allocator for more memory.
    860 		 * Release the pool lock, as the back-end page allocator
    861 		 * may block.
    862 		 */
    863 		error = pool_grow(pp, flags);
    864 		if (error != 0) {
    865 			/*
     866 			 * pool_grow aborts with ERESTART when another
     867 			 * thread is allocating a new page; in that case
     868 			 * retry from the top.
    869 			 */
    870 			if (error == ERESTART)
    871 				goto startover;
    872 
    873 			/*
    874 			 * We were unable to allocate a page or item
    875 			 * header, but we released the lock during
    876 			 * allocation, so perhaps items were freed
    877 			 * back to the pool.  Check for this case.
    878 			 */
    879 			if (pp->pr_curpage != NULL)
    880 				goto startover;
    881 
    882 			pp->pr_nfail++;
    883 			mutex_exit(&pp->pr_lock);
    884 			KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
    885 			return (NULL);
    886 		}
    887 
    888 		/* Start the allocation process over. */
    889 		goto startover;
    890 	}
    891 	if (pp->pr_roflags & PR_NOTOUCH) {
    892 		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
    893 		    "%s: %s: page empty", __func__, pp->pr_wchan);
    894 		v = pr_item_notouch_get(pp, ph);
    895 	} else {
    896 		v = pi = LIST_FIRST(&ph->ph_itemlist);
    897 		if (__predict_false(v == NULL)) {
    898 			mutex_exit(&pp->pr_lock);
    899 			panic("%s: [%s] page empty", __func__, pp->pr_wchan);
    900 		}
    901 		KASSERTMSG((pp->pr_nitems > 0),
    902 		    "%s: [%s] nitems %u inconsistent on itemlist",
    903 		    __func__, pp->pr_wchan, pp->pr_nitems);
    904 #ifdef POOL_CHECK_MAGIC
    905 		KASSERTMSG((pi->pi_magic == PI_MAGIC),
    906 		    "%s: [%s] free list modified: "
    907 		    "magic=%x; page %p; item addr %p", __func__,
    908 		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
    909 #endif
    910 
    911 		/*
    912 		 * Remove from item list.
    913 		 */
    914 		LIST_REMOVE(pi, pi_list);
    915 	}
    916 	pp->pr_nitems--;
    917 	pp->pr_nout++;
    918 	if (ph->ph_nmissing == 0) {
    919 		KASSERT(pp->pr_nidle > 0);
    920 		pp->pr_nidle--;
    921 
    922 		/*
    923 		 * This page was previously empty.  Move it to the list of
    924 		 * partially-full pages.  This page is already curpage.
    925 		 */
    926 		LIST_REMOVE(ph, ph_pagelist);
    927 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
    928 	}
    929 	ph->ph_nmissing++;
    930 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
    931 		KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
    932 			LIST_EMPTY(&ph->ph_itemlist)),
    933 		    "%s: [%s] nmissing (%u) inconsistent", __func__,
    934 			pp->pr_wchan, ph->ph_nmissing);
    935 		/*
    936 		 * This page is now full.  Move it to the full list
    937 		 * and select a new current page.
    938 		 */
    939 		LIST_REMOVE(ph, ph_pagelist);
    940 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
    941 		pool_update_curpage(pp);
    942 	}
    943 
    944 	pp->pr_nget++;
    945 
    946 	/*
    947 	 * If we have a low water mark and we are now below that low
    948 	 * water mark, add more items to the pool.
    949 	 */
    950 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
    951 		/*
    952 		 * XXX: Should we log a warning?  Should we set up a timeout
    953 		 * to try again in a second or so?  The latter could break
    954 		 * a caller's assumptions about interrupt protection, etc.
    955 		 */
    956 	}
    957 
    958 	mutex_exit(&pp->pr_lock);
    959 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
    960 	FREECHECK_OUT(&pp->pr_freecheck, v);
    961 	pool_redzone_fill(pp, v);
    962 	if (flags & PR_ZERO)
    963 		memset(v, 0, pp->pr_size);
    964 	else
    965 		pool_kleak_fill(pp, v);
    966 	return v;
    967 }
    968 
    969 /*
    970  * Internal version of pool_put().  Pool is already locked/entered.
    971  */
    972 static void
    973 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
    974 {
    975 	struct pool_item *pi = v;
    976 	struct pool_item_header *ph;
    977 
    978 	KASSERT(mutex_owned(&pp->pr_lock));
    979 	pool_redzone_check(pp, v);
    980 	FREECHECK_IN(&pp->pr_freecheck, v);
    981 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
    982 
    983 	KASSERTMSG((pp->pr_nout > 0),
    984 	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);
    985 
    986 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
    987 		panic("%s: [%s] page header missing", __func__,  pp->pr_wchan);
    988 	}
    989 
    990 	/*
    991 	 * Return to item list.
    992 	 */
    993 	if (pp->pr_roflags & PR_NOTOUCH) {
    994 		pr_item_notouch_put(pp, ph, v);
    995 	} else {
    996 #ifdef POOL_CHECK_MAGIC
    997 		pi->pi_magic = PI_MAGIC;
    998 #endif
    999 
   1000 		if (pp->pr_redzone) {
   1001 			/*
   1002 			 * Mark the pool_item as valid. The rest is already
   1003 			 * invalid.
   1004 			 */
   1005 			kasan_mark(pi, sizeof(*pi), sizeof(*pi));
   1006 		}
   1007 
   1008 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1009 	}
   1010 	KDASSERT(ph->ph_nmissing != 0);
   1011 	ph->ph_nmissing--;
   1012 	pp->pr_nput++;
   1013 	pp->pr_nitems++;
   1014 	pp->pr_nout--;
   1015 
   1016 	/* Cancel "pool empty" condition if it exists */
   1017 	if (pp->pr_curpage == NULL)
   1018 		pp->pr_curpage = ph;
   1019 
   1020 	if (pp->pr_flags & PR_WANTED) {
   1021 		pp->pr_flags &= ~PR_WANTED;
   1022 		cv_broadcast(&pp->pr_cv);
   1023 	}
   1024 
   1025 	/*
   1026 	 * If this page is now empty, do one of two things:
   1027 	 *
   1028 	 *	(1) If we have more pages than the page high water mark,
   1029 	 *	    free the page back to the system.  ONLY CONSIDER
   1030 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
   1031 	 *	    CLAIM.
   1032 	 *
   1033 	 *	(2) Otherwise, move the page to the empty page list.
   1034 	 *
   1035 	 * Either way, select a new current page (so we use a partially-full
   1036 	 * page if one is available).
   1037 	 */
   1038 	if (ph->ph_nmissing == 0) {
   1039 		pp->pr_nidle++;
   1040 		if (pp->pr_npages > pp->pr_minpages &&
   1041 		    pp->pr_npages > pp->pr_maxpages) {
   1042 			pr_rmpage(pp, ph, pq);
   1043 		} else {
   1044 			LIST_REMOVE(ph, ph_pagelist);
   1045 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1046 
   1047 			/*
   1048 			 * Update the timestamp on the page.  A page must
   1049 			 * be idle for some period of time before it can
   1050 			 * be reclaimed by the pagedaemon.  This minimizes
   1051 			 * ping-pong'ing for memory.
   1052 			 *
   1053 			 * note for 64-bit time_t: truncating to 32-bit is not
   1054 			 * a problem for our usage.
   1055 			 */
   1056 			ph->ph_time = time_uptime;
   1057 		}
   1058 		pool_update_curpage(pp);
   1059 	}
   1060 
   1061 	/*
   1062 	 * If the page was previously completely full, move it to the
   1063 	 * partially-full list and make it the current page.  The next
   1064 	 * allocation will get the item from this page, instead of
   1065 	 * further fragmenting the pool.
   1066 	 */
   1067 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1068 		LIST_REMOVE(ph, ph_pagelist);
   1069 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1070 		pp->pr_curpage = ph;
   1071 	}
   1072 }
   1073 
   1074 void
   1075 pool_put(struct pool *pp, void *v)
   1076 {
   1077 	struct pool_pagelist pq;
   1078 
   1079 	LIST_INIT(&pq);
   1080 
   1081 	mutex_enter(&pp->pr_lock);
   1082 	pool_do_put(pp, v, &pq);
   1083 	mutex_exit(&pp->pr_lock);
   1084 
   1085 	pr_pagelist_free(pp, &pq);
   1086 }
   1087 
   1088 /*
   1089  * pool_grow: grow a pool by a page.
   1090  *
   1091  * => called with pool locked.
   1092  * => unlock and relock the pool.
   1093  * => return with pool locked.
   1094  */
   1095 
   1096 static int
   1097 pool_grow(struct pool *pp, int flags)
   1098 {
   1099 	/*
   1100 	 * If there's a pool_grow in progress, wait for it to complete
   1101 	 * and try again from the top.
   1102 	 */
   1103 	if (pp->pr_flags & PR_GROWING) {
   1104 		if (flags & PR_WAITOK) {
   1105 			do {
   1106 				cv_wait(&pp->pr_cv, &pp->pr_lock);
   1107 			} while (pp->pr_flags & PR_GROWING);
   1108 			return ERESTART;
   1109 		} else {
   1110 			if (pp->pr_flags & PR_GROWINGNOWAIT) {
   1111 				/*
   1112 				 * This needs an unlock/relock dance so
   1113 				 * that the other caller has a chance to
   1114 				 * run and actually do the thing.  Note
   1115 				 * that this is effectively a busy-wait.
   1116 				 */
   1117 				mutex_exit(&pp->pr_lock);
   1118 				mutex_enter(&pp->pr_lock);
   1119 				return ERESTART;
   1120 			}
   1121 			return EWOULDBLOCK;
   1122 		}
   1123 	}
   1124 	pp->pr_flags |= PR_GROWING;
   1125 	if (flags & PR_WAITOK)
   1126 		mutex_exit(&pp->pr_lock);
   1127 	else
   1128 		pp->pr_flags |= PR_GROWINGNOWAIT;
   1129 
   1130 	char *cp = pool_allocator_alloc(pp, flags);
   1131 	if (__predict_false(cp == NULL))
   1132 		goto out;
   1133 
   1134 	struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
   1135 	if (__predict_false(ph == NULL)) {
   1136 		pool_allocator_free(pp, cp);
   1137 		goto out;
   1138 	}
   1139 
   1140 	if (flags & PR_WAITOK)
   1141 		mutex_enter(&pp->pr_lock);
   1142 	pool_prime_page(pp, cp, ph);
   1143 	pp->pr_npagealloc++;
   1144 	KASSERT(pp->pr_flags & PR_GROWING);
   1145 	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
   1146 	/*
   1147 	 * If anyone was waiting for pool_grow, notify them that we
   1148 	 * may have just done it.
   1149 	 */
   1150 	cv_broadcast(&pp->pr_cv);
   1151 	return 0;
   1152 out:
   1153 	if (flags & PR_WAITOK)
   1154 		mutex_enter(&pp->pr_lock);
   1155 	KASSERT(pp->pr_flags & PR_GROWING);
   1156 	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
   1157 	return ENOMEM;
   1158 }
   1159 
   1160 /*
   1161  * Add N items to the pool.
   1162  */
   1163 int
   1164 pool_prime(struct pool *pp, int n)
   1165 {
   1166 	int newpages;
   1167 	int error = 0;
   1168 
   1169 	mutex_enter(&pp->pr_lock);
   1170 
   1171 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1172 
   1173 	while (newpages > 0) {
   1174 		error = pool_grow(pp, PR_NOWAIT);
   1175 		if (error) {
   1176 			if (error == ERESTART)
   1177 				continue;
   1178 			break;
   1179 		}
   1180 		pp->pr_minpages++;
   1181 		newpages--;
   1182 	}
   1183 
   1184 	if (pp->pr_minpages >= pp->pr_maxpages)
   1185 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1186 
   1187 	mutex_exit(&pp->pr_lock);
   1188 	return error;
   1189 }
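/*
 * Illustrative note (not part of this file): priming a hypothetical
 * "foopool" whose pages hold 50 items each with
 *
 *	pool_prime(&foopool, 128);
 *
 * grows the pool by roundup(128, 50) / 50 = 3 pages and raises the
 * minimum page claim by the same amount, so those pages are kept
 * rather than reclaimed later.
 */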
   1190 
   1191 /*
   1192  * Add a page worth of items to the pool.
   1193  *
   1194  * Note, we must be called with the pool descriptor LOCKED.
   1195  */
   1196 static void
   1197 pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1198 {
   1199 	struct pool_item *pi;
   1200 	void *cp = storage;
   1201 	const unsigned int align = pp->pr_align;
   1202 	const unsigned int ioff = pp->pr_itemoffset;
   1203 	int n;
   1204 
   1205 	KASSERT(mutex_owned(&pp->pr_lock));
   1206 	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
   1207 		(((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
   1208 	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);
   1209 
   1210 	/*
   1211 	 * Insert page header.
   1212 	 */
   1213 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1214 	LIST_INIT(&ph->ph_itemlist);
   1215 	ph->ph_page = storage;
   1216 	ph->ph_nmissing = 0;
   1217 	ph->ph_time = time_uptime;
   1218 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1219 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1220 
   1221 	pp->pr_nidle++;
   1222 
   1223 	/*
   1224 	 * Color this page.
   1225 	 */
   1226 	ph->ph_off = pp->pr_curcolor;
   1227 	cp = (char *)cp + ph->ph_off;
   1228 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1229 		pp->pr_curcolor = 0;
   1230 
   1231 	/*
    1232 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1233 	 */
   1234 	if (ioff != 0)
   1235 		cp = (char *)cp + align - ioff;
   1236 
   1237 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1238 
   1239 	/*
   1240 	 * Insert remaining chunks on the bucket list.
   1241 	 */
   1242 	n = pp->pr_itemsperpage;
   1243 	pp->pr_nitems += n;
   1244 
   1245 	if (pp->pr_roflags & PR_NOTOUCH) {
   1246 		pr_item_notouch_init(pp, ph);
   1247 	} else {
   1248 		while (n--) {
   1249 			pi = (struct pool_item *)cp;
   1250 
   1251 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1252 
   1253 			/* Insert on page list */
   1254 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1255 #ifdef POOL_CHECK_MAGIC
   1256 			pi->pi_magic = PI_MAGIC;
   1257 #endif
   1258 			cp = (char *)cp + pp->pr_size;
   1259 
   1260 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1261 		}
   1262 	}
   1263 
   1264 	/*
   1265 	 * If the pool was depleted, point at the new page.
   1266 	 */
   1267 	if (pp->pr_curpage == NULL)
   1268 		pp->pr_curpage = ph;
   1269 
   1270 	if (++pp->pr_npages > pp->pr_hiwat)
   1271 		pp->pr_hiwat = pp->pr_npages;
   1272 }
   1273 
   1274 /*
    1275  * Used by pool_get() when nitems drops below the low water mark, to
    1276  * bring pr_nitems back up to the low water mark.
   1277  *
   1278  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1279  *
   1280  * Note 2, we must be called with the pool already locked, and we return
   1281  * with it locked.
   1282  */
   1283 static int
   1284 pool_catchup(struct pool *pp)
   1285 {
   1286 	int error = 0;
   1287 
   1288 	while (POOL_NEEDS_CATCHUP(pp)) {
   1289 		error = pool_grow(pp, PR_NOWAIT);
   1290 		if (error) {
   1291 			if (error == ERESTART)
   1292 				continue;
   1293 			break;
   1294 		}
   1295 	}
   1296 	return error;
   1297 }
   1298 
   1299 static void
   1300 pool_update_curpage(struct pool *pp)
   1301 {
   1302 
   1303 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1304 	if (pp->pr_curpage == NULL) {
   1305 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1306 	}
   1307 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
   1308 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
   1309 }
   1310 
   1311 void
   1312 pool_setlowat(struct pool *pp, int n)
   1313 {
   1314 
   1315 	mutex_enter(&pp->pr_lock);
   1316 
   1317 	pp->pr_minitems = n;
   1318 	pp->pr_minpages = (n == 0)
   1319 		? 0
   1320 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1321 
   1322 	/* Make sure we're caught up with the newly-set low water mark. */
   1323 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1324 		/*
   1325 		 * XXX: Should we log a warning?  Should we set up a timeout
   1326 		 * to try again in a second or so?  The latter could break
   1327 		 * a caller's assumptions about interrupt protection, etc.
   1328 		 */
   1329 	}
   1330 
   1331 	mutex_exit(&pp->pr_lock);
   1332 }
   1333 
   1334 void
   1335 pool_sethiwat(struct pool *pp, int n)
   1336 {
   1337 
   1338 	mutex_enter(&pp->pr_lock);
   1339 
   1340 	pp->pr_maxpages = (n == 0)
   1341 		? 0
   1342 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1343 
   1344 	mutex_exit(&pp->pr_lock);
   1345 }
   1346 
   1347 void
   1348 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1349 {
   1350 
   1351 	mutex_enter(&pp->pr_lock);
   1352 
   1353 	pp->pr_hardlimit = n;
   1354 	pp->pr_hardlimit_warning = warnmess;
   1355 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1356 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1357 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1358 
   1359 	/*
   1360 	 * In-line version of pool_sethiwat(), because we don't want to
   1361 	 * release the lock.
   1362 	 */
   1363 	pp->pr_maxpages = (n == 0)
   1364 		? 0
   1365 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1366 
   1367 	mutex_exit(&pp->pr_lock);
   1368 }
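/*
 * Illustrative sketch (not part of this file): the water marks and the
 * hard limit are typically tuned right after the pool is created.  With
 * the hypothetical "foopool" below, the pool keeps enough pages for at
 * least 16 items, frees idle pages once it holds more than about 1024
 * items' worth, and makes pool_get() sleep or fail once 2048 items are
 * outstanding, logging the warning at most once per 60 seconds.
 *
 *	pool_setlowat(&foopool, 16);
 *	pool_sethiwat(&foopool, 1024);
 *	pool_sethardlimit(&foopool, 2048, "foopool: hard limit reached", 60);
 */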
   1369 
   1370 /*
   1371  * Release all complete pages that have not been used recently.
   1372  *
   1373  * Must not be called from interrupt context.
   1374  */
   1375 int
   1376 pool_reclaim(struct pool *pp)
   1377 {
   1378 	struct pool_item_header *ph, *phnext;
   1379 	struct pool_pagelist pq;
   1380 	uint32_t curtime;
   1381 	bool klock;
   1382 	int rv;
   1383 
   1384 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
   1385 
   1386 	if (pp->pr_drain_hook != NULL) {
   1387 		/*
   1388 		 * The drain hook must be called with the pool unlocked.
   1389 		 */
   1390 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1391 	}
   1392 
   1393 	/*
   1394 	 * XXXSMP Because we do not want to cause non-MPSAFE code
   1395 	 * to block.
   1396 	 */
   1397 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
   1398 	    pp->pr_ipl == IPL_SOFTSERIAL) {
   1399 		KERNEL_LOCK(1, NULL);
   1400 		klock = true;
   1401 	} else
   1402 		klock = false;
   1403 
   1404 	/* Reclaim items from the pool's cache (if any). */
   1405 	if (pp->pr_cache != NULL)
   1406 		pool_cache_invalidate(pp->pr_cache);
   1407 
   1408 	if (mutex_tryenter(&pp->pr_lock) == 0) {
   1409 		if (klock) {
   1410 			KERNEL_UNLOCK_ONE(NULL);
   1411 		}
   1412 		return (0);
   1413 	}
   1414 
   1415 	LIST_INIT(&pq);
   1416 
   1417 	curtime = time_uptime;
   1418 
   1419 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1420 		phnext = LIST_NEXT(ph, ph_pagelist);
   1421 
   1422 		/* Check our minimum page claim */
   1423 		if (pp->pr_npages <= pp->pr_minpages)
   1424 			break;
   1425 
   1426 		KASSERT(ph->ph_nmissing == 0);
   1427 		if (curtime - ph->ph_time < pool_inactive_time)
   1428 			continue;
   1429 
   1430 		/*
   1431 		 * If freeing this page would put us below
   1432 		 * the low water mark, stop now.
   1433 		 */
   1434 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1435 		    pp->pr_minitems)
   1436 			break;
   1437 
   1438 		pr_rmpage(pp, ph, &pq);
   1439 	}
   1440 
   1441 	mutex_exit(&pp->pr_lock);
   1442 
   1443 	if (LIST_EMPTY(&pq))
   1444 		rv = 0;
   1445 	else {
   1446 		pr_pagelist_free(pp, &pq);
   1447 		rv = 1;
   1448 	}
   1449 
   1450 	if (klock) {
   1451 		KERNEL_UNLOCK_ONE(NULL);
   1452 	}
   1453 
   1454 	return (rv);
   1455 }
   1456 
   1457 /*
    1458  * Drain pools, one at a time. The drained pool is returned in *ppp.
   1459  *
   1460  * Note, must never be called from interrupt context.
   1461  */
   1462 bool
   1463 pool_drain(struct pool **ppp)
   1464 {
   1465 	bool reclaimed;
   1466 	struct pool *pp;
   1467 
   1468 	KASSERT(!TAILQ_EMPTY(&pool_head));
   1469 
   1470 	pp = NULL;
   1471 
   1472 	/* Find next pool to drain, and add a reference. */
   1473 	mutex_enter(&pool_head_lock);
   1474 	do {
   1475 		if (drainpp == NULL) {
   1476 			drainpp = TAILQ_FIRST(&pool_head);
   1477 		}
   1478 		if (drainpp != NULL) {
   1479 			pp = drainpp;
   1480 			drainpp = TAILQ_NEXT(pp, pr_poollist);
   1481 		}
   1482 		/*
   1483 		 * Skip completely idle pools.  We depend on at least
   1484 		 * one pool in the system being active.
   1485 		 */
   1486 	} while (pp == NULL || pp->pr_npages == 0);
   1487 	pp->pr_refcnt++;
   1488 	mutex_exit(&pool_head_lock);
   1489 
    1490 	/* Drain the cache (if any) and the pool. */
   1491 	reclaimed = pool_reclaim(pp);
   1492 
   1493 	/* Finally, unlock the pool. */
   1494 	mutex_enter(&pool_head_lock);
   1495 	pp->pr_refcnt--;
   1496 	cv_broadcast(&pool_busy);
   1497 	mutex_exit(&pool_head_lock);
   1498 
   1499 	if (ppp != NULL)
   1500 		*ppp = pp;
   1501 
   1502 	return reclaimed;
   1503 }
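/*
 * Illustrative sketch (not part of this file): the page daemon calls
 * pool_drain() repeatedly to rotate through the pools; a minimal
 * caller looks like this (hypothetical use).
 *
 *	struct pool *pp = NULL;
 *	bool released = pool_drain(&pp);
 *	(released is true if pages were freed; pp names the pool tried)
 */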
   1504 
   1505 /*
   1506  * Calculate the total number of pages consumed by pools.
   1507  */
   1508 int
   1509 pool_totalpages(void)
   1510 {
   1511 	struct pool *pp;
   1512 	uint64_t total = 0;
   1513 
   1514 	mutex_enter(&pool_head_lock);
   1515 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1516 		uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
   1517 
   1518 		if ((pp->pr_roflags & PR_RECURSIVE) != 0)
   1519 			bytes -= (pp->pr_nout * pp->pr_size);
   1520 		total += bytes;
   1521 	}
   1522 	mutex_exit(&pool_head_lock);
   1523 
   1524 	return atop(total);
   1525 }
   1526 
   1527 /*
   1528  * Diagnostic helpers.
   1529  */
   1530 
   1531 void
   1532 pool_printall(const char *modif, void (*pr)(const char *, ...))
   1533 {
   1534 	struct pool *pp;
   1535 
   1536 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1537 		pool_printit(pp, modif, pr);
   1538 	}
   1539 }
   1540 
   1541 void
   1542 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1543 {
   1544 
   1545 	if (pp == NULL) {
   1546 		(*pr)("Must specify a pool to print.\n");
   1547 		return;
   1548 	}
   1549 
   1550 	pool_print1(pp, modif, pr);
   1551 }
   1552 
   1553 static void
   1554 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1555     void (*pr)(const char *, ...))
   1556 {
   1557 	struct pool_item_header *ph;
   1558 
   1559 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1560 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
   1561 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
   1562 #ifdef POOL_CHECK_MAGIC
   1563 		struct pool_item *pi;
   1564 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1565 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1566 				if (pi->pi_magic != PI_MAGIC) {
   1567 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1568 					    pi, pi->pi_magic);
   1569 				}
   1570 			}
   1571 		}
   1572 #endif
   1573 	}
   1574 }
   1575 
   1576 static void
   1577 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1578 {
   1579 	struct pool_item_header *ph;
   1580 	pool_cache_t pc;
   1581 	pcg_t *pcg;
   1582 	pool_cache_cpu_t *cc;
   1583 	uint64_t cpuhit, cpumiss;
   1584 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1585 	char c;
   1586 
   1587 	while ((c = *modif++) != '\0') {
   1588 		if (c == 'l')
   1589 			print_log = 1;
   1590 		if (c == 'p')
   1591 			print_pagelist = 1;
   1592 		if (c == 'c')
   1593 			print_cache = 1;
   1594 	}
   1595 
   1596 	if ((pc = pp->pr_cache) != NULL) {
   1597 		(*pr)("POOL CACHE");
   1598 	} else {
   1599 		(*pr)("POOL");
   1600 	}
   1601 
   1602 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1603 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1604 	    pp->pr_roflags);
   1605 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1606 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1607 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1608 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1609 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1610 
   1611 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
   1612 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1613 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1614 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1615 
   1616 	if (print_pagelist == 0)
   1617 		goto skip_pagelist;
   1618 
   1619 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1620 		(*pr)("\n\tempty page list:\n");
   1621 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1622 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1623 		(*pr)("\n\tfull page list:\n");
   1624 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1625 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1626 		(*pr)("\n\tpartial-page list:\n");
   1627 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1628 
   1629 	if (pp->pr_curpage == NULL)
   1630 		(*pr)("\tno current page\n");
   1631 	else
   1632 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1633 
   1634  skip_pagelist:
   1635 	if (print_log == 0)
   1636 		goto skip_log;
   1637 
   1638 	(*pr)("\n");
   1639 
   1640  skip_log:
   1641 
   1642 #define PR_GROUPLIST(pcg)						\
   1643 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1644 	for (i = 0; i < pcg->pcg_size; i++) {				\
   1645 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1646 		    POOL_PADDR_INVALID) {				\
   1647 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1648 			    pcg->pcg_objects[i].pcgo_va,		\
   1649 			    (unsigned long long)			\
   1650 			    pcg->pcg_objects[i].pcgo_pa);		\
   1651 		} else {						\
   1652 			(*pr)("\t\t\t%p\n",				\
   1653 			    pcg->pcg_objects[i].pcgo_va);		\
   1654 		}							\
   1655 	}
   1656 
   1657 	if (pc != NULL) {
   1658 		cpuhit = 0;
   1659 		cpumiss = 0;
   1660 		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
   1661 			if ((cc = pc->pc_cpus[i]) == NULL)
   1662 				continue;
   1663 			cpuhit += cc->cc_hits;
   1664 			cpumiss += cc->cc_misses;
   1665 		}
   1666 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
   1667 		(*pr)("\tcache layer hits %llu misses %llu\n",
   1668 		    pc->pc_hits, pc->pc_misses);
   1669 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
   1670 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
   1671 		    pc->pc_contended);
   1672 		(*pr)("\tcache layer empty groups %u full groups %u\n",
   1673 		    pc->pc_nempty, pc->pc_nfull);
   1674 		if (print_cache) {
   1675 			(*pr)("\tfull cache groups:\n");
   1676 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   1677 			    pcg = pcg->pcg_next) {
   1678 				PR_GROUPLIST(pcg);
   1679 			}
   1680 			(*pr)("\tempty cache groups:\n");
   1681 			for (pcg = pc->pc_emptygroups; pcg != NULL;
   1682 			    pcg = pcg->pcg_next) {
   1683 				PR_GROUPLIST(pcg);
   1684 			}
   1685 		}
   1686 	}
   1687 #undef PR_GROUPLIST
   1688 }
   1689 
   1690 static int
   1691 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1692 {
   1693 	struct pool_item *pi;
   1694 	void *page;
   1695 	int n;
   1696 
   1697 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1698 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1699 		if (page != ph->ph_page &&
   1700 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1701 			if (label != NULL)
   1702 				printf("%s: ", label);
   1703 			printf("pool(%p:%s): page inconsistency: page %p;"
   1704 			       " at page head addr %p (p %p)\n", pp,
   1705 				pp->pr_wchan, ph->ph_page,
   1706 				ph, page);
   1707 			return 1;
   1708 		}
   1709 	}
   1710 
   1711 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1712 		return 0;
   1713 
   1714 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1715 	     pi != NULL;
   1716 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1717 
   1718 #ifdef POOL_CHECK_MAGIC
   1719 		if (pi->pi_magic != PI_MAGIC) {
   1720 			if (label != NULL)
   1721 				printf("%s: ", label);
   1722 			printf("pool(%s): free list modified: magic=%x;"
   1723 			       " page %p; item ordinal %d; addr %p\n",
   1724 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1725 				n, pi);
   1726 			panic("pool");
   1727 		}
   1728 #endif
   1729 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1730 			continue;
   1731 		}
   1732 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1733 		if (page == ph->ph_page)
   1734 			continue;
   1735 
   1736 		if (label != NULL)
   1737 			printf("%s: ", label);
   1738 		printf("pool(%p:%s): page inconsistency: page %p;"
   1739 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1740 			pp->pr_wchan, ph->ph_page,
   1741 			n, pi, page);
   1742 		return 1;
   1743 	}
   1744 	return 0;
   1745 }
   1746 
   1747 
   1748 int
   1749 pool_chk(struct pool *pp, const char *label)
   1750 {
   1751 	struct pool_item_header *ph;
   1752 	int r = 0;
   1753 
   1754 	mutex_enter(&pp->pr_lock);
   1755 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1756 		r = pool_chk_page(pp, label, ph);
   1757 		if (r) {
   1758 			goto out;
   1759 		}
   1760 	}
   1761 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1762 		r = pool_chk_page(pp, label, ph);
   1763 		if (r) {
   1764 			goto out;
   1765 		}
   1766 	}
   1767 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1768 		r = pool_chk_page(pp, label, ph);
   1769 		if (r) {
   1770 			goto out;
   1771 		}
   1772 	}
   1773 
   1774 out:
   1775 	mutex_exit(&pp->pr_lock);
   1776 	return (r);
   1777 }
   1778 
   1779 /*
   1780  * pool_cache_init:
   1781  *
   1782  *	Initialize a pool cache.
   1783  */
   1784 pool_cache_t
   1785 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
   1786     const char *wchan, struct pool_allocator *palloc, int ipl,
   1787     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
   1788 {
   1789 	pool_cache_t pc;
   1790 
   1791 	pc = pool_get(&cache_pool, PR_WAITOK);
   1792 	if (pc == NULL)
   1793 		return NULL;
   1794 
   1795 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
   1796 	   palloc, ipl, ctor, dtor, arg);
   1797 
   1798 	return pc;
   1799 }
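/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original source): how a subsystem might create and use a pool
 * cache.  The "struct frobnicator"/"frob_*" names are hypothetical.
 */
#if 0	/* example only, never compiled */
static pool_cache_t frob_cache;

void
frob_subsystem_init(void)
{
	frob_cache = pool_cache_init(sizeof(struct frobnicator),
	    0, 0, 0, "frobpl", NULL, IPL_NONE,
	    NULL /* ctor */, NULL /* dtor */, NULL /* arg */);
	pool_cache_setlowat(frob_cache, 16);	/* keep a small reserve */
}

struct frobnicator *
frob_alloc(void)
{
	/* PR_WAITOK may sleep; use PR_NOWAIT where sleeping is not allowed. */
	return pool_cache_get(frob_cache, PR_WAITOK);
}

void
frob_free(struct frobnicator *f)
{
	pool_cache_put(frob_cache, f);
}
#endif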
   1800 
   1801 /*
   1802  * pool_cache_bootstrap:
   1803  *
   1804  *	Kernel-private version of pool_cache_init().  The caller
   1805  *	provides initial storage.
   1806  */
   1807 void
   1808 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
   1809     u_int align_offset, u_int flags, const char *wchan,
   1810     struct pool_allocator *palloc, int ipl,
   1811     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
   1812     void *arg)
   1813 {
   1814 	CPU_INFO_ITERATOR cii;
   1815 	pool_cache_t pc1;
   1816 	struct cpu_info *ci;
   1817 	struct pool *pp;
   1818 
   1819 	pp = &pc->pc_pool;
   1820 	if (palloc == NULL && ipl == IPL_NONE) {
   1821 		if (size > PAGE_SIZE) {
   1822 			int bigidx = pool_bigidx(size);
   1823 
   1824 			palloc = &pool_allocator_big[bigidx];
   1825 		} else
   1826 			palloc = &pool_allocator_nointr;
   1827 	}
   1828 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
   1829 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
   1830 
   1831 	if (ctor == NULL) {
   1832 		ctor = (int (*)(void *, void *, int))nullop;
   1833 	}
   1834 	if (dtor == NULL) {
   1835 		dtor = (void (*)(void *, void *))nullop;
   1836 	}
   1837 
   1838 	pc->pc_emptygroups = NULL;
   1839 	pc->pc_fullgroups = NULL;
   1840 	pc->pc_partgroups = NULL;
   1841 	pc->pc_ctor = ctor;
   1842 	pc->pc_dtor = dtor;
   1843 	pc->pc_arg  = arg;
   1844 	pc->pc_hits  = 0;
   1845 	pc->pc_misses = 0;
   1846 	pc->pc_nempty = 0;
   1847 	pc->pc_npart = 0;
   1848 	pc->pc_nfull = 0;
   1849 	pc->pc_contended = 0;
   1850 	pc->pc_refcnt = 0;
   1851 	pc->pc_freecheck = NULL;
   1852 
   1853 	if ((flags & PR_LARGECACHE) != 0) {
   1854 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
   1855 		pc->pc_pcgpool = &pcg_large_pool;
   1856 	} else {
   1857 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
   1858 		pc->pc_pcgpool = &pcg_normal_pool;
   1859 	}
   1860 
   1861 	/* Allocate per-CPU caches. */
   1862 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
   1863 	pc->pc_ncpu = 0;
   1864 	if (ncpu < 2) {
   1865 		/* XXX For sparc: boot CPU is not attached yet. */
   1866 		pool_cache_cpu_init1(curcpu(), pc);
   1867 	} else {
   1868 		for (CPU_INFO_FOREACH(cii, ci)) {
   1869 			pool_cache_cpu_init1(ci, pc);
   1870 		}
   1871 	}
   1872 
   1873 	/* Add to list of all pools. */
   1874 	if (__predict_true(!cold))
   1875 		mutex_enter(&pool_head_lock);
   1876 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
   1877 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
   1878 			break;
   1879 	}
   1880 	if (pc1 == NULL)
   1881 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
   1882 	else
   1883 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
   1884 	if (__predict_true(!cold))
   1885 		mutex_exit(&pool_head_lock);
   1886 
   1887 	membar_sync();
   1888 	pp->pr_cache = pc;
   1889 }
   1890 
   1891 /*
   1892  * pool_cache_destroy:
   1893  *
   1894  *	Destroy a pool cache.
   1895  */
   1896 void
   1897 pool_cache_destroy(pool_cache_t pc)
   1898 {
   1899 
   1900 	pool_cache_bootstrap_destroy(pc);
   1901 	pool_put(&cache_pool, pc);
   1902 }
   1903 
   1904 /*
   1905  * pool_cache_bootstrap_destroy:
   1906  *
   1907  *	Destroy a pool cache.
   1908  */
   1909 void
   1910 pool_cache_bootstrap_destroy(pool_cache_t pc)
   1911 {
   1912 	struct pool *pp = &pc->pc_pool;
   1913 	u_int i;
   1914 
   1915 	/* Remove it from the global list. */
   1916 	mutex_enter(&pool_head_lock);
   1917 	while (pc->pc_refcnt != 0)
   1918 		cv_wait(&pool_busy, &pool_head_lock);
   1919 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
   1920 	mutex_exit(&pool_head_lock);
   1921 
   1922 	/* First, invalidate the entire cache. */
   1923 	pool_cache_invalidate(pc);
   1924 
   1925 	/* Disassociate it from the pool. */
   1926 	mutex_enter(&pp->pr_lock);
   1927 	pp->pr_cache = NULL;
   1928 	mutex_exit(&pp->pr_lock);
   1929 
   1930 	/* Destroy per-CPU data */
   1931 	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
   1932 		pool_cache_invalidate_cpu(pc, i);
   1933 
   1934 	/* Finally, destroy it. */
   1935 	mutex_destroy(&pc->pc_lock);
   1936 	pool_destroy(pp);
   1937 }
   1938 
   1939 /*
   1940  * pool_cache_cpu_init1:
   1941  *
   1942  *	Called for each pool_cache whenever a new CPU is attached.
   1943  */
   1944 static void
   1945 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
   1946 {
   1947 	pool_cache_cpu_t *cc;
   1948 	int index;
   1949 
   1950 	index = ci->ci_index;
   1951 
   1952 	KASSERT(index < __arraycount(pc->pc_cpus));
   1953 
   1954 	if ((cc = pc->pc_cpus[index]) != NULL) {
   1955 		KASSERT(cc->cc_cpuindex == index);
   1956 		return;
   1957 	}
   1958 
   1959 	/*
   1960 	 * The first CPU is 'free'.  This needs to be the case for
   1961 	 * bootstrap - we may not be able to allocate yet.
   1962 	 */
   1963 	if (pc->pc_ncpu == 0) {
   1964 		cc = &pc->pc_cpu0;
   1965 		pc->pc_ncpu = 1;
   1966 	} else {
   1967 		mutex_enter(&pc->pc_lock);
   1968 		pc->pc_ncpu++;
   1969 		mutex_exit(&pc->pc_lock);
   1970 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
   1971 	}
   1972 
   1973 	cc->cc_ipl = pc->pc_pool.pr_ipl;
   1974 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
   1975 	cc->cc_cache = pc;
   1976 	cc->cc_cpuindex = index;
   1977 	cc->cc_hits = 0;
   1978 	cc->cc_misses = 0;
   1979 	cc->cc_current = __UNCONST(&pcg_dummy);
   1980 	cc->cc_previous = __UNCONST(&pcg_dummy);
   1981 
   1982 	pc->pc_cpus[index] = cc;
   1983 }
   1984 
   1985 /*
   1986  * pool_cache_cpu_init:
   1987  *
   1988  *	Called whenever a new CPU is attached.
   1989  */
   1990 void
   1991 pool_cache_cpu_init(struct cpu_info *ci)
   1992 {
   1993 	pool_cache_t pc;
   1994 
   1995 	mutex_enter(&pool_head_lock);
   1996 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
   1997 		pc->pc_refcnt++;
   1998 		mutex_exit(&pool_head_lock);
   1999 
   2000 		pool_cache_cpu_init1(ci, pc);
   2001 
   2002 		mutex_enter(&pool_head_lock);
   2003 		pc->pc_refcnt--;
   2004 		cv_broadcast(&pool_busy);
   2005 	}
   2006 	mutex_exit(&pool_head_lock);
   2007 }
   2008 
   2009 /*
   2010  * pool_cache_reclaim:
   2011  *
   2012  *	Reclaim memory from a pool cache.
   2013  */
   2014 bool
   2015 pool_cache_reclaim(pool_cache_t pc)
   2016 {
   2017 
   2018 	return pool_reclaim(&pc->pc_pool);
   2019 }
   2020 
   2021 static void
   2022 pool_cache_destruct_object1(pool_cache_t pc, void *object)
   2023 {
   2024 	if (pc->pc_pool.pr_redzone) {
   2025 		/*
   2026 		 * The object is marked as invalid. Temporarily mark it as
   2027 		 * valid for the destructor. pool_put below will re-mark it
   2028 		 * as invalid.
   2029 		 */
   2030 		kasan_mark(object, pc->pc_pool.pr_reqsize,
   2031 		    pc->pc_pool.pr_reqsize_with_redzone);
   2032 	}
   2033 
   2034 	(*pc->pc_dtor)(pc->pc_arg, object);
   2035 	pool_put(&pc->pc_pool, object);
   2036 }
   2037 
   2038 /*
   2039  * pool_cache_destruct_object:
   2040  *
   2041  *	Force destruction of an object and its release back into
   2042  *	the pool.
   2043  */
   2044 void
   2045 pool_cache_destruct_object(pool_cache_t pc, void *object)
   2046 {
   2047 
   2048 	FREECHECK_IN(&pc->pc_freecheck, object);
   2049 
   2050 	pool_cache_destruct_object1(pc, object);
   2051 }
   2052 
   2053 /*
   2054  * pool_cache_invalidate_groups:
   2055  *
   2056  *	Invalidate a chain of groups and destruct all objects.
   2057  */
   2058 static void
   2059 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
   2060 {
   2061 	void *object;
   2062 	pcg_t *next;
   2063 	int i;
   2064 
   2065 	for (; pcg != NULL; pcg = next) {
   2066 		next = pcg->pcg_next;
   2067 
   2068 		for (i = 0; i < pcg->pcg_avail; i++) {
   2069 			object = pcg->pcg_objects[i].pcgo_va;
   2070 			pool_cache_destruct_object1(pc, object);
   2071 		}
   2072 
   2073 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
   2074 			pool_put(&pcg_large_pool, pcg);
   2075 		} else {
   2076 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
   2077 			pool_put(&pcg_normal_pool, pcg);
   2078 		}
   2079 	}
   2080 }
   2081 
   2082 /*
   2083  * pool_cache_invalidate:
   2084  *
   2085  *	Invalidate a pool cache (destruct and release all of the
   2086  *	cached objects).  Does not reclaim objects from the pool.
   2087  *
   2088  *	Note: For pool caches that provide constructed objects, there
   2089  *	is an assumption that another level of synchronization is occurring
   2090  *	between the input to the constructor and the cache invalidation.
   2091  *
   2092  *	Invalidation is a costly process and should not be called from
   2093  *	interrupt context.
   2094  */
   2095 void
   2096 pool_cache_invalidate(pool_cache_t pc)
   2097 {
   2098 	uint64_t where;
   2099 	pcg_t *full, *empty, *part;
   2100 
   2101 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
   2102 
   2103 	if (ncpu < 2 || !mp_online) {
   2104 		/*
   2105 		 * We might be called early enough in the boot process
   2106 		 * for the CPU data structures to not be fully initialized.
   2107 		 * In this case, transfer the content of the local CPU's
   2108 		 * cache back into global cache as only this CPU is currently
   2109 		 * running.
   2110 		 */
   2111 		pool_cache_transfer(pc);
   2112 	} else {
   2113 		/*
   2114 		 * Signal all CPUs that they must transfer their local
   2115 		 * cache back to the global pool then wait for the xcall to
   2116 		 * complete.
   2117 		 */
   2118 		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
   2119 		    pc, NULL);
   2120 		xc_wait(where);
   2121 	}
   2122 
   2123 	/* Empty pool caches, then invalidate objects */
   2124 	mutex_enter(&pc->pc_lock);
   2125 	full = pc->pc_fullgroups;
   2126 	empty = pc->pc_emptygroups;
   2127 	part = pc->pc_partgroups;
   2128 	pc->pc_fullgroups = NULL;
   2129 	pc->pc_emptygroups = NULL;
   2130 	pc->pc_partgroups = NULL;
   2131 	pc->pc_nfull = 0;
   2132 	pc->pc_nempty = 0;
   2133 	pc->pc_npart = 0;
   2134 	mutex_exit(&pc->pc_lock);
   2135 
   2136 	pool_cache_invalidate_groups(pc, full);
   2137 	pool_cache_invalidate_groups(pc, empty);
   2138 	pool_cache_invalidate_groups(pc, part);
   2139 }
   2140 
   2141 /*
   2142  * pool_cache_invalidate_cpu:
   2143  *
   2144  *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
   2145  *	identified by its associated index.
    2146  *	It is the caller's responsibility to ensure that no operation is
    2147  *	taking place on this pool cache while doing this invalidation.
    2148  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
    2149  *	pool cached objects from a CPU different from the one currently
    2150  *	running may result in undefined behaviour.
   2151  */
   2152 static void
   2153 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
   2154 {
   2155 	pool_cache_cpu_t *cc;
   2156 	pcg_t *pcg;
   2157 
   2158 	if ((cc = pc->pc_cpus[index]) == NULL)
   2159 		return;
   2160 
   2161 	if ((pcg = cc->cc_current) != &pcg_dummy) {
   2162 		pcg->pcg_next = NULL;
   2163 		pool_cache_invalidate_groups(pc, pcg);
   2164 	}
   2165 	if ((pcg = cc->cc_previous) != &pcg_dummy) {
   2166 		pcg->pcg_next = NULL;
   2167 		pool_cache_invalidate_groups(pc, pcg);
   2168 	}
   2169 	if (cc != &pc->pc_cpu0)
   2170 		pool_put(&cache_cpu_pool, cc);
   2171 
   2172 }
   2173 
   2174 void
   2175 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
   2176 {
   2177 
   2178 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
   2179 }
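/*
 * Illustrative sketch (editorial addition): registering a drain hook.
 * As pool_allocator_alloc() below shows, the hook is invoked with the
 * registered argument and the allocation flags when a PR_NOWAIT page
 * allocation fails, giving the subsystem a chance to release objects.
 * The "frob_*" names are hypothetical.
 */
#if 0	/* example only */
static void
frob_drain(void *arg, int flags)
{
	/* Release whatever cached frobnicators can be spared. */
}

void
frob_register_drain(void)
{
	pool_cache_set_drain_hook(frob_cache, frob_drain, NULL);
}
#endif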
   2180 
   2181 void
   2182 pool_cache_setlowat(pool_cache_t pc, int n)
   2183 {
   2184 
   2185 	pool_setlowat(&pc->pc_pool, n);
   2186 }
   2187 
   2188 void
   2189 pool_cache_sethiwat(pool_cache_t pc, int n)
   2190 {
   2191 
   2192 	pool_sethiwat(&pc->pc_pool, n);
   2193 }
   2194 
   2195 void
   2196 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
   2197 {
   2198 
   2199 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
   2200 }
   2201 
   2202 static bool __noinline
   2203 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
   2204 		    paddr_t *pap, int flags)
   2205 {
   2206 	pcg_t *pcg, *cur;
   2207 	uint64_t ncsw;
   2208 	pool_cache_t pc;
   2209 	void *object;
   2210 
   2211 	KASSERT(cc->cc_current->pcg_avail == 0);
   2212 	KASSERT(cc->cc_previous->pcg_avail == 0);
   2213 
   2214 	pc = cc->cc_cache;
   2215 	cc->cc_misses++;
   2216 
   2217 	/*
   2218 	 * Nothing was available locally.  Try and grab a group
   2219 	 * from the cache.
   2220 	 */
   2221 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2222 		ncsw = curlwp->l_ncsw;
   2223 		mutex_enter(&pc->pc_lock);
   2224 		pc->pc_contended++;
   2225 
   2226 		/*
   2227 		 * If we context switched while locking, then
   2228 		 * our view of the per-CPU data is invalid:
   2229 		 * retry.
   2230 		 */
   2231 		if (curlwp->l_ncsw != ncsw) {
   2232 			mutex_exit(&pc->pc_lock);
   2233 			return true;
   2234 		}
   2235 	}
   2236 
   2237 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
   2238 		/*
   2239 		 * If there's a full group, release our empty
   2240 		 * group back to the cache.  Install the full
   2241 		 * group as cc_current and return.
   2242 		 */
   2243 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
   2244 			KASSERT(cur->pcg_avail == 0);
   2245 			cur->pcg_next = pc->pc_emptygroups;
   2246 			pc->pc_emptygroups = cur;
   2247 			pc->pc_nempty++;
   2248 		}
   2249 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
   2250 		cc->cc_current = pcg;
   2251 		pc->pc_fullgroups = pcg->pcg_next;
   2252 		pc->pc_hits++;
   2253 		pc->pc_nfull--;
   2254 		mutex_exit(&pc->pc_lock);
   2255 		return true;
   2256 	}
   2257 
   2258 	/*
   2259 	 * Nothing available locally or in cache.  Take the slow
   2260 	 * path: fetch a new object from the pool and construct
   2261 	 * it.
   2262 	 */
   2263 	pc->pc_misses++;
   2264 	mutex_exit(&pc->pc_lock);
   2265 	splx(s);
   2266 
   2267 	object = pool_get(&pc->pc_pool, flags);
   2268 	*objectp = object;
   2269 	if (__predict_false(object == NULL)) {
   2270 		KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
   2271 		return false;
   2272 	}
   2273 
   2274 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
   2275 		pool_put(&pc->pc_pool, object);
   2276 		*objectp = NULL;
   2277 		return false;
   2278 	}
   2279 
   2280 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
   2281 	    (pc->pc_pool.pr_align - 1)) == 0);
   2282 
   2283 	if (pap != NULL) {
   2284 #ifdef POOL_VTOPHYS
   2285 		*pap = POOL_VTOPHYS(object);
   2286 #else
   2287 		*pap = POOL_PADDR_INVALID;
   2288 #endif
   2289 	}
   2290 
   2291 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2292 	pool_redzone_fill(&pc->pc_pool, object);
   2293 	pool_cache_kleak_fill(pc, object);
   2294 	return false;
   2295 }
   2296 
   2297 /*
   2298  * pool_cache_get{,_paddr}:
   2299  *
   2300  *	Get an object from a pool cache (optionally returning
   2301  *	the physical address of the object).
   2302  */
   2303 void *
   2304 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
   2305 {
   2306 	pool_cache_cpu_t *cc;
   2307 	pcg_t *pcg;
   2308 	void *object;
   2309 	int s;
   2310 
   2311 	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
   2312 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
   2313 	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
   2314 	    "%s: [%s] is IPL_NONE, but called from interrupt context",
   2315 	    __func__, pc->pc_pool.pr_wchan);
   2316 
   2317 	if (flags & PR_WAITOK) {
   2318 		ASSERT_SLEEPABLE();
   2319 	}
   2320 
   2321 	/* Lock out interrupts and disable preemption. */
   2322 	s = splvm();
   2323 	while (/* CONSTCOND */ true) {
   2324 		/* Try and allocate an object from the current group. */
   2325 		cc = pc->pc_cpus[curcpu()->ci_index];
   2326 		KASSERT(cc->cc_cache == pc);
   2327 	 	pcg = cc->cc_current;
   2328 		if (__predict_true(pcg->pcg_avail > 0)) {
   2329 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
   2330 			if (__predict_false(pap != NULL))
   2331 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
   2332 #if defined(DIAGNOSTIC)
   2333 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
   2334 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
   2335 			KASSERT(object != NULL);
   2336 #endif
   2337 			cc->cc_hits++;
   2338 			splx(s);
   2339 			FREECHECK_OUT(&pc->pc_freecheck, object);
   2340 			pool_redzone_fill(&pc->pc_pool, object);
   2341 			pool_cache_kleak_fill(pc, object);
   2342 			return object;
   2343 		}
   2344 
   2345 		/*
   2346 		 * That failed.  If the previous group isn't empty, swap
   2347 		 * it with the current group and allocate from there.
   2348 		 */
   2349 		pcg = cc->cc_previous;
   2350 		if (__predict_true(pcg->pcg_avail > 0)) {
   2351 			cc->cc_previous = cc->cc_current;
   2352 			cc->cc_current = pcg;
   2353 			continue;
   2354 		}
   2355 
   2356 		/*
   2357 		 * Can't allocate from either group: try the slow path.
   2358 		 * If get_slow() allocated an object for us, or if
   2359 		 * no more objects are available, it will return false.
   2360 		 * Otherwise, we need to retry.
   2361 		 */
   2362 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
   2363 			break;
   2364 	}
   2365 
   2366 	/*
   2367 	 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
   2368 	 * pool_cache_get can fail even in the PR_WAITOK case, if the
   2369 	 * constructor fails.
   2370 	 */
   2371 	return object;
   2372 }
   2373 
   2374 static bool __noinline
   2375 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
   2376 {
   2377 	struct lwp *l = curlwp;
   2378 	pcg_t *pcg, *cur;
   2379 	uint64_t ncsw;
   2380 	pool_cache_t pc;
   2381 
   2382 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
   2383 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
   2384 
   2385 	pc = cc->cc_cache;
   2386 	pcg = NULL;
   2387 	cc->cc_misses++;
   2388 	ncsw = l->l_ncsw;
   2389 
   2390 	/*
   2391 	 * If there are no empty groups in the cache then allocate one
   2392 	 * while still unlocked.
   2393 	 */
   2394 	if (__predict_false(pc->pc_emptygroups == NULL)) {
   2395 		if (__predict_true(!pool_cache_disable)) {
   2396 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
   2397 		}
   2398 		/*
   2399 		 * If pool_get() blocked, then our view of
   2400 		 * the per-CPU data is invalid: retry.
   2401 		 */
   2402 		if (__predict_false(l->l_ncsw != ncsw)) {
   2403 			if (pcg != NULL) {
   2404 				pool_put(pc->pc_pcgpool, pcg);
   2405 			}
   2406 			return true;
   2407 		}
   2408 		if (__predict_true(pcg != NULL)) {
   2409 			pcg->pcg_avail = 0;
   2410 			pcg->pcg_size = pc->pc_pcgsize;
   2411 		}
   2412 	}
   2413 
   2414 	/* Lock the cache. */
   2415 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2416 		mutex_enter(&pc->pc_lock);
   2417 		pc->pc_contended++;
   2418 
   2419 		/*
   2420 		 * If we context switched while locking, then our view of
   2421 		 * the per-CPU data is invalid: retry.
   2422 		 */
   2423 		if (__predict_false(l->l_ncsw != ncsw)) {
   2424 			mutex_exit(&pc->pc_lock);
   2425 			if (pcg != NULL) {
   2426 				pool_put(pc->pc_pcgpool, pcg);
   2427 			}
   2428 			return true;
   2429 		}
   2430 	}
   2431 
   2432 	/* If there are no empty groups in the cache then allocate one. */
   2433 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
   2434 		pcg = pc->pc_emptygroups;
   2435 		pc->pc_emptygroups = pcg->pcg_next;
   2436 		pc->pc_nempty--;
   2437 	}
   2438 
   2439 	/*
    2440 	 * If there's an empty group, release our full group back
   2441 	 * to the cache.  Install the empty group to the local CPU
   2442 	 * and return.
   2443 	 */
   2444 	if (pcg != NULL) {
   2445 		KASSERT(pcg->pcg_avail == 0);
   2446 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
   2447 			cc->cc_previous = pcg;
   2448 		} else {
   2449 			cur = cc->cc_current;
   2450 			if (__predict_true(cur != &pcg_dummy)) {
   2451 				KASSERT(cur->pcg_avail == cur->pcg_size);
   2452 				cur->pcg_next = pc->pc_fullgroups;
   2453 				pc->pc_fullgroups = cur;
   2454 				pc->pc_nfull++;
   2455 			}
   2456 			cc->cc_current = pcg;
   2457 		}
   2458 		pc->pc_hits++;
   2459 		mutex_exit(&pc->pc_lock);
   2460 		return true;
   2461 	}
   2462 
   2463 	/*
   2464 	 * Nothing available locally or in cache, and we didn't
   2465 	 * allocate an empty group.  Take the slow path and destroy
   2466 	 * the object here and now.
   2467 	 */
   2468 	pc->pc_misses++;
   2469 	mutex_exit(&pc->pc_lock);
   2470 	splx(s);
   2471 	pool_cache_destruct_object(pc, object);
   2472 
   2473 	return false;
   2474 }
   2475 
   2476 /*
   2477  * pool_cache_put{,_paddr}:
   2478  *
   2479  *	Put an object back to the pool cache (optionally caching the
   2480  *	physical address of the object).
   2481  */
   2482 void
   2483 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
   2484 {
   2485 	pool_cache_cpu_t *cc;
   2486 	pcg_t *pcg;
   2487 	int s;
   2488 
   2489 	KASSERT(object != NULL);
   2490 	pool_cache_redzone_check(pc, object);
   2491 	FREECHECK_IN(&pc->pc_freecheck, object);
   2492 
   2493 	/* Lock out interrupts and disable preemption. */
   2494 	s = splvm();
   2495 	while (/* CONSTCOND */ true) {
   2496 		/* If the current group isn't full, release it there. */
   2497 		cc = pc->pc_cpus[curcpu()->ci_index];
   2498 		KASSERT(cc->cc_cache == pc);
   2499 	 	pcg = cc->cc_current;
   2500 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2501 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
   2502 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
   2503 			pcg->pcg_avail++;
   2504 			cc->cc_hits++;
   2505 			splx(s);
   2506 			return;
   2507 		}
   2508 
   2509 		/*
   2510 		 * That failed.  If the previous group isn't full, swap
   2511 		 * it with the current group and try again.
   2512 		 */
   2513 		pcg = cc->cc_previous;
   2514 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2515 			cc->cc_previous = cc->cc_current;
   2516 			cc->cc_current = pcg;
   2517 			continue;
   2518 		}
   2519 
   2520 		/*
   2521 		 * Can't free to either group: try the slow path.
   2522 		 * If put_slow() releases the object for us, it
   2523 		 * will return false.  Otherwise we need to retry.
   2524 		 */
   2525 		if (!pool_cache_put_slow(cc, s, object))
   2526 			break;
   2527 	}
   2528 }
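/*
 * Editorial summary of the caching scheme implemented by
 * pool_cache_get_paddr()/pool_cache_put_paddr() above: each CPU keeps
 * two groups of constructed objects, cc_current and cc_previous, and
 * the fast paths allocate from / free into them with interrupts
 * blocked but no locks taken.  Only when both groups are exhausted
 * (get) or both are full (put) do the slow paths take pc_lock and
 * exchange whole groups with the global pc_fullgroups/pc_emptygroups
 * lists, falling back to pool_get()/pool_put() plus the constructor
 * or destructor as a last resort.
 */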
   2529 
   2530 /*
   2531  * pool_cache_transfer:
   2532  *
   2533  *	Transfer objects from the per-CPU cache to the global cache.
   2534  *	Run within a cross-call thread.
   2535  */
   2536 static void
   2537 pool_cache_transfer(pool_cache_t pc)
   2538 {
   2539 	pool_cache_cpu_t *cc;
   2540 	pcg_t *prev, *cur, **list;
   2541 	int s;
   2542 
   2543 	s = splvm();
   2544 	mutex_enter(&pc->pc_lock);
   2545 	cc = pc->pc_cpus[curcpu()->ci_index];
   2546 	cur = cc->cc_current;
   2547 	cc->cc_current = __UNCONST(&pcg_dummy);
   2548 	prev = cc->cc_previous;
   2549 	cc->cc_previous = __UNCONST(&pcg_dummy);
   2550 	if (cur != &pcg_dummy) {
   2551 		if (cur->pcg_avail == cur->pcg_size) {
   2552 			list = &pc->pc_fullgroups;
   2553 			pc->pc_nfull++;
   2554 		} else if (cur->pcg_avail == 0) {
   2555 			list = &pc->pc_emptygroups;
   2556 			pc->pc_nempty++;
   2557 		} else {
   2558 			list = &pc->pc_partgroups;
   2559 			pc->pc_npart++;
   2560 		}
   2561 		cur->pcg_next = *list;
   2562 		*list = cur;
   2563 	}
   2564 	if (prev != &pcg_dummy) {
   2565 		if (prev->pcg_avail == prev->pcg_size) {
   2566 			list = &pc->pc_fullgroups;
   2567 			pc->pc_nfull++;
   2568 		} else if (prev->pcg_avail == 0) {
   2569 			list = &pc->pc_emptygroups;
   2570 			pc->pc_nempty++;
   2571 		} else {
   2572 			list = &pc->pc_partgroups;
   2573 			pc->pc_npart++;
   2574 		}
   2575 		prev->pcg_next = *list;
   2576 		*list = prev;
   2577 	}
   2578 	mutex_exit(&pc->pc_lock);
   2579 	splx(s);
   2580 }
   2581 
   2582 /*
   2583  * Pool backend allocators.
   2584  *
   2585  * Each pool has a backend allocator that handles allocation, deallocation,
   2586  * and any additional draining that might be needed.
   2587  *
   2588  * We provide two standard allocators:
   2589  *
   2590  *	pool_allocator_kmem - the default when no allocator is specified
   2591  *
   2592  *	pool_allocator_nointr - used for pools that will not be accessed
   2593  *	in interrupt context.
   2594  */
   2595 void	*pool_page_alloc(struct pool *, int);
   2596 void	pool_page_free(struct pool *, void *);
   2597 
   2598 #ifdef POOL_SUBPAGE
   2599 struct pool_allocator pool_allocator_kmem_fullpage = {
   2600 	.pa_alloc = pool_page_alloc,
   2601 	.pa_free = pool_page_free,
   2602 	.pa_pagesz = 0
   2603 };
   2604 #else
   2605 struct pool_allocator pool_allocator_kmem = {
   2606 	.pa_alloc = pool_page_alloc,
   2607 	.pa_free = pool_page_free,
   2608 	.pa_pagesz = 0
   2609 };
   2610 #endif
   2611 
   2612 #ifdef POOL_SUBPAGE
   2613 struct pool_allocator pool_allocator_nointr_fullpage = {
   2614 	.pa_alloc = pool_page_alloc,
   2615 	.pa_free = pool_page_free,
   2616 	.pa_pagesz = 0
   2617 };
   2618 #else
   2619 struct pool_allocator pool_allocator_nointr = {
   2620 	.pa_alloc = pool_page_alloc,
   2621 	.pa_free = pool_page_free,
   2622 	.pa_pagesz = 0
   2623 };
   2624 #endif
   2625 
   2626 #ifdef POOL_SUBPAGE
   2627 void	*pool_subpage_alloc(struct pool *, int);
   2628 void	pool_subpage_free(struct pool *, void *);
   2629 
   2630 struct pool_allocator pool_allocator_kmem = {
   2631 	.pa_alloc = pool_subpage_alloc,
   2632 	.pa_free = pool_subpage_free,
   2633 	.pa_pagesz = POOL_SUBPAGE
   2634 };
   2635 
   2636 struct pool_allocator pool_allocator_nointr = {
   2637 	.pa_alloc = pool_subpage_alloc,
   2638 	.pa_free = pool_subpage_free,
   2639 	.pa_pagesz = POOL_SUBPAGE
   2640 };
   2641 #endif /* POOL_SUBPAGE */
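/*
 * Illustrative sketch (editorial addition): a subsystem with special
 * backing-store requirements can pass its own struct pool_allocator
 * to pool_init()/pool_cache_init() instead of the standard ones above.
 * The "mysubsys_*" names are hypothetical; pa_pagesz is left 0 here,
 * as in the standard allocators, to accept the default page size.
 */
#if 0	/* example only */
static void *
mysubsys_page_alloc(struct pool *pp, int flags)
{
	/* e.g. allocate from a private arena; this sketch just reuses the default. */
	return pool_page_alloc(pp, flags);
}

static void
mysubsys_page_free(struct pool *pp, void *v)
{
	pool_page_free(pp, v);
}

struct pool_allocator mysubsys_allocator = {
	.pa_alloc = mysubsys_page_alloc,
	.pa_free = mysubsys_page_free,
	.pa_pagesz = 0,
};
#endif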
   2642 
   2643 struct pool_allocator pool_allocator_big[] = {
   2644 	{
   2645 		.pa_alloc = pool_page_alloc,
   2646 		.pa_free = pool_page_free,
   2647 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
   2648 	},
   2649 	{
   2650 		.pa_alloc = pool_page_alloc,
   2651 		.pa_free = pool_page_free,
   2652 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
   2653 	},
   2654 	{
   2655 		.pa_alloc = pool_page_alloc,
   2656 		.pa_free = pool_page_free,
   2657 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
   2658 	},
   2659 	{
   2660 		.pa_alloc = pool_page_alloc,
   2661 		.pa_free = pool_page_free,
   2662 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
   2663 	},
   2664 	{
   2665 		.pa_alloc = pool_page_alloc,
   2666 		.pa_free = pool_page_free,
   2667 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
   2668 	},
   2669 	{
   2670 		.pa_alloc = pool_page_alloc,
   2671 		.pa_free = pool_page_free,
   2672 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
   2673 	},
   2674 	{
   2675 		.pa_alloc = pool_page_alloc,
   2676 		.pa_free = pool_page_free,
   2677 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
   2678 	},
   2679 	{
   2680 		.pa_alloc = pool_page_alloc,
   2681 		.pa_free = pool_page_free,
   2682 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
   2683 	}
   2684 };
   2685 
   2686 static int
   2687 pool_bigidx(size_t size)
   2688 {
   2689 	int i;
   2690 
   2691 	for (i = 0; i < __arraycount(pool_allocator_big); i++) {
   2692 		if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
   2693 			return i;
   2694 	}
   2695 	panic("pool item size %zu too large, use a custom allocator", size);
   2696 }
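/*
 * Editorial note: pool_bigidx() returns the smallest index i such that
 * (1 << (i + POOL_ALLOCATOR_BIG_BASE)) >= size, i.e. it picks the
 * smallest power-of-two "big" page size from pool_allocator_big[] that
 * can hold one item.  Sizes beyond the largest entry (the base shift
 * plus 7 doublings) cause a panic and require a custom allocator.
 */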
   2697 
   2698 static void *
   2699 pool_allocator_alloc(struct pool *pp, int flags)
   2700 {
   2701 	struct pool_allocator *pa = pp->pr_alloc;
   2702 	void *res;
   2703 
   2704 	res = (*pa->pa_alloc)(pp, flags);
   2705 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2706 		/*
   2707 		 * We only run the drain hook here if PR_NOWAIT.
   2708 		 * In other cases, the hook will be run in
   2709 		 * pool_reclaim().
   2710 		 */
   2711 		if (pp->pr_drain_hook != NULL) {
   2712 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2713 			res = (*pa->pa_alloc)(pp, flags);
   2714 		}
   2715 	}
   2716 	return res;
   2717 }
   2718 
   2719 static void
   2720 pool_allocator_free(struct pool *pp, void *v)
   2721 {
   2722 	struct pool_allocator *pa = pp->pr_alloc;
   2723 
   2724 	if (pp->pr_redzone) {
   2725 		kasan_mark(v, pa->pa_pagesz, pa->pa_pagesz);
   2726 	}
   2727 	(*pa->pa_free)(pp, v);
   2728 }
   2729 
   2730 void *
   2731 pool_page_alloc(struct pool *pp, int flags)
   2732 {
   2733 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
   2734 	vmem_addr_t va;
   2735 	int ret;
   2736 
   2737 	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
   2738 	    vflags | VM_INSTANTFIT, &va);
   2739 
   2740 	return ret ? NULL : (void *)va;
   2741 }
   2742 
   2743 void
   2744 pool_page_free(struct pool *pp, void *v)
   2745 {
   2746 
   2747 	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
   2748 }
   2749 
   2750 static void *
   2751 pool_page_alloc_meta(struct pool *pp, int flags)
   2752 {
   2753 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
   2754 	vmem_addr_t va;
   2755 	int ret;
   2756 
   2757 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
   2758 	    vflags | VM_INSTANTFIT, &va);
   2759 
   2760 	return ret ? NULL : (void *)va;
   2761 }
   2762 
   2763 static void
   2764 pool_page_free_meta(struct pool *pp, void *v)
   2765 {
   2766 
   2767 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
   2768 }
   2769 
   2770 #ifdef KLEAK
   2771 static void
   2772 pool_kleak_fill(struct pool *pp, void *p)
   2773 {
   2774 	if (__predict_false(pp->pr_roflags & PR_NOTOUCH)) {
   2775 		return;
   2776 	}
   2777 	kleak_fill_area(p, pp->pr_size);
   2778 }
   2779 
   2780 static void
   2781 pool_cache_kleak_fill(pool_cache_t pc, void *p)
   2782 {
   2783 	if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
   2784 		return;
   2785 	}
   2786 	pool_kleak_fill(&pc->pc_pool, p);
   2787 }
   2788 #endif
   2789 
   2790 #ifdef POOL_REDZONE
   2791 #if defined(_LP64)
   2792 # define PRIME 0x9e37fffffffc0000UL
   2793 #else /* defined(_LP64) */
   2794 # define PRIME 0x9e3779b1
   2795 #endif /* defined(_LP64) */
   2796 #define STATIC_BYTE	0xFE
   2797 CTASSERT(POOL_REDZONE_SIZE > 1);
   2798 
   2799 #ifndef KASAN
   2800 static inline uint8_t
   2801 pool_pattern_generate(const void *p)
   2802 {
   2803 	return (uint8_t)(((uintptr_t)p) * PRIME
   2804 	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
   2805 }
   2806 #endif
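/*
 * Editorial note: in the non-KASAN case, pool_pattern_generate() maps
 * each red-zone byte's address to a pseudo-random expected value by
 * multiplying the address by the large constant PRIME and keeping the
 * most significant byte of the product.  pool_redzone_fill() stores
 * that value and pool_redzone_check() recomputes it from the address
 * alone, so no per-item bookkeeping is needed to detect an overwrite.
 */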
   2807 
   2808 static void
   2809 pool_redzone_init(struct pool *pp, size_t requested_size)
   2810 {
   2811 	size_t redzsz;
   2812 	size_t nsz;
   2813 
   2814 #ifdef KASAN
   2815 	redzsz = requested_size;
   2816 	kasan_add_redzone(&redzsz);
   2817 	redzsz -= requested_size;
   2818 #else
   2819 	redzsz = POOL_REDZONE_SIZE;
   2820 #endif
   2821 
   2822 	if (pp->pr_roflags & PR_NOTOUCH) {
   2823 		pp->pr_reqsize = 0;
   2824 		pp->pr_redzone = false;
   2825 		return;
   2826 	}
   2827 
   2828 	/*
   2829 	 * We may have extended the requested size earlier; check if
   2830 	 * there's naturally space in the padding for a red zone.
   2831 	 */
   2832 	if (pp->pr_size - requested_size >= redzsz) {
   2833 		pp->pr_reqsize = requested_size;
   2834 		pp->pr_reqsize_with_redzone = requested_size + redzsz;
   2835 		pp->pr_redzone = true;
   2836 		return;
   2837 	}
   2838 
   2839 	/*
    2840 	 * No space in the natural padding; check if we can extend the
    2841 	 * size of the pool a bit.
   2842 	 */
   2843 	nsz = roundup(pp->pr_size + redzsz, pp->pr_align);
   2844 	if (nsz <= pp->pr_alloc->pa_pagesz) {
   2845 		/* Ok, we can */
   2846 		pp->pr_size = nsz;
   2847 		pp->pr_reqsize = requested_size;
   2848 		pp->pr_reqsize_with_redzone = requested_size + redzsz;
   2849 		pp->pr_redzone = true;
   2850 	} else {
   2851 		/* No space for a red zone... snif :'( */
   2852 		pp->pr_reqsize = 0;
   2853 		pp->pr_redzone = false;
   2854 		printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
   2855 	}
   2856 }
   2857 
   2858 static void
   2859 pool_redzone_fill(struct pool *pp, void *p)
   2860 {
   2861 	if (!pp->pr_redzone)
   2862 		return;
   2863 #ifdef KASAN
   2864 	kasan_mark(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone);
   2865 #else
   2866 	uint8_t *cp, pat;
   2867 	const uint8_t *ep;
   2868 
   2869 	cp = (uint8_t *)p + pp->pr_reqsize;
   2870 	ep = cp + POOL_REDZONE_SIZE;
   2871 
   2872 	/*
   2873 	 * We really don't want the first byte of the red zone to be '\0';
   2874 	 * an off-by-one in a string may not be properly detected.
   2875 	 */
   2876 	pat = pool_pattern_generate(cp);
   2877 	*cp = (pat == '\0') ? STATIC_BYTE: pat;
   2878 	cp++;
   2879 
   2880 	while (cp < ep) {
   2881 		*cp = pool_pattern_generate(cp);
   2882 		cp++;
   2883 	}
   2884 #endif
   2885 }
   2886 
   2887 static void
   2888 pool_redzone_check(struct pool *pp, void *p)
   2889 {
   2890 	if (!pp->pr_redzone)
   2891 		return;
   2892 #ifdef KASAN
   2893 	kasan_mark(p, 0, pp->pr_reqsize_with_redzone);
   2894 #else
   2895 	uint8_t *cp, pat, expected;
   2896 	const uint8_t *ep;
   2897 
   2898 	cp = (uint8_t *)p + pp->pr_reqsize;
   2899 	ep = cp + POOL_REDZONE_SIZE;
   2900 
   2901 	pat = pool_pattern_generate(cp);
   2902 	expected = (pat == '\0') ? STATIC_BYTE: pat;
   2903 	if (__predict_false(expected != *cp)) {
   2904 		printf("%s: %p: 0x%02x != 0x%02x\n",
   2905 		   __func__, cp, *cp, expected);
   2906 	}
   2907 	cp++;
   2908 
   2909 	while (cp < ep) {
   2910 		expected = pool_pattern_generate(cp);
   2911 		if (__predict_false(*cp != expected)) {
   2912 			printf("%s: %p: 0x%02x != 0x%02x\n",
   2913 			   __func__, cp, *cp, expected);
   2914 		}
   2915 		cp++;
   2916 	}
   2917 #endif
   2918 }
   2919 
   2920 static void
   2921 pool_cache_redzone_check(pool_cache_t pc, void *p)
   2922 {
   2923 #ifdef KASAN
   2924 	/* If there is a ctor/dtor, leave the data as valid. */
   2925 	if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
   2926 		return;
   2927 	}
   2928 #endif
   2929 	pool_redzone_check(&pc->pc_pool, p);
   2930 }
   2931 
   2932 #endif /* POOL_REDZONE */
   2933 
   2934 
   2935 #ifdef POOL_SUBPAGE
   2936 /* Sub-page allocator, for machines with large hardware pages. */
   2937 void *
   2938 pool_subpage_alloc(struct pool *pp, int flags)
   2939 {
   2940 	return pool_get(&psppool, flags);
   2941 }
   2942 
   2943 void
   2944 pool_subpage_free(struct pool *pp, void *v)
   2945 {
   2946 	pool_put(&psppool, v);
   2947 }
   2948 
   2949 #endif /* POOL_SUBPAGE */
   2950 
   2951 #if defined(DDB)
   2952 static bool
   2953 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2954 {
   2955 
   2956 	return (uintptr_t)ph->ph_page <= addr &&
   2957 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
   2958 }
   2959 
   2960 static bool
   2961 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
   2962 {
   2963 
   2964 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
   2965 }
   2966 
   2967 static bool
   2968 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
   2969 {
   2970 	int i;
   2971 
   2972 	if (pcg == NULL) {
   2973 		return false;
   2974 	}
   2975 	for (i = 0; i < pcg->pcg_avail; i++) {
   2976 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
   2977 			return true;
   2978 		}
   2979 	}
   2980 	return false;
   2981 }
   2982 
   2983 static bool
   2984 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2985 {
   2986 
   2987 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
   2988 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
   2989 		pool_item_bitmap_t *bitmap =
   2990 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
   2991 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
   2992 
   2993 		return (*bitmap & mask) == 0;
   2994 	} else {
   2995 		struct pool_item *pi;
   2996 
   2997 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   2998 			if (pool_in_item(pp, pi, addr)) {
   2999 				return false;
   3000 			}
   3001 		}
   3002 		return true;
   3003 	}
   3004 }
   3005 
   3006 void
   3007 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   3008 {
   3009 	struct pool *pp;
   3010 
   3011 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   3012 		struct pool_item_header *ph;
   3013 		uintptr_t item;
   3014 		bool allocated = true;
   3015 		bool incache = false;
   3016 		bool incpucache = false;
   3017 		char cpucachestr[32];
   3018 
   3019 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
   3020 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   3021 				if (pool_in_page(pp, ph, addr)) {
   3022 					goto found;
   3023 				}
   3024 			}
   3025 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   3026 				if (pool_in_page(pp, ph, addr)) {
   3027 					allocated =
   3028 					    pool_allocated(pp, ph, addr);
   3029 					goto found;
   3030 				}
   3031 			}
   3032 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   3033 				if (pool_in_page(pp, ph, addr)) {
   3034 					allocated = false;
   3035 					goto found;
   3036 				}
   3037 			}
   3038 			continue;
   3039 		} else {
   3040 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
   3041 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
   3042 				continue;
   3043 			}
   3044 			allocated = pool_allocated(pp, ph, addr);
   3045 		}
   3046 found:
   3047 		if (allocated && pp->pr_cache) {
   3048 			pool_cache_t pc = pp->pr_cache;
   3049 			struct pool_cache_group *pcg;
   3050 			int i;
   3051 
   3052 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   3053 			    pcg = pcg->pcg_next) {
   3054 				if (pool_in_cg(pp, pcg, addr)) {
   3055 					incache = true;
   3056 					goto print;
   3057 				}
   3058 			}
   3059 			for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
   3060 				pool_cache_cpu_t *cc;
   3061 
   3062 				if ((cc = pc->pc_cpus[i]) == NULL) {
   3063 					continue;
   3064 				}
   3065 				if (pool_in_cg(pp, cc->cc_current, addr) ||
   3066 				    pool_in_cg(pp, cc->cc_previous, addr)) {
   3067 					struct cpu_info *ci =
   3068 					    cpu_lookup(i);
   3069 
   3070 					incpucache = true;
   3071 					snprintf(cpucachestr,
   3072 					    sizeof(cpucachestr),
   3073 					    "cached by CPU %u",
   3074 					    ci->ci_index);
   3075 					goto print;
   3076 				}
   3077 			}
   3078 		}
   3079 print:
   3080 		item = (uintptr_t)ph->ph_page + ph->ph_off;
   3081 		item = item + rounddown(addr - item, pp->pr_size);
   3082 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
    3083 		    (void *)addr, (void *)item, (size_t)(addr - item),
   3084 		    pp->pr_wchan,
   3085 		    incpucache ? cpucachestr :
   3086 		    incache ? "cached" : allocated ? "allocated" : "free");
   3087 	}
   3088 }
   3089 #endif /* defined(DDB) */
   3090 
   3091 static int
   3092 pool_sysctl(SYSCTLFN_ARGS)
   3093 {
   3094 	struct pool_sysctl data;
   3095 	struct pool *pp;
   3096 	struct pool_cache *pc;
   3097 	pool_cache_cpu_t *cc;
   3098 	int error;
   3099 	size_t i, written;
   3100 
   3101 	if (oldp == NULL) {
   3102 		*oldlenp = 0;
   3103 		TAILQ_FOREACH(pp, &pool_head, pr_poollist)
   3104 			*oldlenp += sizeof(data);
   3105 		return 0;
   3106 	}
   3107 
   3108 	memset(&data, 0, sizeof(data));
   3109 	error = 0;
   3110 	written = 0;
   3111 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   3112 		if (written + sizeof(data) > *oldlenp)
   3113 			break;
   3114 		strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
   3115 		data.pr_pagesize = pp->pr_alloc->pa_pagesz;
   3116 		data.pr_flags = pp->pr_roflags | pp->pr_flags;
   3117 #define COPY(field) data.field = pp->field
   3118 		COPY(pr_size);
   3119 
   3120 		COPY(pr_itemsperpage);
   3121 		COPY(pr_nitems);
   3122 		COPY(pr_nout);
   3123 		COPY(pr_hardlimit);
   3124 		COPY(pr_npages);
   3125 		COPY(pr_minpages);
   3126 		COPY(pr_maxpages);
   3127 
   3128 		COPY(pr_nget);
   3129 		COPY(pr_nfail);
   3130 		COPY(pr_nput);
   3131 		COPY(pr_npagealloc);
   3132 		COPY(pr_npagefree);
   3133 		COPY(pr_hiwat);
   3134 		COPY(pr_nidle);
   3135 #undef COPY
   3136 
   3137 		data.pr_cache_nmiss_pcpu = 0;
   3138 		data.pr_cache_nhit_pcpu = 0;
   3139 		if (pp->pr_cache) {
   3140 			pc = pp->pr_cache;
   3141 			data.pr_cache_meta_size = pc->pc_pcgsize;
   3142 			data.pr_cache_nfull = pc->pc_nfull;
   3143 			data.pr_cache_npartial = pc->pc_npart;
   3144 			data.pr_cache_nempty = pc->pc_nempty;
   3145 			data.pr_cache_ncontended = pc->pc_contended;
   3146 			data.pr_cache_nmiss_global = pc->pc_misses;
   3147 			data.pr_cache_nhit_global = pc->pc_hits;
   3148 			for (i = 0; i < pc->pc_ncpu; ++i) {
   3149 				cc = pc->pc_cpus[i];
   3150 				if (cc == NULL)
   3151 					continue;
   3152 				data.pr_cache_nmiss_pcpu += cc->cc_misses;
   3153 				data.pr_cache_nhit_pcpu += cc->cc_hits;
   3154 			}
   3155 		} else {
   3156 			data.pr_cache_meta_size = 0;
   3157 			data.pr_cache_nfull = 0;
   3158 			data.pr_cache_npartial = 0;
   3159 			data.pr_cache_nempty = 0;
   3160 			data.pr_cache_ncontended = 0;
   3161 			data.pr_cache_nmiss_global = 0;
   3162 			data.pr_cache_nhit_global = 0;
   3163 		}
   3164 
   3165 		error = sysctl_copyout(l, &data, oldp, sizeof(data));
   3166 		if (error)
   3167 			break;
   3168 		written += sizeof(data);
   3169 		oldp = (char *)oldp + sizeof(data);
   3170 	}
   3171 
   3172 	*oldlenp = written;
   3173 	return error;
   3174 }
   3175 
   3176 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
   3177 {
   3178 	const struct sysctlnode *rnode = NULL;
   3179 
   3180 	sysctl_createv(clog, 0, NULL, &rnode,
   3181 		       CTLFLAG_PERMANENT,
   3182 		       CTLTYPE_STRUCT, "pool",
   3183 		       SYSCTL_DESCR("Get pool statistics"),
   3184 		       pool_sysctl, 0, NULL, 0,
   3185 		       CTL_KERN, CTL_CREATE, CTL_EOL);
   3186 }
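/*
 * Illustrative sketch (editorial addition): reading the statistics
 * exported above from userland.  The node is created under CTL_KERN
 * with the name "pool" and hands back an array of struct pool_sysctl;
 * the field types are assumed from the copy loop above, so the example
 * prints through uintmax_t casts to stay width-agnostic.
 */
#if 0	/* userland example only */
#include <sys/sysctl.h>
#include <sys/pool.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int
main(void)
{
	struct pool_sysctl *pools;
	size_t len, i;

	/* A NULL oldp asks the handler only for the required length. */
	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
		return 1;
	if ((pools = malloc(len)) == NULL)
		return 1;
	if (sysctlbyname("kern.pool", pools, &len, NULL, 0) == -1)
		return 1;
	for (i = 0; i < len / sizeof(pools[0]); i++)
		printf("%s: %ju gets, %ju puts\n", pools[i].pr_wchan,
		    (uintmax_t)pools[i].pr_nget, (uintmax_t)pools[i].pr_nput);
	free(pools);
	return 0;
}
#endif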
   3187