/*	$NetBSD: subr_pool.c,v 1.228 2018/12/02 21:00:13 maxv Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
 * Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.228 2018/12/02 21:00:13 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_kleak.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vmem.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/asan.h>

#include <uvm/uvm_extern.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */

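/*
 * Example of typical pool usage (illustrative sketch; the "foo" names
 * are hypothetical):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Passing a NULL allocator selects the default page allocator, and
 * pool_destroy() tears the pool down once every item has been returned.
 */
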
/* List of all pools. Non-static, as it is needed by 'vmstat -m'. */
TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

#if defined(KASAN)
#define POOL_REDZONE
#endif

#ifdef POOL_REDZONE
# ifdef KASAN
#  define POOL_REDZONE_SIZE 8
# else
#  define POOL_REDZONE_SIZE 2
# endif
static void pool_redzone_init(struct pool *, size_t);
static void pool_redzone_fill(struct pool *, void *);
static void pool_redzone_check(struct pool *, void *);
#else
# define pool_redzone_init(pp, sz)	/* NOTHING */
# define pool_redzone_fill(pp, ptr)	/* NOTHING */
# define pool_redzone_check(pp, ptr)	/* NOTHING */
#endif

#ifdef KLEAK
static void pool_kleak_fill(struct pool *, void *);
static void pool_cache_kleak_fill(pool_cache_t, void *);
#else
#define pool_kleak_fill(pp, ptr)	__nothing
#define pool_cache_kleak_fill(pc, ptr)	__nothing
#endif

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	.pa_alloc = pool_page_alloc_meta,
	.pa_free = pool_page_free_meta,
	.pa_pagesz = 0
};

#define POOL_ALLOCATOR_BIG_BASE 13
extern struct pool_allocator pool_allocator_big[];
static int pool_bigidx(size_t);

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */

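/*
 * Example of typical pool cache usage (illustrative sketch; the "bar"
 * names are hypothetical):
 *
 *	bar_cache = pool_cache_init(sizeof(struct bar), coherency_unit,
 *	    0, 0, "barcache", NULL, IPL_NONE, bar_ctor, bar_dtor, NULL);
 *
 *	struct bar *b = pool_cache_get(bar_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(bar_cache, b);
 *
 * bar_ctor()/bar_dtor() run only when an object moves between the
 * cache and the backing pool, so a get/put pair that is satisfied from
 * a cache group pays neither construction nor destruction cost.
 */
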
static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

pool_cache_t pnbuf_cache;	/* pathname buffer cache */

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
				    void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
				    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_transfer(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...) __printflike(1, 2));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...) __printflike(1, 2));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

static inline unsigned int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK);

	KASSERT((*bitmap & mask) == 0);
	*bitmap |= mask;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1U << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline void
pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}

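/*
 * Worked example of the PR_NOTOUCH bitmap (illustrative): with
 * pr_itemsperpage = 40 and BITMAP_SIZE = 32, ph_bitmap spans
 * howmany(40, 32) = 2 words.  pr_item_notouch_init() sets every bit
 * (all items free).  pr_item_notouch_get() takes the first set bit;
 * e.g. if ffs32() on word 1 returns bit position 3, the item index is
 * 1 * 32 + 2 = 34 and that bit is cleared.  pr_item_notouch_put()
 * recomputes the index from the item's address and sets the bit again.
 */
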
static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider a pool_item_header with a smaller ph_page to be
	 * "bigger".  (This unnatural ordering is for the benefit of
	 * pr_find_pagehead.)
	 */

	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

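/*
 * Worked example of the reversed ordering (illustrative): suppose a
 * PR_NOALIGN pool has page headers for pages at 0x1000 and 0x2000, and
 * pr_find_pagehead_noalign() looks up an item at 0x1234 with
 * tmp.ph_page = 0x1234.  In the descending order above the tree reads
 * [0x2000, 0x1000] and tmp sorts between the two; if the failed
 * SPLAY_FIND() leaves 0x2000 (which compares "before" tmp) at the
 * root, one SPLAY_NEXT() step lands on 0x1000, the header with the
 * largest ph_page not exceeding the item address, i.e. the page that
 * contains the item.
 */
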
static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle != 0);
		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
		    "nitems=%u < itemsperpage=%u",
		    pp->pr_nitems, pp->pr_itemsperpage);
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize the pool subsystem: the global locks and the private
 * metadata pools (page headers, cache magazines, cache structures).
 */
void
pool_subsystem_init(void)
{
	size_t size;
	int idx;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 */
	for (idx = 0; idx < PHPOOL_MAX; idx++) {
		static char phpool_names[PHPOOL_MAX][6+1+6+1];
		int nelem;
		size_t sz;

		nelem = PHPOOL_FREELIST_NELEM(idx);
		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
		    "phpool-%d", nelem);
		sz = sizeof(struct pool_item_header);
		if (nelem) {
			sz = offsetof(struct pool_item_header,
			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
		}
		pool_init(&phpool[idx], sz, 0, 0, 0,
		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
	}
#ifdef POOL_SUBPAGE
	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
	    "pcgnormal", &pool_allocator_meta, IPL_VM);

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
	    "pcglarge", &pool_allocator_meta, IPL_VM);

	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before kmem(9) is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t trysize, phsize, prsize;
	int off, slack;

#ifdef DEBUG
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("%s: [%s] already initialised", __func__,
			    wchan);
	}
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if (!cold)
		mutex_enter(&pool_allocator_lock);
	if (palloc->pa_refcnt++ == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}
	if (!cold)
		mutex_exit(&pool_allocator_lock);

	if (align == 0)
		align = ALIGN(1);

	prsize = size;
	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
		prsize = sizeof(struct pool_item);

	prsize = roundup(prsize, align);
	KASSERTMSG((prsize <= palloc->pa_pagesz),
	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
	    __func__, wchan, prsize, palloc->pa_pagesz);

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = prsize;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;
	pool_redzone_init(pp, size);

	/*
	 * Decide whether to put the page header off-page, to avoid
	 * wasting too large a part of the page or too big an item.
	 * Off-page page headers go on a splay tree, so we can match
	 * a returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the threshold (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
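	/*
	 * Worked example (illustrative, assuming pa_pagesz = 4096 and
	 * phsize = 64): items smaller than MIN(4096/16, 64*8) = 256
	 * bytes always keep their header in the page; a 640-byte item
	 * does too, since 4096/640 and (4096-64)/640 both yield 6
	 * items per page, so the in-page header costs no items.
	 */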
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if (pp->pr_roflags & PR_PHINPAGE ||
	    ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: [%s] too large itemsperpage(%d) for "
			    "PR_NOTOUCH", __func__,
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/* Insert into the list of all pools. */
	if (!cold)
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (!cold)
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (!cold)
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (!cold)
		mutex_exit(&palloc->pa_lock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pool_allocator_lock);
	if (--pp->pr_alloc->pa_refcnt == 0)
		mutex_destroy(&pp->pr_alloc->pa_lock);
	mutex_exit(&pool_allocator_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);
	KASSERTMSG((pp->pr_nout == 0),
	    "%s: pool busy: still out: %u", __func__, pp->pr_nout);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
	KASSERTMSG((pp->pr_drain_hook == NULL),
	    "%s: [%s] already set", __func__, pp->pr_wchan);
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

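/*
 * Example of a drain hook (illustrative sketch; the "foo" names are
 * hypothetical).  A subsystem that keeps private caches of pool items
 * registers a hook through which the pool can ask for them back, e.g.
 * from the hard-limit path in pool_get():
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... pool_put() any privately cached items ...
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */
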
static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (void *)((char *)storage + pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}

/*
 * Grab an item from the pool.
 */
void *
pool_get(struct pool *pp, int flags)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	KASSERTMSG((pp->pr_itemsperpage != 0),
	    "%s: [%s] pr_itemsperpage is zero, "
	    "pool not initialized?", __func__, pp->pr_wchan);
	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
		|| pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
	    "%s: [%s] is IPL_NONE, but called from interrupt context",
	    __func__, pp->pr_wchan);
	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	mutex_enter(&pp->pr_lock);
 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			do {
				cv_wait(&pp->pr_cv, &pp->pr_lock);
			} while (pp->pr_flags & PR_WANTED);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		mutex_exit(&pp->pr_lock);
		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

		KASSERTMSG((pp->pr_nitems == 0),
		    "%s: [%s] curpage NULL, inconsistent nitems %u",
		    __func__, pp->pr_wchan, pp->pr_nitems);

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		error = pool_grow(pp, flags);
		if (error != 0) {
			/*
			 * pool_grow aborts when another thread
			 * is allocating a new page. Retry if it
			 * waited for it.
			 */
			if (error == ERESTART)
				goto startover;

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			mutex_exit(&pp->pr_lock);
			KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
			return (NULL);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
		    "%s: %s: page empty", __func__, pp->pr_wchan);
		v = pr_item_notouch_get(pp, ph);
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			mutex_exit(&pp->pr_lock);
			panic("%s: [%s] page empty", __func__, pp->pr_wchan);
		}
		KASSERTMSG((pp->pr_nitems > 0),
		    "%s: [%s] nitems %u inconsistent on itemlist",
		    __func__, pp->pr_wchan, pp->pr_nitems);
		KASSERTMSG((pi->pi_magic == PI_MAGIC),
		    "%s: [%s] free list modified: "
		    "magic=%x; page %p; item addr %p", __func__,
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle > 0);
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
		KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
			LIST_EMPTY(&ph->ph_itemlist)),
		    "%s: [%s] nmissing (%u) inconsistent", __func__,
			pp->pr_wchan, ph->ph_nmissing);
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
	FREECHECK_OUT(&pp->pr_freecheck, v);
	pool_redzone_fill(pp, v);
	pool_kleak_fill(pp, v);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;

	KASSERT(mutex_owned(&pp->pr_lock));
	pool_redzone_check(pp, v);
	FREECHECK_IN(&pp->pr_freecheck, v);
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

	KASSERTMSG((pp->pr_nout > 0),
	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		panic("%s: [%s] page header missing", __func__,  pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		cv_broadcast(&pp->pr_cv);
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 *
			 * note for 64-bit time_t: truncating to 32-bit is not
			 * a problem for our usage.
			 */
			ph->ph_time = time_uptime;
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}
   1058   1.43   thorpej 
   1059   1.56  sommerfe void
   1060   1.56  sommerfe pool_put(struct pool *pp, void *v)
   1061   1.56  sommerfe {
   1062  1.101   thorpej 	struct pool_pagelist pq;
   1063  1.101   thorpej 
   1064  1.101   thorpej 	LIST_INIT(&pq);
   1065   1.56  sommerfe 
   1066  1.134        ad 	mutex_enter(&pp->pr_lock);
   1067  1.101   thorpej 	pool_do_put(pp, v, &pq);
   1068  1.134        ad 	mutex_exit(&pp->pr_lock);
   1069   1.56  sommerfe 
   1070  1.102       chs 	pr_pagelist_free(pp, &pq);
   1071   1.56  sommerfe }
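
/*
 * Illustrative sketch, not part of this file (hypothetical pool and
 * type names): the canonical pool_get()/pool_put() pairing.  An
 * object must be returned to the pool it was allocated from.
 *
 *	struct foo *f;
 *
 *	f = pool_get(&foo_pool, PR_WAITOK);	(may sleep for memory)
 *	...use f...
 *	pool_put(&foo_pool, f);		(storage goes back to foo_pool)
 */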
   1072   1.57  sommerfe 
   1073   1.74   thorpej /*
   1074  1.113      yamt  * pool_grow: grow a pool by a page.
   1075  1.113      yamt  *
   1076  1.113      yamt  * => called with pool locked.
   1077  1.113      yamt  * => unlock and relock the pool.
   1078  1.113      yamt  * => return with pool locked.
   1079  1.113      yamt  */
   1080  1.113      yamt 
   1081  1.113      yamt static int
   1082  1.113      yamt pool_grow(struct pool *pp, int flags)
   1083  1.113      yamt {
   1084  1.209  riastrad 	/*
   1085  1.209  riastrad 	 * If there's a pool_grow in progress, wait for it to complete
   1086  1.209  riastrad 	 * and try again from the top.
   1087  1.209  riastrad 	 */
   1088  1.209  riastrad 	if (pp->pr_flags & PR_GROWING) {
   1089  1.209  riastrad 		if (flags & PR_WAITOK) {
   1090  1.209  riastrad 			do {
   1091  1.209  riastrad 				cv_wait(&pp->pr_cv, &pp->pr_lock);
   1092  1.209  riastrad 			} while (pp->pr_flags & PR_GROWING);
   1093  1.209  riastrad 			return ERESTART;
   1094  1.209  riastrad 		} else {
   1095  1.219       mrg 			if (pp->pr_flags & PR_GROWINGNOWAIT) {
   1096  1.219       mrg 				/*
   1097  1.219       mrg 				 * This needs an unlock/relock dance so
   1098  1.219       mrg 				 * that the other caller has a chance to
    1099  1.219       mrg 				 * run and actually complete the grow.  Note
   1100  1.219       mrg 				 * that this is effectively a busy-wait.
   1101  1.219       mrg 				 */
   1102  1.219       mrg 				mutex_exit(&pp->pr_lock);
   1103  1.219       mrg 				mutex_enter(&pp->pr_lock);
   1104  1.219       mrg 				return ERESTART;
   1105  1.219       mrg 			}
   1106  1.209  riastrad 			return EWOULDBLOCK;
   1107  1.209  riastrad 		}
   1108  1.209  riastrad 	}
   1109  1.209  riastrad 	pp->pr_flags |= PR_GROWING;
   1110  1.220  christos 	if (flags & PR_WAITOK)
   1111  1.220  christos 		mutex_exit(&pp->pr_lock);
   1112  1.220  christos 	else
   1113  1.219       mrg 		pp->pr_flags |= PR_GROWINGNOWAIT;
   1114  1.113      yamt 
   1115  1.216  christos 	char *cp = pool_allocator_alloc(pp, flags);
   1116  1.216  christos 	if (__predict_false(cp == NULL))
   1117  1.216  christos 		goto out;
   1118  1.216  christos 
   1119  1.216  christos 	struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
   1120  1.216  christos 	if (__predict_false(ph == NULL)) {
   1121  1.216  christos 		pool_allocator_free(pp, cp);
   1122  1.209  riastrad 		goto out;
   1123  1.113      yamt 	}
   1124  1.113      yamt 
   1125  1.220  christos 	if (flags & PR_WAITOK)
   1126  1.220  christos 		mutex_enter(&pp->pr_lock);
   1127  1.113      yamt 	pool_prime_page(pp, cp, ph);
   1128  1.113      yamt 	pp->pr_npagealloc++;
   1129  1.216  christos 	KASSERT(pp->pr_flags & PR_GROWING);
   1130  1.219       mrg 	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
   1131  1.209  riastrad 	/*
   1132  1.209  riastrad 	 * If anyone was waiting for pool_grow, notify them that we
   1133  1.209  riastrad 	 * may have just done it.
   1134  1.209  riastrad 	 */
   1135  1.216  christos 	cv_broadcast(&pp->pr_cv);
   1136  1.216  christos 	return 0;
   1137  1.216  christos out:
   1138  1.220  christos 	if (flags & PR_WAITOK)
   1139  1.220  christos 		mutex_enter(&pp->pr_lock);
   1140  1.209  riastrad 	KASSERT(pp->pr_flags & PR_GROWING);
   1141  1.219       mrg 	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
   1142  1.216  christos 	return ENOMEM;
   1143  1.113      yamt }
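
/*
 * Caller-side idiom for the above (a sketch; this is exactly what
 * pool_prime() and pool_catchup() below do): since pool_grow() can
 * drop and retake the pool lock, ERESTART means "pool state may have
 * changed, try again":
 *
 *	do {
 *		error = pool_grow(pp, PR_NOWAIT);
 *	} while (error == ERESTART);
 */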
   1144  1.113      yamt 
   1145  1.113      yamt /*
   1146   1.74   thorpej  * Add N items to the pool.
   1147   1.74   thorpej  */
   1148   1.74   thorpej int
   1149   1.74   thorpej pool_prime(struct pool *pp, int n)
   1150   1.74   thorpej {
   1151   1.75    simonb 	int newpages;
   1152  1.113      yamt 	int error = 0;
   1153   1.74   thorpej 
   1154  1.134        ad 	mutex_enter(&pp->pr_lock);
   1155   1.74   thorpej 
   1156   1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1157   1.74   thorpej 
   1158  1.216  christos 	while (newpages > 0) {
   1159  1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1160  1.113      yamt 		if (error) {
   1161  1.214  christos 			if (error == ERESTART)
   1162  1.214  christos 				continue;
   1163   1.74   thorpej 			break;
   1164   1.74   thorpej 		}
   1165   1.74   thorpej 		pp->pr_minpages++;
   1166  1.216  christos 		newpages--;
   1167   1.74   thorpej 	}
   1168   1.74   thorpej 
   1169   1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1170   1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1171   1.74   thorpej 
   1172  1.134        ad 	mutex_exit(&pp->pr_lock);
   1173  1.113      yamt 	return error;
   1174   1.74   thorpej }
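
/*
 * Illustrative sketch (hypothetical pool name and numbers): priming a
 * pool at initialization time so that later PR_NOWAIT allocations,
 * e.g. from interrupt context, are unlikely to fail for want of a page.
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    NULL, IPL_NET);
 *	if (pool_prime(&foo_pool, 128) != 0)
 *		aprint_error("foo: unable to prime pool\n");
 */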
   1175   1.55   thorpej 
   1176   1.55   thorpej /*
   1177    1.3        pk  * Add a page worth of items to the pool.
   1178   1.21   thorpej  *
   1179   1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1180    1.3        pk  */
   1181   1.55   thorpej static void
   1182  1.128  christos pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1183    1.3        pk {
   1184    1.3        pk 	struct pool_item *pi;
   1185  1.128  christos 	void *cp = storage;
   1186  1.125        ad 	const unsigned int align = pp->pr_align;
   1187  1.125        ad 	const unsigned int ioff = pp->pr_itemoffset;
   1188   1.55   thorpej 	int n;
   1189   1.36        pk 
   1190  1.134        ad 	KASSERT(mutex_owned(&pp->pr_lock));
   1191  1.207  riastrad 	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
   1192  1.207  riastrad 		(((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
   1193  1.213  christos 	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);
   1194    1.3        pk 
   1195    1.3        pk 	/*
   1196    1.3        pk 	 * Insert page header.
   1197    1.3        pk 	 */
   1198   1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1199  1.102       chs 	LIST_INIT(&ph->ph_itemlist);
   1200    1.3        pk 	ph->ph_page = storage;
   1201    1.3        pk 	ph->ph_nmissing = 0;
   1202  1.151      yamt 	ph->ph_time = time_uptime;
   1203   1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1204   1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1205    1.3        pk 
   1206    1.6   thorpej 	pp->pr_nidle++;
   1207    1.6   thorpej 
   1208    1.3        pk 	/*
   1209    1.3        pk 	 * Color this page.
   1210    1.3        pk 	 */
   1211  1.141      yamt 	ph->ph_off = pp->pr_curcolor;
   1212  1.141      yamt 	cp = (char *)cp + ph->ph_off;
   1213    1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1214    1.3        pk 		pp->pr_curcolor = 0;
   1215    1.3        pk 
   1216    1.3        pk 	/*
    1217    1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1218    1.3        pk 	 */
   1219    1.3        pk 	if (ioff != 0)
   1220  1.128  christos 		cp = (char *)cp + align - ioff;
   1221    1.3        pk 
   1222  1.125        ad 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1223  1.125        ad 
   1224    1.3        pk 	/*
   1225    1.3        pk 	 * Insert remaining chunks on the bucket list.
   1226    1.3        pk 	 */
   1227    1.3        pk 	n = pp->pr_itemsperpage;
   1228   1.20   thorpej 	pp->pr_nitems += n;
   1229    1.3        pk 
   1230   1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1231  1.141      yamt 		pr_item_notouch_init(pp, ph);
   1232   1.97      yamt 	} else {
   1233   1.97      yamt 		while (n--) {
   1234   1.97      yamt 			pi = (struct pool_item *)cp;
   1235   1.78   thorpej 
   1236   1.97      yamt 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1237    1.3        pk 
   1238   1.97      yamt 			/* Insert on page list */
   1239  1.102       chs 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1240    1.3        pk #ifdef DIAGNOSTIC
   1241   1.97      yamt 			pi->pi_magic = PI_MAGIC;
   1242    1.3        pk #endif
   1243  1.128  christos 			cp = (char *)cp + pp->pr_size;
   1244  1.125        ad 
   1245  1.125        ad 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1246   1.97      yamt 		}
   1247    1.3        pk 	}
   1248    1.3        pk 
   1249    1.3        pk 	/*
   1250    1.3        pk 	 * If the pool was depleted, point at the new page.
   1251    1.3        pk 	 */
   1252    1.3        pk 	if (pp->pr_curpage == NULL)
   1253    1.3        pk 		pp->pr_curpage = ph;
   1254    1.3        pk 
   1255    1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1256    1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1257    1.3        pk }
   1258    1.3        pk 
   1259   1.20   thorpej /*
    1260   1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark, to
    1261   1.88       chs  * catch pr_nitems back up to the low water mark.
   1262   1.20   thorpej  *
   1263   1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1264   1.20   thorpej  *
   1265   1.73   thorpej  * Note 2, we must be called with the pool already locked, and we return
   1266   1.20   thorpej  * with it locked.
   1267   1.20   thorpej  */
   1268   1.20   thorpej static int
   1269   1.42   thorpej pool_catchup(struct pool *pp)
   1270   1.20   thorpej {
   1271   1.20   thorpej 	int error = 0;
   1272   1.20   thorpej 
   1273   1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1274  1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1275  1.113      yamt 		if (error) {
   1276  1.214  christos 			if (error == ERESTART)
   1277  1.214  christos 				continue;
   1278   1.20   thorpej 			break;
   1279   1.20   thorpej 		}
   1280   1.20   thorpej 	}
   1281  1.113      yamt 	return error;
   1282   1.20   thorpej }
   1283   1.20   thorpej 
   1284   1.88       chs static void
   1285   1.88       chs pool_update_curpage(struct pool *pp)
   1286   1.88       chs {
   1287   1.88       chs 
   1288   1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1289   1.88       chs 	if (pp->pr_curpage == NULL) {
   1290   1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1291   1.88       chs 	}
   1292  1.168      yamt 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
   1293  1.168      yamt 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
   1294   1.88       chs }
   1295   1.88       chs 
   1296    1.3        pk void
   1297   1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1298    1.3        pk {
   1299   1.15        pk 
   1300  1.134        ad 	mutex_enter(&pp->pr_lock);
   1301   1.21   thorpej 
   1302    1.3        pk 	pp->pr_minitems = n;
   1303   1.15        pk 	pp->pr_minpages = (n == 0)
   1304   1.15        pk 		? 0
   1305   1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1306   1.20   thorpej 
   1307   1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1308   1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1309   1.20   thorpej 		/*
   1310   1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1311   1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1312   1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1313   1.20   thorpej 		 */
   1314   1.20   thorpej 	}
   1315   1.21   thorpej 
   1316  1.134        ad 	mutex_exit(&pp->pr_lock);
   1317    1.3        pk }
   1318    1.3        pk 
   1319    1.3        pk void
   1320   1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1321    1.3        pk {
   1322   1.15        pk 
   1323  1.134        ad 	mutex_enter(&pp->pr_lock);
   1324   1.21   thorpej 
   1325   1.15        pk 	pp->pr_maxpages = (n == 0)
   1326   1.15        pk 		? 0
   1327   1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1328   1.21   thorpej 
   1329  1.134        ad 	mutex_exit(&pp->pr_lock);
   1330    1.3        pk }
   1331    1.3        pk 
   1332   1.20   thorpej void
   1333   1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1334   1.20   thorpej {
   1335   1.20   thorpej 
   1336  1.134        ad 	mutex_enter(&pp->pr_lock);
   1337   1.20   thorpej 
   1338   1.20   thorpej 	pp->pr_hardlimit = n;
   1339   1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1340   1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1341   1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1342   1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1343   1.20   thorpej 
   1344   1.20   thorpej 	/*
   1345   1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1346   1.21   thorpej 	 * release the lock.
   1347   1.20   thorpej 	 */
   1348   1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1349   1.20   thorpej 		? 0
   1350   1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1351   1.21   thorpej 
   1352  1.134        ad 	mutex_exit(&pp->pr_lock);
   1353   1.20   thorpej }
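
/*
 * Illustrative sketch (hypothetical values): typical tuning right
 * after pool_init().  The low water mark keeps items in reserve, the
 * high water mark bounds idle pages, and the hard limit caps the
 * number of outstanding items, printing warnmess at most once per
 * ratecap seconds when the limit is hit.
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 1024);
 *	pool_sethardlimit(&foo_pool, 4096,
 *	    "WARNING: foo_pool limit reached", 60);
 */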
   1354    1.3        pk 
   1355    1.3        pk /*
   1356    1.3        pk  * Release all complete pages that have not been used recently.
   1357  1.184     rmind  *
   1358  1.197       jym  * Must not be called from interrupt context.
   1359    1.3        pk  */
   1360   1.66   thorpej int
   1361   1.56  sommerfe pool_reclaim(struct pool *pp)
   1362    1.3        pk {
   1363    1.3        pk 	struct pool_item_header *ph, *phnext;
   1364   1.61       chs 	struct pool_pagelist pq;
   1365  1.151      yamt 	uint32_t curtime;
   1366  1.134        ad 	bool klock;
   1367  1.134        ad 	int rv;
   1368    1.3        pk 
   1369  1.197       jym 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
   1370  1.184     rmind 
   1371   1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1372   1.68   thorpej 		/*
   1373   1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1374   1.68   thorpej 		 */
   1375   1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1376   1.68   thorpej 	}
   1377   1.68   thorpej 
   1378  1.134        ad 	/*
   1379  1.157        ad 	 * XXXSMP Because we do not want to cause non-MPSAFE code
   1380  1.157        ad 	 * to block.
   1381  1.134        ad 	 */
   1382  1.134        ad 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
   1383  1.134        ad 	    pp->pr_ipl == IPL_SOFTSERIAL) {
   1384  1.134        ad 		KERNEL_LOCK(1, NULL);
   1385  1.134        ad 		klock = true;
   1386  1.134        ad 	} else
   1387  1.134        ad 		klock = false;
   1388  1.134        ad 
   1389  1.134        ad 	/* Reclaim items from the pool's cache (if any). */
   1390  1.134        ad 	if (pp->pr_cache != NULL)
   1391  1.134        ad 		pool_cache_invalidate(pp->pr_cache);
   1392  1.134        ad 
   1393  1.134        ad 	if (mutex_tryenter(&pp->pr_lock) == 0) {
   1394  1.134        ad 		if (klock) {
   1395  1.134        ad 			KERNEL_UNLOCK_ONE(NULL);
   1396  1.134        ad 		}
   1397   1.66   thorpej 		return (0);
   1398  1.134        ad 	}
   1399   1.68   thorpej 
   1400   1.88       chs 	LIST_INIT(&pq);
   1401   1.43   thorpej 
   1402  1.151      yamt 	curtime = time_uptime;
   1403   1.21   thorpej 
   1404   1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1405   1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1406    1.3        pk 
   1407    1.3        pk 		/* Check our minimum page claim */
   1408    1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1409    1.3        pk 			break;
   1410    1.3        pk 
   1411   1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1412  1.191      para 		if (curtime - ph->ph_time < pool_inactive_time)
   1413   1.88       chs 			continue;
   1414   1.21   thorpej 
   1415   1.88       chs 		/*
   1416   1.88       chs 		 * If freeing this page would put us below
   1417   1.88       chs 		 * the low water mark, stop now.
   1418   1.88       chs 		 */
   1419   1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1420   1.88       chs 		    pp->pr_minitems)
   1421   1.88       chs 			break;
   1422   1.21   thorpej 
   1423   1.88       chs 		pr_rmpage(pp, ph, &pq);
   1424    1.3        pk 	}
   1425    1.3        pk 
   1426  1.134        ad 	mutex_exit(&pp->pr_lock);
   1427  1.134        ad 
   1428  1.134        ad 	if (LIST_EMPTY(&pq))
   1429  1.134        ad 		rv = 0;
   1430  1.134        ad 	else {
   1431  1.134        ad 		pr_pagelist_free(pp, &pq);
   1432  1.134        ad 		rv = 1;
   1433  1.134        ad 	}
   1434  1.134        ad 
   1435  1.134        ad 	if (klock) {
   1436  1.134        ad 		KERNEL_UNLOCK_ONE(NULL);
   1437  1.134        ad 	}
   1438   1.66   thorpej 
   1439  1.134        ad 	return (rv);
   1440    1.3        pk }
   1441    1.3        pk 
   1442    1.3        pk /*
    1443  1.197       jym  * Drain pools, one at a time.  The drained pool is returned in *ppp.
   1444  1.131        ad  *
   1445  1.134        ad  * Note, must never be called from interrupt context.
   1446    1.3        pk  */
   1447  1.197       jym bool
   1448  1.197       jym pool_drain(struct pool **ppp)
   1449    1.3        pk {
   1450  1.197       jym 	bool reclaimed;
   1451    1.3        pk 	struct pool *pp;
   1452  1.134        ad 
   1453  1.145        ad 	KASSERT(!TAILQ_EMPTY(&pool_head));
   1454    1.3        pk 
   1455   1.61       chs 	pp = NULL;
   1456  1.134        ad 
   1457  1.134        ad 	/* Find next pool to drain, and add a reference. */
   1458  1.134        ad 	mutex_enter(&pool_head_lock);
   1459  1.134        ad 	do {
   1460  1.134        ad 		if (drainpp == NULL) {
   1461  1.145        ad 			drainpp = TAILQ_FIRST(&pool_head);
   1462  1.134        ad 		}
   1463  1.134        ad 		if (drainpp != NULL) {
   1464  1.134        ad 			pp = drainpp;
   1465  1.145        ad 			drainpp = TAILQ_NEXT(pp, pr_poollist);
   1466  1.134        ad 		}
   1467  1.134        ad 		/*
   1468  1.134        ad 		 * Skip completely idle pools.  We depend on at least
   1469  1.134        ad 		 * one pool in the system being active.
   1470  1.134        ad 		 */
   1471  1.134        ad 	} while (pp == NULL || pp->pr_npages == 0);
   1472  1.134        ad 	pp->pr_refcnt++;
   1473  1.134        ad 	mutex_exit(&pool_head_lock);
   1474  1.134        ad 
    1475  1.134        ad 	/* Drain the cache (if any) and pool. */
   1476  1.186     pooka 	reclaimed = pool_reclaim(pp);
   1477  1.134        ad 
   1478  1.134        ad 	/* Finally, unlock the pool. */
   1479  1.134        ad 	mutex_enter(&pool_head_lock);
   1480  1.134        ad 	pp->pr_refcnt--;
   1481  1.134        ad 	cv_broadcast(&pool_busy);
   1482  1.134        ad 	mutex_exit(&pool_head_lock);
   1483  1.186     pooka 
   1484  1.197       jym 	if (ppp != NULL)
   1485  1.197       jym 		*ppp = pp;
   1486  1.197       jym 
   1487  1.186     pooka 	return reclaimed;
   1488    1.3        pk }
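
/*
 * Illustrative sketch (the memory_is_short() predicate is
 * hypothetical): a pagedaemon-style loop that drains one pool per
 * pass, cycling through all pools over successive calls.
 *
 *	struct pool *pp = NULL;
 *
 *	while (memory_is_short()) {
 *		if (pool_drain(&pp))
 *			continue;	(pp freed pages; re-check shortage)
 *		...nothing reclaimed, back off or sleep...
 *	}
 */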
   1489    1.3        pk 
   1490    1.3        pk /*
   1491  1.217       mrg  * Calculate the total number of pages consumed by pools.
   1492  1.217       mrg  */
   1493  1.217       mrg int
   1494  1.217       mrg pool_totalpages(void)
   1495  1.217       mrg {
   1496  1.217       mrg 	struct pool *pp;
   1497  1.218       mrg 	uint64_t total = 0;
   1498  1.217       mrg 
   1499  1.217       mrg 	mutex_enter(&pool_head_lock);
   1500  1.218       mrg 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1501  1.218       mrg 		uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
   1502  1.218       mrg 
   1503  1.218       mrg 		if ((pp->pr_roflags & PR_RECURSIVE) != 0)
   1504  1.218       mrg 			bytes -= (pp->pr_nout * pp->pr_size);
   1505  1.218       mrg 		total += bytes;
   1506  1.218       mrg 	}
   1507  1.217       mrg 	mutex_exit(&pool_head_lock);
   1508  1.217       mrg 
   1509  1.218       mrg 	return atop(total);
   1510  1.217       mrg }
   1511  1.217       mrg 
   1512  1.217       mrg /*
   1513    1.3        pk  * Diagnostic helpers.
   1514    1.3        pk  */
   1515   1.21   thorpej 
   1516   1.25   thorpej void
   1517  1.108      yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
   1518  1.108      yamt {
   1519  1.108      yamt 	struct pool *pp;
   1520  1.108      yamt 
   1521  1.145        ad 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1522  1.108      yamt 		pool_printit(pp, modif, pr);
   1523  1.108      yamt 	}
   1524  1.108      yamt }
   1525  1.108      yamt 
   1526  1.108      yamt void
   1527   1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1528   1.25   thorpej {
   1529   1.25   thorpej 
   1530   1.25   thorpej 	if (pp == NULL) {
   1531   1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1532   1.25   thorpej 		return;
   1533   1.25   thorpej 	}
   1534   1.25   thorpej 
   1535   1.25   thorpej 	pool_print1(pp, modif, pr);
   1536   1.25   thorpej }
   1537   1.25   thorpej 
   1538   1.21   thorpej static void
   1539  1.124      yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1540   1.97      yamt     void (*pr)(const char *, ...))
   1541   1.88       chs {
   1542   1.88       chs 	struct pool_item_header *ph;
   1543  1.207  riastrad 	struct pool_item *pi __diagused;
   1544   1.88       chs 
   1545   1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1546  1.151      yamt 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
   1547  1.151      yamt 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
   1548   1.88       chs #ifdef DIAGNOSTIC
   1549   1.97      yamt 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1550  1.102       chs 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1551   1.97      yamt 				if (pi->pi_magic != PI_MAGIC) {
   1552   1.97      yamt 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1553   1.97      yamt 					    pi, pi->pi_magic);
   1554   1.97      yamt 				}
   1555   1.88       chs 			}
   1556   1.88       chs 		}
   1557   1.88       chs #endif
   1558   1.88       chs 	}
   1559   1.88       chs }
   1560   1.88       chs 
   1561   1.88       chs static void
   1562   1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1563    1.3        pk {
   1564   1.25   thorpej 	struct pool_item_header *ph;
   1565  1.134        ad 	pool_cache_t pc;
   1566  1.134        ad 	pcg_t *pcg;
   1567  1.134        ad 	pool_cache_cpu_t *cc;
   1568  1.134        ad 	uint64_t cpuhit, cpumiss;
   1569   1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1570   1.25   thorpej 	char c;
   1571   1.25   thorpej 
   1572   1.25   thorpej 	while ((c = *modif++) != '\0') {
   1573   1.25   thorpej 		if (c == 'l')
   1574   1.25   thorpej 			print_log = 1;
   1575   1.25   thorpej 		if (c == 'p')
   1576   1.25   thorpej 			print_pagelist = 1;
   1577   1.44   thorpej 		if (c == 'c')
   1578   1.44   thorpej 			print_cache = 1;
   1579   1.25   thorpej 	}
   1580   1.25   thorpej 
   1581  1.134        ad 	if ((pc = pp->pr_cache) != NULL) {
   1582  1.134        ad 		(*pr)("POOL CACHE");
   1583  1.134        ad 	} else {
   1584  1.134        ad 		(*pr)("POOL");
   1585  1.134        ad 	}
   1586  1.134        ad 
   1587  1.134        ad 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1588   1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1589   1.25   thorpej 	    pp->pr_roflags);
   1590   1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1591   1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1592   1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1593   1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1594   1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1595   1.25   thorpej 
   1596  1.134        ad 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
   1597   1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1598   1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1599   1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1600   1.25   thorpej 
   1601   1.25   thorpej 	if (print_pagelist == 0)
   1602   1.25   thorpej 		goto skip_pagelist;
   1603   1.25   thorpej 
   1604   1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1605   1.88       chs 		(*pr)("\n\tempty page list:\n");
   1606   1.97      yamt 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1607   1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1608   1.88       chs 		(*pr)("\n\tfull page list:\n");
   1609   1.97      yamt 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1610   1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1611   1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1612   1.97      yamt 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1613   1.88       chs 
   1614   1.25   thorpej 	if (pp->pr_curpage == NULL)
   1615   1.25   thorpej 		(*pr)("\tno current page\n");
   1616   1.25   thorpej 	else
   1617   1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1618   1.25   thorpej 
   1619   1.25   thorpej  skip_pagelist:
   1620   1.25   thorpej 	if (print_log == 0)
   1621   1.25   thorpej 		goto skip_log;
   1622   1.25   thorpej 
   1623   1.25   thorpej 	(*pr)("\n");
   1624    1.3        pk 
   1625   1.25   thorpej  skip_log:
   1626   1.44   thorpej 
   1627  1.102       chs #define PR_GROUPLIST(pcg)						\
   1628  1.102       chs 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1629  1.142        ad 	for (i = 0; i < pcg->pcg_size; i++) {				\
   1630  1.102       chs 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1631  1.102       chs 		    POOL_PADDR_INVALID) {				\
   1632  1.102       chs 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1633  1.102       chs 			    pcg->pcg_objects[i].pcgo_va,		\
   1634  1.102       chs 			    (unsigned long long)			\
   1635  1.102       chs 			    pcg->pcg_objects[i].pcgo_pa);		\
   1636  1.102       chs 		} else {						\
   1637  1.102       chs 			(*pr)("\t\t\t%p\n",				\
   1638  1.102       chs 			    pcg->pcg_objects[i].pcgo_va);		\
   1639  1.102       chs 		}							\
   1640  1.102       chs 	}
   1641  1.102       chs 
   1642  1.134        ad 	if (pc != NULL) {
   1643  1.134        ad 		cpuhit = 0;
   1644  1.134        ad 		cpumiss = 0;
   1645  1.183        ad 		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
   1646  1.134        ad 			if ((cc = pc->pc_cpus[i]) == NULL)
   1647  1.134        ad 				continue;
   1648  1.134        ad 			cpuhit += cc->cc_hits;
   1649  1.134        ad 			cpumiss += cc->cc_misses;
   1650  1.134        ad 		}
   1651  1.134        ad 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
   1652  1.134        ad 		(*pr)("\tcache layer hits %llu misses %llu\n",
   1653  1.134        ad 		    pc->pc_hits, pc->pc_misses);
   1654  1.134        ad 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
   1655  1.134        ad 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
   1656  1.134        ad 		    pc->pc_contended);
   1657  1.134        ad 		(*pr)("\tcache layer empty groups %u full groups %u\n",
   1658  1.134        ad 		    pc->pc_nempty, pc->pc_nfull);
   1659  1.134        ad 		if (print_cache) {
   1660  1.134        ad 			(*pr)("\tfull cache groups:\n");
   1661  1.134        ad 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   1662  1.134        ad 			    pcg = pcg->pcg_next) {
   1663  1.134        ad 				PR_GROUPLIST(pcg);
   1664  1.134        ad 			}
   1665  1.134        ad 			(*pr)("\tempty cache groups:\n");
   1666  1.134        ad 			for (pcg = pc->pc_emptygroups; pcg != NULL;
   1667  1.134        ad 			    pcg = pcg->pcg_next) {
   1668  1.134        ad 				PR_GROUPLIST(pcg);
   1669  1.134        ad 			}
   1670  1.103       chs 		}
   1671   1.44   thorpej 	}
   1672  1.102       chs #undef PR_GROUPLIST
   1673   1.88       chs }
   1674   1.88       chs 
   1675   1.88       chs static int
   1676   1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1677   1.88       chs {
   1678   1.88       chs 	struct pool_item *pi;
   1679  1.128  christos 	void *page;
   1680   1.88       chs 	int n;
   1681   1.88       chs 
   1682  1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1683  1.128  christos 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1684  1.121      yamt 		if (page != ph->ph_page &&
   1685  1.121      yamt 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1686  1.121      yamt 			if (label != NULL)
   1687  1.121      yamt 				printf("%s: ", label);
   1688  1.121      yamt 			printf("pool(%p:%s): page inconsistency: page %p;"
   1689  1.121      yamt 			       " at page head addr %p (p %p)\n", pp,
   1690  1.121      yamt 				pp->pr_wchan, ph->ph_page,
   1691  1.121      yamt 				ph, page);
   1692  1.121      yamt 			return 1;
   1693  1.121      yamt 		}
   1694   1.88       chs 	}
   1695    1.3        pk 
   1696   1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1697   1.97      yamt 		return 0;
   1698   1.97      yamt 
   1699  1.102       chs 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1700   1.88       chs 	     pi != NULL;
   1701  1.102       chs 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1702   1.88       chs 
   1703   1.88       chs #ifdef DIAGNOSTIC
   1704   1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1705   1.88       chs 			if (label != NULL)
   1706   1.88       chs 				printf("%s: ", label);
   1707   1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1708  1.121      yamt 			       " page %p; item ordinal %d; addr %p\n",
   1709   1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1710  1.121      yamt 				n, pi);
   1711   1.88       chs 			panic("pool");
   1712   1.88       chs 		}
   1713   1.88       chs #endif
   1714  1.121      yamt 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1715  1.121      yamt 			continue;
   1716  1.121      yamt 		}
   1717  1.128  christos 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1718   1.88       chs 		if (page == ph->ph_page)
   1719   1.88       chs 			continue;
   1720   1.88       chs 
   1721   1.88       chs 		if (label != NULL)
   1722   1.88       chs 			printf("%s: ", label);
   1723   1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1724   1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1725   1.88       chs 			pp->pr_wchan, ph->ph_page,
   1726   1.88       chs 			n, pi, page);
   1727   1.88       chs 		return 1;
   1728   1.88       chs 	}
   1729   1.88       chs 	return 0;
   1730    1.3        pk }
   1731    1.3        pk 
   1732   1.88       chs 
   1733    1.3        pk int
   1734   1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1735    1.3        pk {
   1736    1.3        pk 	struct pool_item_header *ph;
   1737    1.3        pk 	int r = 0;
   1738    1.3        pk 
   1739  1.134        ad 	mutex_enter(&pp->pr_lock);
   1740   1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1741   1.88       chs 		r = pool_chk_page(pp, label, ph);
   1742   1.88       chs 		if (r) {
   1743   1.88       chs 			goto out;
   1744   1.88       chs 		}
   1745   1.88       chs 	}
   1746   1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1747   1.88       chs 		r = pool_chk_page(pp, label, ph);
   1748   1.88       chs 		if (r) {
   1749    1.3        pk 			goto out;
   1750    1.3        pk 		}
   1751   1.88       chs 	}
   1752   1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1753   1.88       chs 		r = pool_chk_page(pp, label, ph);
   1754   1.88       chs 		if (r) {
   1755    1.3        pk 			goto out;
   1756    1.3        pk 		}
   1757    1.3        pk 	}
   1758   1.88       chs 
   1759    1.3        pk out:
   1760  1.134        ad 	mutex_exit(&pp->pr_lock);
   1761    1.3        pk 	return (r);
   1762   1.43   thorpej }
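
/*
 * Illustrative sketch (hypothetical caller): pool_chk() walks every
 * page list; the label identifies the call site in any report.
 *
 *	if (pool_chk(&foo_pool, "foo_detach") != 0)
 *		panic("foo_detach: foo_pool inconsistent");
 */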
   1763   1.43   thorpej 
   1764   1.43   thorpej /*
   1765   1.43   thorpej  * pool_cache_init:
   1766   1.43   thorpej  *
   1767   1.43   thorpej  *	Initialize a pool cache.
   1768  1.134        ad  */
   1769  1.134        ad pool_cache_t
   1770  1.134        ad pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
   1771  1.134        ad     const char *wchan, struct pool_allocator *palloc, int ipl,
   1772  1.134        ad     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
   1773  1.134        ad {
   1774  1.134        ad 	pool_cache_t pc;
   1775  1.134        ad 
   1776  1.134        ad 	pc = pool_get(&cache_pool, PR_WAITOK);
   1777  1.134        ad 	if (pc == NULL)
   1778  1.134        ad 		return NULL;
   1779  1.134        ad 
   1780  1.134        ad 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
   1781  1.134        ad 	   palloc, ipl, ctor, dtor, arg);
   1782  1.134        ad 
   1783  1.134        ad 	return pc;
   1784  1.134        ad }
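
/*
 * Illustrative sketch (hypothetical ctor/dtor and type): creating a
 * cache of constructed objects.  The constructor runs only when an
 * object comes from the underlying pool, not on every cache hit, so
 * expensive initialization is amortized across get/put cycles.
 *
 *	static int
 *	foo_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct foo *f = obj;
 *
 *		mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
 *		return 0;
 *	}
 *
 *	static void
 *	foo_dtor(void *arg, void *obj)
 *	{
 *		struct foo *f = obj;
 *
 *		mutex_destroy(&f->f_lock);
 *	}
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 */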
   1785  1.134        ad 
   1786  1.134        ad /*
   1787  1.134        ad  * pool_cache_bootstrap:
   1788   1.43   thorpej  *
   1789  1.134        ad  *	Kernel-private version of pool_cache_init().  The caller
   1790  1.134        ad  *	provides initial storage.
   1791   1.43   thorpej  */
   1792   1.43   thorpej void
   1793  1.134        ad pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
   1794  1.134        ad     u_int align_offset, u_int flags, const char *wchan,
   1795  1.134        ad     struct pool_allocator *palloc, int ipl,
   1796  1.134        ad     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
   1797   1.43   thorpej     void *arg)
   1798   1.43   thorpej {
   1799  1.134        ad 	CPU_INFO_ITERATOR cii;
   1800  1.145        ad 	pool_cache_t pc1;
   1801  1.134        ad 	struct cpu_info *ci;
   1802  1.134        ad 	struct pool *pp;
   1803  1.134        ad 
   1804  1.134        ad 	pp = &pc->pc_pool;
   1805  1.208       chs 	if (palloc == NULL && ipl == IPL_NONE) {
   1806  1.208       chs 		if (size > PAGE_SIZE) {
   1807  1.208       chs 			int bigidx = pool_bigidx(size);
   1808  1.208       chs 
   1809  1.208       chs 			palloc = &pool_allocator_big[bigidx];
   1810  1.208       chs 		} else
   1811  1.208       chs 			palloc = &pool_allocator_nointr;
   1812  1.208       chs 	}
   1813  1.134        ad 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
   1814  1.157        ad 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
   1815   1.43   thorpej 
   1816  1.134        ad 	if (ctor == NULL) {
   1817  1.134        ad 		ctor = (int (*)(void *, void *, int))nullop;
   1818  1.134        ad 	}
   1819  1.134        ad 	if (dtor == NULL) {
   1820  1.134        ad 		dtor = (void (*)(void *, void *))nullop;
   1821  1.134        ad 	}
   1822   1.43   thorpej 
   1823  1.134        ad 	pc->pc_emptygroups = NULL;
   1824  1.134        ad 	pc->pc_fullgroups = NULL;
   1825  1.134        ad 	pc->pc_partgroups = NULL;
   1826   1.43   thorpej 	pc->pc_ctor = ctor;
   1827   1.43   thorpej 	pc->pc_dtor = dtor;
   1828   1.43   thorpej 	pc->pc_arg  = arg;
   1829  1.134        ad 	pc->pc_hits  = 0;
   1830   1.48   thorpej 	pc->pc_misses = 0;
   1831  1.134        ad 	pc->pc_nempty = 0;
   1832  1.134        ad 	pc->pc_npart = 0;
   1833  1.134        ad 	pc->pc_nfull = 0;
   1834  1.134        ad 	pc->pc_contended = 0;
   1835  1.134        ad 	pc->pc_refcnt = 0;
   1836  1.136      yamt 	pc->pc_freecheck = NULL;
   1837  1.134        ad 
   1838  1.142        ad 	if ((flags & PR_LARGECACHE) != 0) {
   1839  1.142        ad 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
   1840  1.163        ad 		pc->pc_pcgpool = &pcg_large_pool;
   1841  1.142        ad 	} else {
   1842  1.142        ad 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
   1843  1.163        ad 		pc->pc_pcgpool = &pcg_normal_pool;
   1844  1.142        ad 	}
   1845  1.142        ad 
   1846  1.134        ad 	/* Allocate per-CPU caches. */
   1847  1.134        ad 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
   1848  1.134        ad 	pc->pc_ncpu = 0;
   1849  1.139        ad 	if (ncpu < 2) {
   1850  1.137        ad 		/* XXX For sparc: boot CPU is not attached yet. */
   1851  1.137        ad 		pool_cache_cpu_init1(curcpu(), pc);
   1852  1.137        ad 	} else {
   1853  1.137        ad 		for (CPU_INFO_FOREACH(cii, ci)) {
   1854  1.137        ad 			pool_cache_cpu_init1(ci, pc);
   1855  1.137        ad 		}
   1856  1.134        ad 	}
   1857  1.145        ad 
   1858  1.145        ad 	/* Add to list of all pools. */
   1859  1.145        ad 	if (__predict_true(!cold))
   1860  1.134        ad 		mutex_enter(&pool_head_lock);
   1861  1.145        ad 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
   1862  1.145        ad 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
   1863  1.145        ad 			break;
   1864  1.145        ad 	}
   1865  1.145        ad 	if (pc1 == NULL)
   1866  1.145        ad 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
   1867  1.145        ad 	else
   1868  1.145        ad 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
   1869  1.145        ad 	if (__predict_true(!cold))
   1870  1.134        ad 		mutex_exit(&pool_head_lock);
   1871  1.145        ad 
   1872  1.145        ad 	membar_sync();
   1873  1.145        ad 	pp->pr_cache = pc;
   1874   1.43   thorpej }
   1875   1.43   thorpej 
   1876   1.43   thorpej /*
   1877   1.43   thorpej  * pool_cache_destroy:
   1878   1.43   thorpej  *
   1879   1.43   thorpej  *	Destroy a pool cache.
   1880   1.43   thorpej  */
   1881   1.43   thorpej void
   1882  1.134        ad pool_cache_destroy(pool_cache_t pc)
   1883   1.43   thorpej {
   1884  1.191      para 
   1885  1.191      para 	pool_cache_bootstrap_destroy(pc);
   1886  1.191      para 	pool_put(&cache_pool, pc);
   1887  1.191      para }
   1888  1.191      para 
   1889  1.191      para /*
   1890  1.191      para  * pool_cache_bootstrap_destroy:
   1891  1.191      para  *
   1892  1.191      para  *	Destroy a pool cache.
   1893  1.191      para  */
   1894  1.191      para void
   1895  1.191      para pool_cache_bootstrap_destroy(pool_cache_t pc)
   1896  1.191      para {
   1897  1.134        ad 	struct pool *pp = &pc->pc_pool;
   1898  1.175       jym 	u_int i;
   1899  1.134        ad 
   1900  1.134        ad 	/* Remove it from the global list. */
   1901  1.134        ad 	mutex_enter(&pool_head_lock);
   1902  1.134        ad 	while (pc->pc_refcnt != 0)
   1903  1.134        ad 		cv_wait(&pool_busy, &pool_head_lock);
   1904  1.145        ad 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
   1905  1.134        ad 	mutex_exit(&pool_head_lock);
   1906   1.43   thorpej 
   1907   1.43   thorpej 	/* First, invalidate the entire cache. */
   1908   1.43   thorpej 	pool_cache_invalidate(pc);
   1909   1.43   thorpej 
   1910  1.134        ad 	/* Disassociate it from the pool. */
   1911  1.134        ad 	mutex_enter(&pp->pr_lock);
   1912  1.134        ad 	pp->pr_cache = NULL;
   1913  1.134        ad 	mutex_exit(&pp->pr_lock);
   1914  1.134        ad 
   1915  1.134        ad 	/* Destroy per-CPU data */
   1916  1.183        ad 	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
   1917  1.175       jym 		pool_cache_invalidate_cpu(pc, i);
   1918  1.134        ad 
   1919  1.134        ad 	/* Finally, destroy it. */
   1920  1.134        ad 	mutex_destroy(&pc->pc_lock);
   1921  1.134        ad 	pool_destroy(pp);
   1922  1.134        ad }
   1923  1.134        ad 
   1924  1.134        ad /*
   1925  1.134        ad  * pool_cache_cpu_init1:
   1926  1.134        ad  *
   1927  1.134        ad  *	Called for each pool_cache whenever a new CPU is attached.
   1928  1.134        ad  */
   1929  1.134        ad static void
   1930  1.134        ad pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
   1931  1.134        ad {
   1932  1.134        ad 	pool_cache_cpu_t *cc;
   1933  1.137        ad 	int index;
   1934  1.134        ad 
   1935  1.137        ad 	index = ci->ci_index;
   1936  1.137        ad 
   1937  1.183        ad 	KASSERT(index < __arraycount(pc->pc_cpus));
   1938  1.134        ad 
   1939  1.137        ad 	if ((cc = pc->pc_cpus[index]) != NULL) {
   1940  1.137        ad 		KASSERT(cc->cc_cpuindex == index);
   1941  1.134        ad 		return;
   1942  1.134        ad 	}
   1943  1.134        ad 
   1944  1.134        ad 	/*
   1945  1.134        ad 	 * The first CPU is 'free'.  This needs to be the case for
   1946  1.134        ad 	 * bootstrap - we may not be able to allocate yet.
   1947  1.134        ad 	 */
   1948  1.134        ad 	if (pc->pc_ncpu == 0) {
   1949  1.134        ad 		cc = &pc->pc_cpu0;
   1950  1.134        ad 		pc->pc_ncpu = 1;
   1951  1.134        ad 	} else {
   1952  1.134        ad 		mutex_enter(&pc->pc_lock);
   1953  1.134        ad 		pc->pc_ncpu++;
   1954  1.134        ad 		mutex_exit(&pc->pc_lock);
   1955  1.134        ad 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
   1956  1.134        ad 	}
   1957  1.134        ad 
   1958  1.134        ad 	cc->cc_ipl = pc->pc_pool.pr_ipl;
   1959  1.134        ad 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
   1960  1.134        ad 	cc->cc_cache = pc;
   1961  1.137        ad 	cc->cc_cpuindex = index;
   1962  1.134        ad 	cc->cc_hits = 0;
   1963  1.134        ad 	cc->cc_misses = 0;
   1964  1.169      yamt 	cc->cc_current = __UNCONST(&pcg_dummy);
   1965  1.169      yamt 	cc->cc_previous = __UNCONST(&pcg_dummy);
   1966  1.134        ad 
   1967  1.137        ad 	pc->pc_cpus[index] = cc;
   1968   1.43   thorpej }
   1969   1.43   thorpej 
   1970  1.134        ad /*
   1971  1.134        ad  * pool_cache_cpu_init:
   1972  1.134        ad  *
   1973  1.134        ad  *	Called whenever a new CPU is attached.
   1974  1.134        ad  */
   1975  1.134        ad void
   1976  1.134        ad pool_cache_cpu_init(struct cpu_info *ci)
   1977   1.43   thorpej {
   1978  1.134        ad 	pool_cache_t pc;
   1979  1.134        ad 
   1980  1.134        ad 	mutex_enter(&pool_head_lock);
   1981  1.145        ad 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
   1982  1.134        ad 		pc->pc_refcnt++;
   1983  1.134        ad 		mutex_exit(&pool_head_lock);
   1984   1.43   thorpej 
   1985  1.134        ad 		pool_cache_cpu_init1(ci, pc);
   1986   1.43   thorpej 
   1987  1.134        ad 		mutex_enter(&pool_head_lock);
   1988  1.134        ad 		pc->pc_refcnt--;
   1989  1.134        ad 		cv_broadcast(&pool_busy);
   1990  1.134        ad 	}
   1991  1.134        ad 	mutex_exit(&pool_head_lock);
   1992   1.43   thorpej }
   1993   1.43   thorpej 
   1994  1.134        ad /*
   1995  1.134        ad  * pool_cache_reclaim:
   1996  1.134        ad  *
   1997  1.134        ad  *	Reclaim memory from a pool cache.
   1998  1.134        ad  */
   1999  1.134        ad bool
   2000  1.134        ad pool_cache_reclaim(pool_cache_t pc)
   2001   1.43   thorpej {
   2002   1.43   thorpej 
   2003  1.134        ad 	return pool_reclaim(&pc->pc_pool);
   2004  1.134        ad }
   2005   1.43   thorpej 
   2006  1.136      yamt static void
   2007  1.136      yamt pool_cache_destruct_object1(pool_cache_t pc, void *object)
   2008  1.136      yamt {
   2009  1.136      yamt 
   2010  1.136      yamt 	(*pc->pc_dtor)(pc->pc_arg, object);
   2011  1.136      yamt 	pool_put(&pc->pc_pool, object);
   2012  1.136      yamt }
   2013  1.136      yamt 
   2014  1.134        ad /*
   2015  1.134        ad  * pool_cache_destruct_object:
   2016  1.134        ad  *
   2017  1.134        ad  *	Force destruction of an object and its release back into
   2018  1.134        ad  *	the pool.
   2019  1.134        ad  */
   2020  1.134        ad void
   2021  1.134        ad pool_cache_destruct_object(pool_cache_t pc, void *object)
   2022  1.134        ad {
   2023  1.134        ad 
   2024  1.136      yamt 	FREECHECK_IN(&pc->pc_freecheck, object);
   2025  1.136      yamt 
   2026  1.136      yamt 	pool_cache_destruct_object1(pc, object);
   2027   1.43   thorpej }
   2028   1.43   thorpej 
   2029  1.134        ad /*
   2030  1.134        ad  * pool_cache_invalidate_groups:
   2031  1.134        ad  *
   2032  1.134        ad  *	Invalidate a chain of groups and destruct all objects.
   2033  1.134        ad  */
   2034  1.102       chs static void
   2035  1.134        ad pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
   2036  1.102       chs {
   2037  1.134        ad 	void *object;
   2038  1.134        ad 	pcg_t *next;
   2039  1.134        ad 	int i;
   2040  1.134        ad 
   2041  1.134        ad 	for (; pcg != NULL; pcg = next) {
   2042  1.134        ad 		next = pcg->pcg_next;
   2043  1.134        ad 
   2044  1.134        ad 		for (i = 0; i < pcg->pcg_avail; i++) {
   2045  1.134        ad 			object = pcg->pcg_objects[i].pcgo_va;
   2046  1.136      yamt 			pool_cache_destruct_object1(pc, object);
   2047  1.134        ad 		}
   2048  1.102       chs 
   2049  1.142        ad 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
   2050  1.142        ad 			pool_put(&pcg_large_pool, pcg);
   2051  1.142        ad 		} else {
   2052  1.142        ad 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
   2053  1.142        ad 			pool_put(&pcg_normal_pool, pcg);
   2054  1.142        ad 		}
   2055  1.102       chs 	}
   2056  1.102       chs }
   2057  1.102       chs 
   2058   1.43   thorpej /*
   2059  1.134        ad  * pool_cache_invalidate:
   2060   1.43   thorpej  *
   2061  1.134        ad  *	Invalidate a pool cache (destruct and release all of the
   2062  1.134        ad  *	cached objects).  Does not reclaim objects from the pool.
   2063  1.176   thorpej  *
   2064  1.176   thorpej  *	Note: For pool caches that provide constructed objects, there
   2065  1.176   thorpej  *	is an assumption that another level of synchronization is occurring
   2066  1.176   thorpej  *	between the input to the constructor and the cache invalidation.
   2067  1.196       jym  *
   2068  1.196       jym  *	Invalidation is a costly process and should not be called from
   2069  1.196       jym  *	interrupt context.
   2070   1.43   thorpej  */
   2071  1.134        ad void
   2072  1.134        ad pool_cache_invalidate(pool_cache_t pc)
   2073  1.134        ad {
   2074  1.196       jym 	uint64_t where;
   2075  1.134        ad 	pcg_t *full, *empty, *part;
   2076  1.196       jym 
   2077  1.196       jym 	KASSERT(!cpu_intr_p() && !cpu_softintr_p());
   2078  1.176   thorpej 
   2079  1.177       jym 	if (ncpu < 2 || !mp_online) {
   2080  1.176   thorpej 		/*
   2081  1.176   thorpej 		 * We might be called early enough in the boot process
   2082  1.176   thorpej 		 * for the CPU data structures to not be fully initialized.
    2083  1.196       jym 		 * In this case, transfer the contents of the local CPU's
    2084  1.196       jym 		 * cache back into the global cache, as only this CPU is
    2085  1.196       jym 		 * currently running.
   2086  1.176   thorpej 		 */
   2087  1.196       jym 		pool_cache_transfer(pc);
   2088  1.176   thorpej 	} else {
   2089  1.176   thorpej 		/*
   2090  1.196       jym 		 * Signal all CPUs that they must transfer their local
   2091  1.196       jym 		 * cache back to the global pool then wait for the xcall to
    2092  1.196       jym 		 * cache back to the global pool, then wait for the xcall to
   2093  1.176   thorpej 		 */
   2094  1.196       jym 		where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
   2095  1.196       jym 		    pc, NULL);
   2096  1.176   thorpej 		xc_wait(where);
   2097  1.176   thorpej 	}
   2098  1.196       jym 
   2099  1.196       jym 	/* Empty pool caches, then invalidate objects */
   2100  1.134        ad 	mutex_enter(&pc->pc_lock);
   2101  1.134        ad 	full = pc->pc_fullgroups;
   2102  1.134        ad 	empty = pc->pc_emptygroups;
   2103  1.134        ad 	part = pc->pc_partgroups;
   2104  1.134        ad 	pc->pc_fullgroups = NULL;
   2105  1.134        ad 	pc->pc_emptygroups = NULL;
   2106  1.134        ad 	pc->pc_partgroups = NULL;
   2107  1.134        ad 	pc->pc_nfull = 0;
   2108  1.134        ad 	pc->pc_nempty = 0;
   2109  1.134        ad 	pc->pc_npart = 0;
   2110  1.134        ad 	mutex_exit(&pc->pc_lock);
   2111  1.134        ad 
   2112  1.134        ad 	pool_cache_invalidate_groups(pc, full);
   2113  1.134        ad 	pool_cache_invalidate_groups(pc, empty);
   2114  1.134        ad 	pool_cache_invalidate_groups(pc, part);
   2115  1.134        ad }
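
/*
 * Illustrative sketch (hypothetical trigger): if constructed state
 * baked in by the ctor depends on some global that has changed,
 * invalidate the cache so stale objects are destructed instead of
 * being handed out again.  Must not be called from interrupt context.
 *
 *	foo_generation++;
 *	pool_cache_invalidate(foo_cache);
 */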
   2116  1.134        ad 
   2117  1.175       jym /*
   2118  1.175       jym  * pool_cache_invalidate_cpu:
   2119  1.175       jym  *
   2120  1.175       jym  *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
   2121  1.175       jym  *	identified by its associated index.
    2122  1.175       jym  *	It is the caller's responsibility to ensure that no operation is
    2123  1.175       jym  *	taking place on this pool cache while doing this invalidation.
    2124  1.175       jym  *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
    2125  1.175       jym  *	cached objects belonging to a CPU other than the one currently
    2126  1.175       jym  *	running may result in undefined behaviour.
   2127  1.175       jym  */
   2128  1.175       jym static void
   2129  1.175       jym pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
   2130  1.175       jym {
   2131  1.175       jym 	pool_cache_cpu_t *cc;
   2132  1.175       jym 	pcg_t *pcg;
   2133  1.175       jym 
   2134  1.175       jym 	if ((cc = pc->pc_cpus[index]) == NULL)
   2135  1.175       jym 		return;
   2136  1.175       jym 
   2137  1.175       jym 	if ((pcg = cc->cc_current) != &pcg_dummy) {
   2138  1.175       jym 		pcg->pcg_next = NULL;
   2139  1.175       jym 		pool_cache_invalidate_groups(pc, pcg);
   2140  1.175       jym 	}
   2141  1.175       jym 	if ((pcg = cc->cc_previous) != &pcg_dummy) {
   2142  1.175       jym 		pcg->pcg_next = NULL;
   2143  1.175       jym 		pool_cache_invalidate_groups(pc, pcg);
   2144  1.175       jym 	}
   2145  1.175       jym 	if (cc != &pc->pc_cpu0)
   2146  1.175       jym 		pool_put(&cache_cpu_pool, cc);
   2147  1.175       jym 
   2148  1.175       jym }
   2149  1.175       jym 
   2150  1.134        ad void
   2151  1.134        ad pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
   2152  1.134        ad {
   2153  1.134        ad 
   2154  1.134        ad 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
   2155  1.134        ad }
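
/*
 * Illustrative sketch (hypothetical hook and softc): a drain hook
 * lets a subsystem release private reserves when the pool is asked
 * to reclaim memory.  It is invoked with the pool unlocked (see
 * pool_reclaim() above); flags is PR_NOWAIT or PR_WAITOK.
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		foo_release_reserves(sc, (flags & PR_WAITOK) != 0);
 *	}
 *
 *	pool_cache_set_drain_hook(foo_cache, foo_drain, sc);
 */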
   2156  1.134        ad 
   2157  1.134        ad void
   2158  1.134        ad pool_cache_setlowat(pool_cache_t pc, int n)
   2159  1.134        ad {
   2160  1.134        ad 
   2161  1.134        ad 	pool_setlowat(&pc->pc_pool, n);
   2162  1.134        ad }
   2163  1.134        ad 
   2164  1.134        ad void
   2165  1.134        ad pool_cache_sethiwat(pool_cache_t pc, int n)
   2166  1.134        ad {
   2167  1.134        ad 
   2168  1.134        ad 	pool_sethiwat(&pc->pc_pool, n);
   2169  1.134        ad }
   2170  1.134        ad 
   2171  1.134        ad void
   2172  1.134        ad pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
   2173  1.134        ad {
   2174  1.134        ad 
   2175  1.134        ad 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
   2176  1.134        ad }
   2177  1.134        ad 
   2178  1.162        ad static bool __noinline
   2179  1.162        ad pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
   2180  1.134        ad 		    paddr_t *pap, int flags)
   2181   1.43   thorpej {
   2182  1.134        ad 	pcg_t *pcg, *cur;
   2183  1.134        ad 	uint64_t ncsw;
   2184  1.134        ad 	pool_cache_t pc;
   2185   1.43   thorpej 	void *object;
   2186   1.58   thorpej 
   2187  1.168      yamt 	KASSERT(cc->cc_current->pcg_avail == 0);
   2188  1.168      yamt 	KASSERT(cc->cc_previous->pcg_avail == 0);
   2189  1.168      yamt 
   2190  1.134        ad 	pc = cc->cc_cache;
   2191  1.134        ad 	cc->cc_misses++;
   2192   1.43   thorpej 
   2193  1.134        ad 	/*
   2194  1.134        ad 	 * Nothing was available locally.  Try and grab a group
   2195  1.134        ad 	 * from the cache.
   2196  1.134        ad 	 */
   2197  1.162        ad 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2198  1.134        ad 		ncsw = curlwp->l_ncsw;
   2199  1.134        ad 		mutex_enter(&pc->pc_lock);
   2200  1.134        ad 		pc->pc_contended++;
   2201   1.43   thorpej 
   2202  1.134        ad 		/*
   2203  1.134        ad 		 * If we context switched while locking, then
   2204  1.134        ad 		 * our view of the per-CPU data is invalid:
   2205  1.134        ad 		 * retry.
   2206  1.134        ad 		 */
   2207  1.134        ad 		if (curlwp->l_ncsw != ncsw) {
   2208  1.134        ad 			mutex_exit(&pc->pc_lock);
   2209  1.162        ad 			return true;
   2210   1.43   thorpej 		}
   2211  1.102       chs 	}
   2212   1.43   thorpej 
   2213  1.162        ad 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
   2214   1.43   thorpej 		/*
   2215  1.134        ad 		 * If there's a full group, release our empty
   2216  1.134        ad 		 * group back to the cache.  Install the full
   2217  1.134        ad 		 * group as cc_current and return.
   2218   1.43   thorpej 		 */
   2219  1.162        ad 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
   2220  1.134        ad 			KASSERT(cur->pcg_avail == 0);
   2221  1.134        ad 			cur->pcg_next = pc->pc_emptygroups;
   2222  1.134        ad 			pc->pc_emptygroups = cur;
   2223  1.134        ad 			pc->pc_nempty++;
   2224   1.87   thorpej 		}
   2225  1.142        ad 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
   2226  1.134        ad 		cc->cc_current = pcg;
   2227  1.134        ad 		pc->pc_fullgroups = pcg->pcg_next;
   2228  1.134        ad 		pc->pc_hits++;
   2229  1.134        ad 		pc->pc_nfull--;
   2230  1.134        ad 		mutex_exit(&pc->pc_lock);
   2231  1.162        ad 		return true;
   2232  1.134        ad 	}
   2233  1.134        ad 
   2234  1.134        ad 	/*
   2235  1.134        ad 	 * Nothing available locally or in cache.  Take the slow
   2236  1.134        ad 	 * path: fetch a new object from the pool and construct
   2237  1.134        ad 	 * it.
   2238  1.134        ad 	 */
   2239  1.134        ad 	pc->pc_misses++;
   2240  1.134        ad 	mutex_exit(&pc->pc_lock);
   2241  1.162        ad 	splx(s);
   2242  1.134        ad 
   2243  1.134        ad 	object = pool_get(&pc->pc_pool, flags);
   2244  1.134        ad 	*objectp = object;
   2245  1.211  riastrad 	if (__predict_false(object == NULL)) {
   2246  1.211  riastrad 		KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
   2247  1.162        ad 		return false;
   2248  1.211  riastrad 	}
   2249  1.125        ad 
   2250  1.162        ad 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
   2251  1.134        ad 		pool_put(&pc->pc_pool, object);
   2252  1.134        ad 		*objectp = NULL;
   2253  1.162        ad 		return false;
   2254   1.43   thorpej 	}
   2255   1.43   thorpej 
   2256  1.134        ad 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
   2257  1.134        ad 	    (pc->pc_pool.pr_align - 1)) == 0);
   2258   1.43   thorpej 
   2259  1.134        ad 	if (pap != NULL) {
   2260  1.134        ad #ifdef POOL_VTOPHYS
   2261  1.134        ad 		*pap = POOL_VTOPHYS(object);
   2262  1.134        ad #else
   2263  1.134        ad 		*pap = POOL_PADDR_INVALID;
   2264  1.134        ad #endif
   2265  1.102       chs 	}
   2266   1.43   thorpej 
   2267  1.125        ad 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2268  1.204      maxv 	pool_redzone_fill(&pc->pc_pool, object);
   2269  1.228      maxv 	pool_cache_kleak_fill(pc, object);
   2270  1.162        ad 	return false;
   2271   1.43   thorpej }
   2272   1.43   thorpej 
   2273   1.43   thorpej /*
   2274  1.134        ad  * pool_cache_get{,_paddr}:
   2275   1.43   thorpej  *
   2276  1.134        ad  *	Get an object from a pool cache (optionally returning
   2277  1.134        ad  *	the physical address of the object).
   2278   1.43   thorpej  */
   2279  1.134        ad void *
   2280  1.134        ad pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
   2281   1.43   thorpej {
   2282  1.134        ad 	pool_cache_cpu_t *cc;
   2283  1.134        ad 	pcg_t *pcg;
   2284  1.134        ad 	void *object;
   2285   1.60   thorpej 	int s;
   2286   1.43   thorpej 
   2287  1.215  christos 	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
   2288  1.184     rmind 	KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
   2289  1.185     rmind 	    (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
   2290  1.213  christos 	    "%s: [%s] is IPL_NONE, but called from interrupt context",
   2291  1.213  christos 	    __func__, pc->pc_pool.pr_wchan);
   2292  1.184     rmind 
   2293  1.155        ad 	if (flags & PR_WAITOK) {
   2294  1.154      yamt 		ASSERT_SLEEPABLE();
   2295  1.155        ad 	}
   2296  1.125        ad 
   2297  1.162        ad 	/* Lock out interrupts and disable preemption. */
   2298  1.162        ad 	s = splvm();
   2299  1.165      yamt 	while (/* CONSTCOND */ true) {
   2300  1.134        ad 		/* Try and allocate an object from the current group. */
   2301  1.162        ad 		cc = pc->pc_cpus[curcpu()->ci_index];
   2302  1.162        ad 		KASSERT(cc->cc_cache == pc);
    2303  1.134        ad 		pcg = cc->cc_current;
   2304  1.162        ad 		if (__predict_true(pcg->pcg_avail > 0)) {
   2305  1.134        ad 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
   2306  1.162        ad 			if (__predict_false(pap != NULL))
   2307  1.134        ad 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
   2308  1.148      yamt #if defined(DIAGNOSTIC)
   2309  1.134        ad 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
   2310  1.163        ad 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
   2311  1.134        ad 			KASSERT(object != NULL);
   2312  1.163        ad #endif
   2313  1.134        ad 			cc->cc_hits++;
   2314  1.162        ad 			splx(s);
   2315  1.134        ad 			FREECHECK_OUT(&pc->pc_freecheck, object);
   2316  1.204      maxv 			pool_redzone_fill(&pc->pc_pool, object);
   2317  1.228      maxv 			pool_cache_kleak_fill(pc, object);
   2318  1.134        ad 			return object;
   2319   1.43   thorpej 		}
   2320   1.43   thorpej 
   2321   1.43   thorpej 		/*
   2322  1.134        ad 		 * That failed.  If the previous group isn't empty, swap
   2323  1.134        ad 		 * it with the current group and allocate from there.
   2324   1.43   thorpej 		 */
   2325  1.134        ad 		pcg = cc->cc_previous;
   2326  1.162        ad 		if (__predict_true(pcg->pcg_avail > 0)) {
   2327  1.134        ad 			cc->cc_previous = cc->cc_current;
   2328  1.134        ad 			cc->cc_current = pcg;
   2329  1.134        ad 			continue;
   2330   1.43   thorpej 		}
   2331   1.43   thorpej 
   2332  1.134        ad 		/*
   2333  1.134        ad 		 * Can't allocate from either group: try the slow path.
   2334  1.134        ad 		 * If get_slow() allocated an object for us, or if
   2335  1.162        ad 		 * no more objects are available, it will return false.
   2336  1.134        ad 		 * Otherwise, we need to retry.
   2337  1.134        ad 		 */
   2338  1.165      yamt 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
   2339  1.165      yamt 			break;
   2340  1.165      yamt 	}
   2341   1.43   thorpej 
   2342  1.211  riastrad 	/*
   2343  1.211  riastrad 	 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
   2344  1.211  riastrad 	 * pool_cache_get can fail even in the PR_WAITOK case, if the
   2345  1.211  riastrad 	 * constructor fails.
   2346  1.211  riastrad 	 */
   2347  1.134        ad 	return object;
   2348   1.51   thorpej }
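
/*
 * Usage sketch (example only, not compiled): a typical consumer of
 * the cache API.  "struct foo" and "foo_cache" are hypothetical;
 * pool_cache_get() is the variant that discards the physical address.
 */
#if 0
struct foo;
extern pool_cache_t foo_cache;

struct foo *
foo_alloc(void)
{
	struct foo *f;

	f = pool_cache_get(foo_cache, PR_WAITOK);
	if (f == NULL) {
		/* Possible even with PR_WAITOK: the constructor failed. */
		return NULL;
	}
	return f;
}

void
foo_free(struct foo *f)
{

	pool_cache_put(foo_cache, f);
}
#endif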
   2349   1.51   thorpej 
   2350  1.162        ad static bool __noinline
   2351  1.162        ad pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
   2352   1.51   thorpej {
   2353  1.200     pooka 	struct lwp *l = curlwp;
   2354  1.163        ad 	pcg_t *pcg, *cur;
   2355  1.134        ad 	uint64_t ncsw;
   2356  1.134        ad 	pool_cache_t pc;
   2357   1.51   thorpej 
   2358  1.168      yamt 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
   2359  1.168      yamt 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
   2360  1.168      yamt 
   2361  1.134        ad 	pc = cc->cc_cache;
   2362  1.171        ad 	pcg = NULL;
   2363  1.134        ad 	cc->cc_misses++;
   2364  1.200     pooka 	ncsw = l->l_ncsw;
   2365   1.43   thorpej 
   2366  1.171        ad 	/*
   2367  1.171        ad 	 * If there are no empty groups in the cache then allocate one
   2368  1.171        ad 	 * while still unlocked.
   2369  1.171        ad 	 */
   2370  1.171        ad 	if (__predict_false(pc->pc_emptygroups == NULL)) {
   2371  1.171        ad 		if (__predict_true(!pool_cache_disable)) {
   2372  1.171        ad 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
   2373  1.171        ad 		}
   2374  1.200     pooka 		/*
   2375  1.200     pooka 		 * If pool_get() blocked, then our view of
   2376  1.200     pooka 		 * the per-CPU data is invalid: retry.
   2377  1.200     pooka 		 */
   2378  1.200     pooka 		if (__predict_false(l->l_ncsw != ncsw)) {
   2379  1.200     pooka 			if (pcg != NULL) {
   2380  1.200     pooka 				pool_put(pc->pc_pcgpool, pcg);
   2381  1.200     pooka 			}
   2382  1.200     pooka 			return true;
   2383  1.200     pooka 		}
   2384  1.171        ad 		if (__predict_true(pcg != NULL)) {
   2385  1.171        ad 			pcg->pcg_avail = 0;
   2386  1.171        ad 			pcg->pcg_size = pc->pc_pcgsize;
   2387  1.171        ad 		}
   2388  1.171        ad 	}
   2389  1.171        ad 
   2390  1.162        ad 	/* Lock the cache. */
   2391  1.162        ad 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2392  1.134        ad 		mutex_enter(&pc->pc_lock);
   2393  1.134        ad 		pc->pc_contended++;
   2394  1.162        ad 
   2395  1.163        ad 		/*
   2396  1.163        ad 		 * If we context switched while locking, then our view of
   2397  1.163        ad 		 * the per-CPU data is invalid: retry.
   2398  1.163        ad 		 */
   2399  1.200     pooka 		if (__predict_false(l->l_ncsw != ncsw)) {
   2400  1.163        ad 			mutex_exit(&pc->pc_lock);
   2401  1.171        ad 			if (pcg != NULL) {
   2402  1.171        ad 				pool_put(pc->pc_pcgpool, pcg);
   2403  1.171        ad 			}
   2404  1.163        ad 			return true;
   2405  1.163        ad 		}
   2406  1.162        ad 	}
   2407  1.102       chs 
   2408  1.163        ad 	/* If there are no empty groups in the cache then allocate one. */
   2409  1.171        ad 	if (pcg == NULL && pc->pc_emptygroups != NULL) {
   2410  1.171        ad 		pcg = pc->pc_emptygroups;
   2411  1.163        ad 		pc->pc_emptygroups = pcg->pcg_next;
   2412  1.163        ad 		pc->pc_nempty--;
   2413  1.134        ad 	}
   2414  1.130        ad 
   2415  1.162        ad 	/*
    2416  1.162        ad 	 * If there's an empty group, release our full group back
    2417  1.162        ad 	 * to the cache.  Install the empty group on the local CPU
   2418  1.162        ad 	 * and return.
   2419  1.162        ad 	 */
   2420  1.163        ad 	if (pcg != NULL) {
   2421  1.134        ad 		KASSERT(pcg->pcg_avail == 0);
   2422  1.162        ad 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
   2423  1.146        ad 			cc->cc_previous = pcg;
   2424  1.146        ad 		} else {
   2425  1.162        ad 			cur = cc->cc_current;
   2426  1.162        ad 			if (__predict_true(cur != &pcg_dummy)) {
   2427  1.163        ad 				KASSERT(cur->pcg_avail == cur->pcg_size);
   2428  1.146        ad 				cur->pcg_next = pc->pc_fullgroups;
   2429  1.146        ad 				pc->pc_fullgroups = cur;
   2430  1.146        ad 				pc->pc_nfull++;
   2431  1.146        ad 			}
   2432  1.146        ad 			cc->cc_current = pcg;
   2433  1.146        ad 		}
   2434  1.163        ad 		pc->pc_hits++;
   2435  1.134        ad 		mutex_exit(&pc->pc_lock);
   2436  1.162        ad 		return true;
   2437  1.102       chs 	}
   2438  1.105  christos 
   2439  1.134        ad 	/*
   2440  1.162        ad 	 * Nothing available locally or in cache, and we didn't
   2441  1.162        ad 	 * allocate an empty group.  Take the slow path and destroy
   2442  1.162        ad 	 * the object here and now.
   2443  1.134        ad 	 */
   2444  1.134        ad 	pc->pc_misses++;
   2445  1.134        ad 	mutex_exit(&pc->pc_lock);
   2446  1.162        ad 	splx(s);
   2447  1.162        ad 	pool_cache_destruct_object(pc, object);
   2448  1.105  christos 
   2449  1.162        ad 	return false;
   2450  1.134        ad }
   2451  1.102       chs 
   2452   1.43   thorpej /*
   2453  1.134        ad  * pool_cache_put{,_paddr}:
   2454   1.43   thorpej  *
   2455  1.134        ad  *	Put an object back to the pool cache (optionally caching the
   2456  1.134        ad  *	physical address of the object).
   2457   1.43   thorpej  */
   2458  1.101   thorpej void
   2459  1.134        ad pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
   2460   1.43   thorpej {
   2461  1.134        ad 	pool_cache_cpu_t *cc;
   2462  1.134        ad 	pcg_t *pcg;
   2463  1.134        ad 	int s;
   2464  1.101   thorpej 
   2465  1.172      yamt 	KASSERT(object != NULL);
   2466  1.204      maxv 	pool_redzone_check(&pc->pc_pool, object);
   2467  1.134        ad 	FREECHECK_IN(&pc->pc_freecheck, object);
   2468  1.101   thorpej 
   2469  1.162        ad 	/* Lock out interrupts and disable preemption. */
   2470  1.162        ad 	s = splvm();
   2471  1.165      yamt 	while (/* CONSTCOND */ true) {
   2472  1.134        ad 		/* If the current group isn't full, release it there. */
   2473  1.162        ad 		cc = pc->pc_cpus[curcpu()->ci_index];
   2474  1.162        ad 		KASSERT(cc->cc_cache == pc);
    2475  1.134        ad 		pcg = cc->cc_current;
   2476  1.162        ad 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2477  1.134        ad 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
   2478  1.134        ad 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
   2479  1.134        ad 			pcg->pcg_avail++;
   2480  1.134        ad 			cc->cc_hits++;
   2481  1.162        ad 			splx(s);
   2482  1.134        ad 			return;
   2483  1.134        ad 		}
   2484   1.43   thorpej 
   2485  1.134        ad 		/*
   2486  1.162        ad 		 * That failed.  If the previous group isn't full, swap
   2487  1.134        ad 		 * it with the current group and try again.
   2488  1.134        ad 		 */
   2489  1.134        ad 		pcg = cc->cc_previous;
   2490  1.162        ad 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2491  1.134        ad 			cc->cc_previous = cc->cc_current;
   2492  1.134        ad 			cc->cc_current = pcg;
   2493  1.134        ad 			continue;
   2494  1.134        ad 		}
   2495   1.43   thorpej 
   2496  1.134        ad 		/*
   2497  1.134        ad 		 * Can't free to either group: try the slow path.
   2498  1.134        ad 		 * If put_slow() releases the object for us, it
   2499  1.162        ad 		 * will return false.  Otherwise we need to retry.
   2500  1.134        ad 		 */
   2501  1.165      yamt 		if (!pool_cache_put_slow(cc, s, object))
   2502  1.165      yamt 			break;
   2503  1.165      yamt 	}
   2504   1.43   thorpej }
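
/*
 * Note on the design: each CPU keeps two groups, cc_current and
 * cc_previous, and both the get and put fast paths touch only those
 * with interrupts blocked, so no locks or atomics are needed there.
 * The slow paths exchange full and empty groups with the global lists
 * under pc_lock.  This is in the style of the "magazine" allocator
 * described by Bonwick and Adams.
 */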
   2505   1.43   thorpej 
   2506   1.43   thorpej /*
   2507  1.196       jym  * pool_cache_transfer:
   2508   1.43   thorpej  *
   2509  1.134        ad  *	Transfer objects from the per-CPU cache to the global cache.
   2510  1.134        ad  *	Run within a cross-call thread.
   2511   1.43   thorpej  */
   2512   1.43   thorpej static void
   2513  1.196       jym pool_cache_transfer(pool_cache_t pc)
   2514   1.43   thorpej {
   2515  1.134        ad 	pool_cache_cpu_t *cc;
   2516  1.134        ad 	pcg_t *prev, *cur, **list;
   2517  1.162        ad 	int s;
   2518  1.134        ad 
   2519  1.162        ad 	s = splvm();
   2520  1.162        ad 	mutex_enter(&pc->pc_lock);
   2521  1.162        ad 	cc = pc->pc_cpus[curcpu()->ci_index];
   2522  1.134        ad 	cur = cc->cc_current;
   2523  1.169      yamt 	cc->cc_current = __UNCONST(&pcg_dummy);
   2524  1.134        ad 	prev = cc->cc_previous;
   2525  1.169      yamt 	cc->cc_previous = __UNCONST(&pcg_dummy);
   2526  1.162        ad 	if (cur != &pcg_dummy) {
   2527  1.142        ad 		if (cur->pcg_avail == cur->pcg_size) {
   2528  1.134        ad 			list = &pc->pc_fullgroups;
   2529  1.134        ad 			pc->pc_nfull++;
   2530  1.134        ad 		} else if (cur->pcg_avail == 0) {
   2531  1.134        ad 			list = &pc->pc_emptygroups;
   2532  1.134        ad 			pc->pc_nempty++;
   2533  1.134        ad 		} else {
   2534  1.134        ad 			list = &pc->pc_partgroups;
   2535  1.134        ad 			pc->pc_npart++;
   2536  1.134        ad 		}
   2537  1.134        ad 		cur->pcg_next = *list;
   2538  1.134        ad 		*list = cur;
   2539  1.134        ad 	}
   2540  1.162        ad 	if (prev != &pcg_dummy) {
   2541  1.142        ad 		if (prev->pcg_avail == prev->pcg_size) {
   2542  1.134        ad 			list = &pc->pc_fullgroups;
   2543  1.134        ad 			pc->pc_nfull++;
   2544  1.134        ad 		} else if (prev->pcg_avail == 0) {
   2545  1.134        ad 			list = &pc->pc_emptygroups;
   2546  1.134        ad 			pc->pc_nempty++;
   2547  1.134        ad 		} else {
   2548  1.134        ad 			list = &pc->pc_partgroups;
   2549  1.134        ad 			pc->pc_npart++;
   2550  1.134        ad 		}
   2551  1.134        ad 		prev->pcg_next = *list;
   2552  1.134        ad 		*list = prev;
   2553  1.134        ad 	}
   2554  1.134        ad 	mutex_exit(&pc->pc_lock);
   2555  1.134        ad 	splx(s);
   2556    1.3        pk }
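
/*
 * Invocation sketch (example only, not compiled): a caller can run
 * the transfer on every CPU through the cross-call facility and wait
 * for completion, along these lines.  The wrapper name is
 * hypothetical.
 */
#if 0
static void
foo_drain_all_cpus(pool_cache_t pc)
{
	uint64_t where;

	where = xc_broadcast(0, (xcfn_t)pool_cache_transfer, pc, NULL);
	xc_wait(where);
}
#endif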
   2557   1.66   thorpej 
   2558   1.66   thorpej /*
   2559   1.66   thorpej  * Pool backend allocators.
   2560   1.66   thorpej  *
   2561   1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   2562   1.66   thorpej  * and any additional draining that might be needed.
   2563   1.66   thorpej  *
   2564   1.66   thorpej  * We provide two standard allocators:
   2565   1.66   thorpej  *
   2566   1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   2567   1.66   thorpej  *
   2568   1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   2569   1.66   thorpej  *	in interrupt context.
   2570   1.66   thorpej  */
   2571   1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   2572   1.66   thorpej void	pool_page_free(struct pool *, void *);
   2573   1.66   thorpej 
   2574  1.112     bjh21 #ifdef POOL_SUBPAGE
   2575  1.112     bjh21 struct pool_allocator pool_allocator_kmem_fullpage = {
   2576  1.192     rmind 	.pa_alloc = pool_page_alloc,
   2577  1.192     rmind 	.pa_free = pool_page_free,
   2578  1.192     rmind 	.pa_pagesz = 0
   2579  1.112     bjh21 };
   2580  1.112     bjh21 #else
   2581   1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   2582  1.191      para 	.pa_alloc = pool_page_alloc,
   2583  1.191      para 	.pa_free = pool_page_free,
   2584  1.191      para 	.pa_pagesz = 0
   2585   1.66   thorpej };
   2586  1.112     bjh21 #endif
   2587   1.66   thorpej 
   2588  1.112     bjh21 #ifdef POOL_SUBPAGE
   2589  1.112     bjh21 struct pool_allocator pool_allocator_nointr_fullpage = {
   2590  1.194      para 	.pa_alloc = pool_page_alloc,
   2591  1.194      para 	.pa_free = pool_page_free,
   2592  1.192     rmind 	.pa_pagesz = 0
   2593  1.112     bjh21 };
   2594  1.112     bjh21 #else
   2595   1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   2596  1.191      para 	.pa_alloc = pool_page_alloc,
   2597  1.191      para 	.pa_free = pool_page_free,
   2598  1.191      para 	.pa_pagesz = 0
   2599   1.66   thorpej };
   2600  1.112     bjh21 #endif
   2601   1.66   thorpej 
   2602   1.66   thorpej #ifdef POOL_SUBPAGE
   2603   1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   2604   1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   2605   1.66   thorpej 
   2606  1.112     bjh21 struct pool_allocator pool_allocator_kmem = {
   2607  1.193        he 	.pa_alloc = pool_subpage_alloc,
   2608  1.193        he 	.pa_free = pool_subpage_free,
   2609  1.193        he 	.pa_pagesz = POOL_SUBPAGE
   2610  1.112     bjh21 };
   2611  1.112     bjh21 
   2612  1.112     bjh21 struct pool_allocator pool_allocator_nointr = {
   2613  1.192     rmind 	.pa_alloc = pool_subpage_alloc,
   2614  1.192     rmind 	.pa_free = pool_subpage_free,
   2615  1.192     rmind 	.pa_pagesz = POOL_SUBPAGE
   2616   1.66   thorpej };
   2617   1.66   thorpej #endif /* POOL_SUBPAGE */
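
/*
 * Sketch (example only, not compiled): a custom backend allocator.
 * All "foo" names are hypothetical.  A pa_pagesz of 0 selects the
 * default page size; the structure is then passed as the "palloc"
 * argument of pool_init() or pool_cache_init().
 */
#if 0
static void *
foo_page_alloc(struct pool *pp, int flags)
{

	/* Hand out one pa_pagesz-sized chunk from a private backend. */
	return foo_backend_alloc(pp->pr_alloc->pa_pagesz,
	    (flags & PR_WAITOK) != 0);
}

static void
foo_page_free(struct pool *pp, void *v)
{

	foo_backend_free(v, pp->pr_alloc->pa_pagesz);
}

static struct pool_allocator foo_allocator = {
	.pa_alloc = foo_page_alloc,
	.pa_free = foo_page_free,
	.pa_pagesz = 0
};
#endif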
   2618   1.66   thorpej 
   2619  1.208       chs struct pool_allocator pool_allocator_big[] = {
   2620  1.208       chs 	{
   2621  1.208       chs 		.pa_alloc = pool_page_alloc,
   2622  1.208       chs 		.pa_free = pool_page_free,
   2623  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
   2624  1.208       chs 	},
   2625  1.208       chs 	{
   2626  1.208       chs 		.pa_alloc = pool_page_alloc,
   2627  1.208       chs 		.pa_free = pool_page_free,
   2628  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
   2629  1.208       chs 	},
   2630  1.208       chs 	{
   2631  1.208       chs 		.pa_alloc = pool_page_alloc,
   2632  1.208       chs 		.pa_free = pool_page_free,
   2633  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
   2634  1.208       chs 	},
   2635  1.208       chs 	{
   2636  1.208       chs 		.pa_alloc = pool_page_alloc,
   2637  1.208       chs 		.pa_free = pool_page_free,
   2638  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
   2639  1.208       chs 	},
   2640  1.208       chs 	{
   2641  1.208       chs 		.pa_alloc = pool_page_alloc,
   2642  1.208       chs 		.pa_free = pool_page_free,
   2643  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
   2644  1.208       chs 	},
   2645  1.208       chs 	{
   2646  1.208       chs 		.pa_alloc = pool_page_alloc,
   2647  1.208       chs 		.pa_free = pool_page_free,
   2648  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
   2649  1.208       chs 	},
   2650  1.208       chs 	{
   2651  1.208       chs 		.pa_alloc = pool_page_alloc,
   2652  1.208       chs 		.pa_free = pool_page_free,
   2653  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
   2654  1.208       chs 	},
   2655  1.208       chs 	{
   2656  1.208       chs 		.pa_alloc = pool_page_alloc,
   2657  1.208       chs 		.pa_free = pool_page_free,
   2658  1.208       chs 		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
   2659  1.208       chs 	}
   2660  1.208       chs };
   2661  1.208       chs 
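/*
 * Worked example: assuming POOL_ALLOCATOR_BIG_BASE is 13, index 0
 * serves items up to 8 KiB (1 << 13) and index 7 items up to 1 MiB
 * (1 << 20); a 40 KiB item maps to index 3, i.e. 64 KiB pages.
 */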
   2662  1.208       chs static int
   2663  1.208       chs pool_bigidx(size_t size)
   2664  1.208       chs {
   2665  1.208       chs 	int i;
   2666  1.208       chs 
   2667  1.208       chs 	for (i = 0; i < __arraycount(pool_allocator_big); i++) {
   2668  1.208       chs 		if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
   2669  1.208       chs 			return i;
   2670  1.208       chs 	}
   2671  1.208       chs 	panic("pool item size %zu too large, use a custom allocator", size);
   2672  1.208       chs }
   2673  1.208       chs 
   2674  1.117      yamt static void *
   2675  1.117      yamt pool_allocator_alloc(struct pool *pp, int flags)
   2676   1.66   thorpej {
   2677  1.117      yamt 	struct pool_allocator *pa = pp->pr_alloc;
   2678   1.66   thorpej 	void *res;
   2679   1.66   thorpej 
   2680  1.117      yamt 	res = (*pa->pa_alloc)(pp, flags);
   2681  1.117      yamt 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2682   1.66   thorpej 		/*
   2683  1.117      yamt 		 * We only run the drain hook here if PR_NOWAIT.
   2684  1.117      yamt 		 * In other cases, the hook will be run in
   2685  1.117      yamt 		 * pool_reclaim().
   2686   1.66   thorpej 		 */
   2687  1.117      yamt 		if (pp->pr_drain_hook != NULL) {
   2688  1.117      yamt 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2689  1.117      yamt 			res = (*pa->pa_alloc)(pp, flags);
   2690   1.66   thorpej 		}
   2691  1.117      yamt 	}
   2692  1.117      yamt 	return res;
   2693   1.66   thorpej }
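
/*
 * Registration sketch (example only, not compiled): a subsystem that
 * caches objects privately can install a drain hook so the pool can
 * ask it to give memory back.  With PR_NOWAIT the hook runs inline,
 * as above; with PR_WAITOK it runs from pool_reclaim().  The "foo"
 * names are hypothetical.
 */
#if 0
extern struct pool foo_pool;

static void
foo_drain(void *arg, int flags)
{

	/* Release privately cached objects back to foo_pool. */
}

static void
foo_init(void)
{

	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
}
#endif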
   2694   1.66   thorpej 
   2695  1.117      yamt static void
   2696   1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2697   1.66   thorpej {
   2698   1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2699   1.66   thorpej 
   2700   1.66   thorpej 	(*pa->pa_free)(pp, v);
   2701   1.66   thorpej }
   2702   1.66   thorpej 
   2703   1.66   thorpej void *
   2704  1.124      yamt pool_page_alloc(struct pool *pp, int flags)
   2705   1.66   thorpej {
   2706  1.192     rmind 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
   2707  1.191      para 	vmem_addr_t va;
   2708  1.192     rmind 	int ret;
   2709  1.191      para 
   2710  1.192     rmind 	ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
   2711  1.192     rmind 	    vflags | VM_INSTANTFIT, &va);
   2712   1.66   thorpej 
   2713  1.192     rmind 	return ret ? NULL : (void *)va;
   2714   1.66   thorpej }
   2715   1.66   thorpej 
   2716   1.66   thorpej void
   2717  1.124      yamt pool_page_free(struct pool *pp, void *v)
   2718   1.66   thorpej {
   2719   1.66   thorpej 
   2720  1.191      para 	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
   2721   1.98      yamt }
   2722   1.98      yamt 
   2723   1.98      yamt static void *
   2724  1.124      yamt pool_page_alloc_meta(struct pool *pp, int flags)
   2725   1.98      yamt {
   2726  1.192     rmind 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
   2727  1.192     rmind 	vmem_addr_t va;
   2728  1.192     rmind 	int ret;
   2729  1.191      para 
   2730  1.192     rmind 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
   2731  1.192     rmind 	    vflags | VM_INSTANTFIT, &va);
   2732   1.98      yamt 
   2733  1.192     rmind 	return ret ? NULL : (void *)va;
   2734   1.98      yamt }
   2735   1.98      yamt 
   2736   1.98      yamt static void
   2737  1.124      yamt pool_page_free_meta(struct pool *pp, void *v)
   2738   1.98      yamt {
   2739   1.98      yamt 
   2740  1.192     rmind 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
   2741   1.66   thorpej }
   2742   1.66   thorpej 
   2743  1.228      maxv #ifdef KLEAK
   2744  1.228      maxv static void
   2745  1.228      maxv pool_kleak_fill(struct pool *pp, void *p)
   2746  1.228      maxv {
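	/*
	 * PR_NOTOUCH pools must not have their item storage written
	 * to by the pool code, so they cannot be pattern-filled.
	 */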
   2747  1.228      maxv 	if (__predict_false(pp->pr_roflags & PR_NOTOUCH)) {
   2748  1.228      maxv 		return;
   2749  1.228      maxv 	}
   2750  1.228      maxv 	kleak_fill_area(p, pp->pr_size);
   2751  1.228      maxv }
   2752  1.228      maxv 
   2753  1.228      maxv static void
   2754  1.228      maxv pool_cache_kleak_fill(pool_cache_t pc, void *p)
   2755  1.228      maxv {
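	/*
	 * Objects in a cache with a ctor or dtor stay constructed
	 * while cached; overwriting them with a fill pattern would
	 * destroy that state.  Only ctor-less caches are filled.
	 */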
   2756  1.228      maxv 	if (__predict_false(pc->pc_ctor != NULL || pc->pc_dtor != NULL)) {
   2757  1.228      maxv 		return;
   2758  1.228      maxv 	}
   2759  1.228      maxv 	pool_kleak_fill(&pc->pc_pool, p);
   2760  1.228      maxv }
   2761  1.228      maxv #endif
   2762  1.228      maxv 
   2763  1.204      maxv #ifdef POOL_REDZONE
   2764  1.204      maxv #if defined(_LP64)
   2765  1.204      maxv # define PRIME 0x9e37fffffffc0000UL
   2766  1.204      maxv #else /* defined(_LP64) */
   2767  1.204      maxv # define PRIME 0x9e3779b1
   2768  1.204      maxv #endif /* defined(_LP64) */
   2769  1.204      maxv #define STATIC_BYTE	0xFE
   2770  1.204      maxv CTASSERT(POOL_REDZONE_SIZE > 1);
   2771  1.204      maxv 
   2772  1.224      maxv #ifndef KASAN
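/*
 * Map an address to a fill byte with a multiplicative hash in the
 * style of golden-ratio (Fibonacci) hashing: multiply by a large
 * constant and keep the top byte, so that neighbouring red-zone
 * bytes get different, position-dependent values.
 */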
   2773  1.204      maxv static inline uint8_t
   2774  1.204      maxv pool_pattern_generate(const void *p)
   2775  1.204      maxv {
    2776  1.204      maxv 	return (uint8_t)((((uintptr_t)p) * PRIME)
    2777  1.204      maxv 	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
   2778  1.204      maxv }
   2779  1.224      maxv #endif
   2780  1.204      maxv 
   2781  1.204      maxv static void
   2782  1.204      maxv pool_redzone_init(struct pool *pp, size_t requested_size)
   2783  1.204      maxv {
   2784  1.227      maxv 	size_t redzsz;
   2785  1.204      maxv 	size_t nsz;
   2786  1.204      maxv 
   2787  1.227      maxv #ifdef KASAN
   2788  1.227      maxv 	redzsz = requested_size;
   2789  1.227      maxv 	kasan_add_redzone(&redzsz);
   2790  1.227      maxv 	redzsz -= requested_size;
   2791  1.227      maxv #else
   2792  1.227      maxv 	redzsz = POOL_REDZONE_SIZE;
   2793  1.227      maxv #endif
   2794  1.227      maxv 
   2795  1.204      maxv 	if (pp->pr_roflags & PR_NOTOUCH) {
   2796  1.204      maxv 		pp->pr_reqsize = 0;
   2797  1.204      maxv 		pp->pr_redzone = false;
   2798  1.204      maxv 		return;
   2799  1.204      maxv 	}
   2800  1.204      maxv 
   2801  1.204      maxv 	/*
   2802  1.204      maxv 	 * We may have extended the requested size earlier; check if
   2803  1.204      maxv 	 * there's naturally space in the padding for a red zone.
   2804  1.204      maxv 	 */
   2805  1.227      maxv 	if (pp->pr_size - requested_size >= redzsz) {
   2806  1.204      maxv 		pp->pr_reqsize = requested_size;
   2807  1.204      maxv 		pp->pr_redzone = true;
   2808  1.204      maxv 		return;
   2809  1.204      maxv 	}
   2810  1.204      maxv 
   2811  1.204      maxv 	/*
   2812  1.204      maxv 	 * No space in the natural padding; check if we can extend a
   2813  1.204      maxv 	 * bit the size of the pool.
   2814  1.204      maxv 	 */
   2815  1.227      maxv 	nsz = roundup(pp->pr_size + redzsz, pp->pr_align);
   2816  1.204      maxv 	if (nsz <= pp->pr_alloc->pa_pagesz) {
   2817  1.204      maxv 		/* Ok, we can */
   2818  1.204      maxv 		pp->pr_size = nsz;
   2819  1.204      maxv 		pp->pr_reqsize = requested_size;
   2820  1.204      maxv 		pp->pr_redzone = true;
   2821  1.204      maxv 	} else {
   2822  1.204      maxv 		/* No space for a red zone... snif :'( */
   2823  1.204      maxv 		pp->pr_reqsize = 0;
   2824  1.204      maxv 		pp->pr_redzone = false;
   2825  1.204      maxv 		printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
   2826  1.204      maxv 	}
   2827  1.204      maxv }
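
/*
 * Resulting item layout when the red zone is enabled (sketch):
 *
 *	+---------------------+----------+-------------------+
 *	| object (pr_reqsize) | red zone | alignment padding |
 *	+---------------------+----------+-------------------+
 *	|<--------------------- pr_size --------------------->|
 */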
   2828  1.204      maxv 
   2829  1.204      maxv static void
   2830  1.204      maxv pool_redzone_fill(struct pool *pp, void *p)
   2831  1.204      maxv {
   2832  1.224      maxv 	if (!pp->pr_redzone)
   2833  1.224      maxv 		return;
   2834  1.224      maxv #ifdef KASAN
   2835  1.227      maxv 	size_t size_with_redzone = pp->pr_reqsize;
   2836  1.227      maxv 	kasan_add_redzone(&size_with_redzone);
   2837  1.227      maxv 	kasan_alloc(p, pp->pr_reqsize, size_with_redzone);
   2838  1.224      maxv #else
   2839  1.204      maxv 	uint8_t *cp, pat;
   2840  1.204      maxv 	const uint8_t *ep;
   2841  1.204      maxv 
   2842  1.204      maxv 	cp = (uint8_t *)p + pp->pr_reqsize;
   2843  1.204      maxv 	ep = cp + POOL_REDZONE_SIZE;
   2844  1.204      maxv 
   2845  1.204      maxv 	/*
   2846  1.204      maxv 	 * We really don't want the first byte of the red zone to be '\0';
   2847  1.204      maxv 	 * an off-by-one in a string may not be properly detected.
   2848  1.204      maxv 	 */
   2849  1.204      maxv 	pat = pool_pattern_generate(cp);
   2850  1.204      maxv 	*cp = (pat == '\0') ? STATIC_BYTE: pat;
   2851  1.204      maxv 	cp++;
   2852  1.204      maxv 
   2853  1.204      maxv 	while (cp < ep) {
   2854  1.204      maxv 		*cp = pool_pattern_generate(cp);
   2855  1.204      maxv 		cp++;
   2856  1.204      maxv 	}
   2857  1.224      maxv #endif
   2858  1.204      maxv }
   2859  1.204      maxv 
   2860  1.204      maxv static void
   2861  1.204      maxv pool_redzone_check(struct pool *pp, void *p)
   2862  1.204      maxv {
   2863  1.224      maxv 	if (!pp->pr_redzone)
   2864  1.224      maxv 		return;
   2865  1.224      maxv #ifdef KASAN
   2866  1.227      maxv 	size_t size_with_redzone = pp->pr_reqsize;
   2867  1.227      maxv 	kasan_add_redzone(&size_with_redzone);
   2868  1.227      maxv 	kasan_free(p, size_with_redzone);
   2869  1.224      maxv #else
   2870  1.204      maxv 	uint8_t *cp, pat, expected;
   2871  1.204      maxv 	const uint8_t *ep;
   2872  1.204      maxv 
   2873  1.204      maxv 	cp = (uint8_t *)p + pp->pr_reqsize;
   2874  1.204      maxv 	ep = cp + POOL_REDZONE_SIZE;
   2875  1.204      maxv 
   2876  1.204      maxv 	pat = pool_pattern_generate(cp);
   2877  1.204      maxv 	expected = (pat == '\0') ? STATIC_BYTE: pat;
   2878  1.225      maxv 	if (__predict_false(expected != *cp)) {
   2879  1.225      maxv 		printf("%s: %p: 0x%02x != 0x%02x\n",
   2880  1.204      maxv 		   __func__, cp, *cp, expected);
   2881  1.204      maxv 	}
   2882  1.204      maxv 	cp++;
   2883  1.204      maxv 
   2884  1.204      maxv 	while (cp < ep) {
   2885  1.204      maxv 		expected = pool_pattern_generate(cp);
   2886  1.225      maxv 		if (__predict_false(*cp != expected)) {
   2887  1.225      maxv 			printf("%s: %p: 0x%02x != 0x%02x\n",
   2888  1.204      maxv 			   __func__, cp, *cp, expected);
   2889  1.204      maxv 		}
   2890  1.204      maxv 		cp++;
   2891  1.204      maxv 	}
   2892  1.224      maxv #endif
   2893  1.204      maxv }
   2894  1.204      maxv 
   2895  1.204      maxv #endif /* POOL_REDZONE */
   2896  1.204      maxv 
   2897  1.204      maxv 
   2898   1.66   thorpej #ifdef POOL_SUBPAGE
   2899   1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2900   1.66   thorpej void *
   2901   1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2902   1.66   thorpej {
   2903  1.134        ad 	return pool_get(&psppool, flags);
   2904   1.66   thorpej }
   2905   1.66   thorpej 
   2906   1.66   thorpej void
   2907   1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2908   1.66   thorpej {
   2909   1.66   thorpej 	pool_put(&psppool, v);
   2910   1.66   thorpej }
   2911   1.66   thorpej 
   2912  1.112     bjh21 #endif /* POOL_SUBPAGE */
   2913  1.141      yamt 
   2914  1.141      yamt #if defined(DDB)
   2915  1.141      yamt static bool
   2916  1.141      yamt pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2917  1.141      yamt {
   2918  1.141      yamt 
   2919  1.141      yamt 	return (uintptr_t)ph->ph_page <= addr &&
   2920  1.141      yamt 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
   2921  1.141      yamt }
   2922  1.141      yamt 
   2923  1.143      yamt static bool
   2924  1.143      yamt pool_in_item(struct pool *pp, void *item, uintptr_t addr)
   2925  1.143      yamt {
   2926  1.143      yamt 
   2927  1.143      yamt 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
   2928  1.143      yamt }
   2929  1.143      yamt 
   2930  1.143      yamt static bool
   2931  1.143      yamt pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
   2932  1.143      yamt {
   2933  1.143      yamt 	int i;
   2934  1.143      yamt 
   2935  1.143      yamt 	if (pcg == NULL) {
   2936  1.143      yamt 		return false;
   2937  1.143      yamt 	}
   2938  1.144      yamt 	for (i = 0; i < pcg->pcg_avail; i++) {
   2939  1.143      yamt 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
   2940  1.143      yamt 			return true;
   2941  1.143      yamt 		}
   2942  1.143      yamt 	}
   2943  1.143      yamt 	return false;
   2944  1.143      yamt }
   2945  1.143      yamt 
   2946  1.143      yamt static bool
   2947  1.143      yamt pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2948  1.143      yamt {
   2949  1.143      yamt 
   2950  1.143      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
   2951  1.143      yamt 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
   2952  1.143      yamt 		pool_item_bitmap_t *bitmap =
   2953  1.143      yamt 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
   2954  1.143      yamt 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
   2955  1.143      yamt 
   2956  1.143      yamt 		return (*bitmap & mask) == 0;
   2957  1.143      yamt 	} else {
   2958  1.143      yamt 		struct pool_item *pi;
   2959  1.143      yamt 
   2960  1.143      yamt 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   2961  1.143      yamt 			if (pool_in_item(pp, pi, addr)) {
   2962  1.143      yamt 				return false;
   2963  1.143      yamt 			}
   2964  1.143      yamt 		}
   2965  1.143      yamt 		return true;
   2966  1.143      yamt 	}
   2967  1.143      yamt }
   2968  1.143      yamt 
   2969  1.141      yamt void
   2970  1.141      yamt pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   2971  1.141      yamt {
   2972  1.141      yamt 	struct pool *pp;
   2973  1.141      yamt 
   2974  1.145        ad 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   2975  1.141      yamt 		struct pool_item_header *ph;
   2976  1.141      yamt 		uintptr_t item;
   2977  1.143      yamt 		bool allocated = true;
   2978  1.143      yamt 		bool incache = false;
   2979  1.143      yamt 		bool incpucache = false;
   2980  1.143      yamt 		char cpucachestr[32];
   2981  1.141      yamt 
   2982  1.141      yamt 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
   2983  1.141      yamt 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   2984  1.141      yamt 				if (pool_in_page(pp, ph, addr)) {
   2985  1.141      yamt 					goto found;
   2986  1.141      yamt 				}
   2987  1.141      yamt 			}
   2988  1.141      yamt 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   2989  1.141      yamt 				if (pool_in_page(pp, ph, addr)) {
   2990  1.143      yamt 					allocated =
   2991  1.143      yamt 					    pool_allocated(pp, ph, addr);
   2992  1.143      yamt 					goto found;
   2993  1.143      yamt 				}
   2994  1.143      yamt 			}
   2995  1.143      yamt 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   2996  1.143      yamt 				if (pool_in_page(pp, ph, addr)) {
   2997  1.143      yamt 					allocated = false;
   2998  1.141      yamt 					goto found;
   2999  1.141      yamt 				}
   3000  1.141      yamt 			}
   3001  1.141      yamt 			continue;
   3002  1.141      yamt 		} else {
   3003  1.141      yamt 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
   3004  1.141      yamt 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
   3005  1.141      yamt 				continue;
   3006  1.141      yamt 			}
   3007  1.143      yamt 			allocated = pool_allocated(pp, ph, addr);
   3008  1.141      yamt 		}
   3009  1.141      yamt found:
   3010  1.143      yamt 		if (allocated && pp->pr_cache) {
   3011  1.143      yamt 			pool_cache_t pc = pp->pr_cache;
   3012  1.143      yamt 			struct pool_cache_group *pcg;
   3013  1.143      yamt 			int i;
   3014  1.143      yamt 
   3015  1.143      yamt 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   3016  1.143      yamt 			    pcg = pcg->pcg_next) {
   3017  1.143      yamt 				if (pool_in_cg(pp, pcg, addr)) {
   3018  1.143      yamt 					incache = true;
   3019  1.143      yamt 					goto print;
   3020  1.143      yamt 				}
   3021  1.143      yamt 			}
   3022  1.183        ad 			for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
   3023  1.143      yamt 				pool_cache_cpu_t *cc;
   3024  1.143      yamt 
   3025  1.143      yamt 				if ((cc = pc->pc_cpus[i]) == NULL) {
   3026  1.143      yamt 					continue;
   3027  1.143      yamt 				}
   3028  1.143      yamt 				if (pool_in_cg(pp, cc->cc_current, addr) ||
   3029  1.143      yamt 				    pool_in_cg(pp, cc->cc_previous, addr)) {
   3030  1.143      yamt 					struct cpu_info *ci =
   3031  1.170        ad 					    cpu_lookup(i);
   3032  1.143      yamt 
   3033  1.143      yamt 					incpucache = true;
   3034  1.143      yamt 					snprintf(cpucachestr,
   3035  1.143      yamt 					    sizeof(cpucachestr),
   3036  1.143      yamt 					    "cached by CPU %u",
   3037  1.153    martin 					    ci->ci_index);
   3038  1.143      yamt 					goto print;
   3039  1.143      yamt 				}
   3040  1.143      yamt 			}
   3041  1.143      yamt 		}
   3042  1.143      yamt print:
   3043  1.141      yamt 		item = (uintptr_t)ph->ph_page + ph->ph_off;
   3044  1.141      yamt 		item = item + rounddown(addr - item, pp->pr_size);
   3045  1.143      yamt 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
    3046  1.141      yamt 		    (void *)addr, (void *)item, (size_t)(addr - item),
   3047  1.143      yamt 		    pp->pr_wchan,
   3048  1.143      yamt 		    incpucache ? cpucachestr :
   3049  1.143      yamt 		    incache ? "cached" : allocated ? "allocated" : "free");
   3050  1.141      yamt 	}
   3051  1.141      yamt }
   3052  1.141      yamt #endif /* defined(DDB) */
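
/*
 * Example (addresses hypothetical): from ddb, "whatis <addr>" walks
 * the pools through pool_whatis() and reports along the lines of
 *
 *	0xffff80002f4b2130 is 0xffff80002f4b2100+48 in POOL 'buf4k' (allocated)
 */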
   3053  1.203     joerg 
   3054  1.203     joerg static int
   3055  1.203     joerg pool_sysctl(SYSCTLFN_ARGS)
   3056  1.203     joerg {
   3057  1.203     joerg 	struct pool_sysctl data;
   3058  1.203     joerg 	struct pool *pp;
   3059  1.203     joerg 	struct pool_cache *pc;
   3060  1.203     joerg 	pool_cache_cpu_t *cc;
   3061  1.203     joerg 	int error;
   3062  1.203     joerg 	size_t i, written;
   3063  1.203     joerg 
   3064  1.203     joerg 	if (oldp == NULL) {
   3065  1.203     joerg 		*oldlenp = 0;
   3066  1.203     joerg 		TAILQ_FOREACH(pp, &pool_head, pr_poollist)
   3067  1.203     joerg 			*oldlenp += sizeof(data);
   3068  1.203     joerg 		return 0;
   3069  1.203     joerg 	}
   3070  1.203     joerg 
   3071  1.203     joerg 	memset(&data, 0, sizeof(data));
   3072  1.203     joerg 	error = 0;
   3073  1.203     joerg 	written = 0;
   3074  1.203     joerg 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   3075  1.203     joerg 		if (written + sizeof(data) > *oldlenp)
   3076  1.203     joerg 			break;
   3077  1.203     joerg 		strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
   3078  1.203     joerg 		data.pr_pagesize = pp->pr_alloc->pa_pagesz;
   3079  1.203     joerg 		data.pr_flags = pp->pr_roflags | pp->pr_flags;
   3080  1.203     joerg #define COPY(field) data.field = pp->field
   3081  1.203     joerg 		COPY(pr_size);
   3082  1.203     joerg 
   3083  1.203     joerg 		COPY(pr_itemsperpage);
   3084  1.203     joerg 		COPY(pr_nitems);
   3085  1.203     joerg 		COPY(pr_nout);
   3086  1.203     joerg 		COPY(pr_hardlimit);
   3087  1.203     joerg 		COPY(pr_npages);
   3088  1.203     joerg 		COPY(pr_minpages);
   3089  1.203     joerg 		COPY(pr_maxpages);
   3090  1.203     joerg 
   3091  1.203     joerg 		COPY(pr_nget);
   3092  1.203     joerg 		COPY(pr_nfail);
   3093  1.203     joerg 		COPY(pr_nput);
   3094  1.203     joerg 		COPY(pr_npagealloc);
   3095  1.203     joerg 		COPY(pr_npagefree);
   3096  1.203     joerg 		COPY(pr_hiwat);
   3097  1.203     joerg 		COPY(pr_nidle);
   3098  1.203     joerg #undef COPY
   3099  1.203     joerg 
   3100  1.203     joerg 		data.pr_cache_nmiss_pcpu = 0;
   3101  1.203     joerg 		data.pr_cache_nhit_pcpu = 0;
   3102  1.203     joerg 		if (pp->pr_cache) {
   3103  1.203     joerg 			pc = pp->pr_cache;
   3104  1.203     joerg 			data.pr_cache_meta_size = pc->pc_pcgsize;
   3105  1.203     joerg 			data.pr_cache_nfull = pc->pc_nfull;
   3106  1.203     joerg 			data.pr_cache_npartial = pc->pc_npart;
   3107  1.203     joerg 			data.pr_cache_nempty = pc->pc_nempty;
   3108  1.203     joerg 			data.pr_cache_ncontended = pc->pc_contended;
   3109  1.203     joerg 			data.pr_cache_nmiss_global = pc->pc_misses;
   3110  1.203     joerg 			data.pr_cache_nhit_global = pc->pc_hits;
   3111  1.203     joerg 			for (i = 0; i < pc->pc_ncpu; ++i) {
   3112  1.203     joerg 				cc = pc->pc_cpus[i];
   3113  1.203     joerg 				if (cc == NULL)
   3114  1.203     joerg 					continue;
   3115  1.206  knakahar 				data.pr_cache_nmiss_pcpu += cc->cc_misses;
   3116  1.206  knakahar 				data.pr_cache_nhit_pcpu += cc->cc_hits;
   3117  1.203     joerg 			}
   3118  1.203     joerg 		} else {
   3119  1.203     joerg 			data.pr_cache_meta_size = 0;
   3120  1.203     joerg 			data.pr_cache_nfull = 0;
   3121  1.203     joerg 			data.pr_cache_npartial = 0;
   3122  1.203     joerg 			data.pr_cache_nempty = 0;
   3123  1.203     joerg 			data.pr_cache_ncontended = 0;
   3124  1.203     joerg 			data.pr_cache_nmiss_global = 0;
   3125  1.203     joerg 			data.pr_cache_nhit_global = 0;
   3126  1.203     joerg 		}
   3127  1.203     joerg 
   3128  1.203     joerg 		error = sysctl_copyout(l, &data, oldp, sizeof(data));
   3129  1.203     joerg 		if (error)
   3130  1.203     joerg 			break;
   3131  1.203     joerg 		written += sizeof(data);
   3132  1.203     joerg 		oldp = (char *)oldp + sizeof(data);
   3133  1.203     joerg 	}
   3134  1.203     joerg 
   3135  1.203     joerg 	*oldlenp = written;
   3136  1.203     joerg 	return error;
   3137  1.203     joerg }
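
/*
 * Userland consumer sketch (example only, not compiled here): read
 * the node created below and print one line per pool, similar in
 * spirit to vmstat(1).  Assumes struct pool_sysctl and its uint64_t
 * fields are exported to userland via <sys/pool.h>.
 */
#if 0
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct pool_sysctl *data;
	size_t len, i;

	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
		return 1;
	if ((data = malloc(len)) == NULL)
		return 1;
	if (sysctlbyname("kern.pool", data, &len, NULL, 0) == -1)
		return 1;
	for (i = 0; i < len / sizeof(*data); i++)
		printf("%s: item size %" PRIu64 "\n",
		    data[i].pr_wchan, data[i].pr_size);
	free(data);
	return 0;
}
#endif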
   3138  1.203     joerg 
   3139  1.203     joerg SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
   3140  1.203     joerg {
   3141  1.203     joerg 	const struct sysctlnode *rnode = NULL;
   3142  1.203     joerg 
   3143  1.203     joerg 	sysctl_createv(clog, 0, NULL, &rnode,
   3144  1.203     joerg 		       CTLFLAG_PERMANENT,
   3145  1.203     joerg 		       CTLTYPE_STRUCT, "pool",
   3146  1.203     joerg 		       SYSCTL_DESCR("Get pool statistics"),
   3147  1.203     joerg 		       pool_sysctl, 0, NULL, 0,
   3148  1.203     joerg 		       CTL_KERN, CTL_CREATE, CTL_EOL);
   3149  1.203     joerg }
   3150