subr_pool.c revision 1.169
      1  1.169      yamt /*	$NetBSD: subr_pool.c,v 1.169 2008/08/11 02:48:42 yamt Exp $	*/
      2    1.1        pk 
      3    1.1        pk /*-
      4  1.161        ad  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
      5    1.1        pk  * All rights reserved.
      6    1.1        pk  *
      7    1.1        pk  * This code is derived from software contributed to The NetBSD Foundation
      8   1.20   thorpej  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9  1.134        ad  * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
     10    1.1        pk  *
     11    1.1        pk  * Redistribution and use in source and binary forms, with or without
     12    1.1        pk  * modification, are permitted provided that the following conditions
     13    1.1        pk  * are met:
     14    1.1        pk  * 1. Redistributions of source code must retain the above copyright
     15    1.1        pk  *    notice, this list of conditions and the following disclaimer.
     16    1.1        pk  * 2. Redistributions in binary form must reproduce the above copyright
     17    1.1        pk  *    notice, this list of conditions and the following disclaimer in the
     18    1.1        pk  *    documentation and/or other materials provided with the distribution.
     19    1.1        pk  *
     20    1.1        pk  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21    1.1        pk  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22    1.1        pk  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23    1.1        pk  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24    1.1        pk  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25    1.1        pk  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26    1.1        pk  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27    1.1        pk  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28    1.1        pk  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29    1.1        pk  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30    1.1        pk  * POSSIBILITY OF SUCH DAMAGE.
     31    1.1        pk  */
     32   1.64     lukem 
     33   1.64     lukem #include <sys/cdefs.h>
     34  1.169      yamt __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.169 2008/08/11 02:48:42 yamt Exp $");
     35   1.24    scottr 
     36  1.141      yamt #include "opt_ddb.h"
     37   1.25   thorpej #include "opt_pool.h"
     38   1.24    scottr #include "opt_poollog.h"
     39   1.28   thorpej #include "opt_lockdebug.h"
     40    1.1        pk 
     41    1.1        pk #include <sys/param.h>
     42    1.1        pk #include <sys/systm.h>
     43  1.135      yamt #include <sys/bitops.h>
     44    1.1        pk #include <sys/proc.h>
     45    1.1        pk #include <sys/errno.h>
     46    1.1        pk #include <sys/kernel.h>
     47    1.1        pk #include <sys/malloc.h>
     48    1.1        pk #include <sys/pool.h>
     49   1.20   thorpej #include <sys/syslog.h>
     50  1.125        ad #include <sys/debug.h>
     51  1.134        ad #include <sys/lockdebug.h>
     52  1.134        ad #include <sys/xcall.h>
     53  1.134        ad #include <sys/cpu.h>
     54  1.145        ad #include <sys/atomic.h>
     55    1.3        pk 
     56    1.3        pk #include <uvm/uvm.h>
     57    1.3        pk 
     58    1.1        pk /*
     59    1.1        pk  * Pool resource management utility.
     60    1.3        pk  *
     61   1.88       chs  * Memory is allocated in pages which are split into pieces according to
     62   1.88       chs  * the pool item size. Each page is kept on one of three lists in the
     63   1.88       chs  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
     64   1.88       chs  * for empty, full and partially-full pages respectively. The individual
     65   1.88       chs  * pool items are on a linked list headed by `ph_itemlist' in each page
     66   1.88       chs  * header. The memory for building the page list is either taken from
     67   1.88       chs  * the allocated pages themselves (for small pool items) or taken from
     68   1.88       chs  * an internal pool of page headers (`phpool').
     69    1.1        pk  */
     70    1.1        pk 
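/*
 * Illustrative sketch, not part of the compiled file: how a typical
 * subsystem might use this interface.  "struct frob" and the "frobpl"
 * wait channel are hypothetical names.
 *
 *	static struct pool frob_pool;
 *
 *	pool_init(&frob_pool, sizeof(struct frob), 0, 0, 0,
 *	    "frobpl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct frob *f = pool_get(&frob_pool, PR_WAITOK);
 *	... use f ...
 *	pool_put(&frob_pool, f);
 */
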
     71    1.3        pk /* List of all pools */
     72  1.145        ad TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     73  1.134        ad 
     74    1.3        pk /* Private pool for page header structures */
     75   1.97      yamt #define	PHPOOL_MAX	8
     76   1.97      yamt static struct pool phpool[PHPOOL_MAX];
     77  1.135      yamt #define	PHPOOL_FREELIST_NELEM(idx) \
     78  1.135      yamt 	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
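/*
 * Worked example: with the 32-bit bitmap words defined below,
 * PHPOOL_FREELIST_NELEM(idx) is 32 << idx, i.e. 64 items at idx 1,
 * doubling up to 4096 at idx 7 (PHPOOL_MAX - 1).
 */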
     79    1.3        pk 
     80   1.62     bjh21 #ifdef POOL_SUBPAGE
     81   1.62     bjh21 /* Pool of subpages for use by normal pools. */
     82   1.62     bjh21 static struct pool psppool;
     83   1.62     bjh21 #endif
     84   1.62     bjh21 
     85  1.117      yamt static SLIST_HEAD(, pool_allocator) pa_deferinitq =
     86  1.117      yamt     SLIST_HEAD_INITIALIZER(pa_deferinitq);
     87  1.117      yamt 
     88   1.98      yamt static void *pool_page_alloc_meta(struct pool *, int);
     89   1.98      yamt static void pool_page_free_meta(struct pool *, void *);
     90   1.98      yamt 
     91   1.98      yamt /* allocator for pool metadata */
     92  1.134        ad struct pool_allocator pool_allocator_meta = {
     93  1.117      yamt 	pool_page_alloc_meta, pool_page_free_meta,
     94  1.117      yamt 	.pa_backingmapptr = &kmem_map,
     95   1.98      yamt };
     96   1.98      yamt 
     97    1.3        pk /* # of seconds to retain page after last use */
     98    1.3        pk int pool_inactive_time = 10;
     99    1.3        pk 
    100    1.3        pk /* Next candidate for drainage (see pool_drain()) */
    101   1.23   thorpej static struct pool	*drainpp;
    102   1.23   thorpej 
    103  1.134        ad /* This lock protects both pool_head and drainpp. */
    104  1.134        ad static kmutex_t pool_head_lock;
    105  1.134        ad static kcondvar_t pool_busy;
    106    1.3        pk 
    107  1.135      yamt typedef uint32_t pool_item_bitmap_t;
    108  1.135      yamt #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
    109  1.135      yamt #define	BITMAP_MASK	(BITMAP_SIZE - 1)
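/*
 * Illustrative bit arithmetic: item index 37 lives in bitmap word
 * 37 / BITMAP_SIZE == 1 at bit 37 & BITMAP_MASK == 5; this is the
 * word/bit split used by the pr_item_notouch_* routines below.
 */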
    110   1.99      yamt 
    111    1.3        pk struct pool_item_header {
    112    1.3        pk 	/* Page headers */
    113   1.88       chs 	LIST_ENTRY(pool_item_header)
    114    1.3        pk 				ph_pagelist;	/* pool page list */
    115   1.88       chs 	SPLAY_ENTRY(pool_item_header)
    116   1.88       chs 				ph_node;	/* Off-page page headers */
    117  1.128  christos 	void *			ph_page;	/* this page's address */
    118  1.151      yamt 	uint32_t		ph_time;	/* last referenced */
    119  1.135      yamt 	uint16_t		ph_nmissing;	/* # of chunks in use */
    120  1.141      yamt 	uint16_t		ph_off;		/* start offset in page */
    121   1.97      yamt 	union {
    122   1.97      yamt 		/* !PR_NOTOUCH */
    123   1.97      yamt 		struct {
    124  1.102       chs 			LIST_HEAD(, pool_item)
    125   1.97      yamt 				phu_itemlist;	/* chunk list for this page */
    126   1.97      yamt 		} phu_normal;
    127   1.97      yamt 		/* PR_NOTOUCH */
    128   1.97      yamt 		struct {
    129  1.141      yamt 			pool_item_bitmap_t phu_bitmap[1];
    130   1.97      yamt 		} phu_notouch;
    131   1.97      yamt 	} ph_u;
    132    1.3        pk };
    133   1.97      yamt #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
    134  1.135      yamt #define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
    135    1.3        pk 
    136    1.1        pk struct pool_item {
    137    1.3        pk #ifdef DIAGNOSTIC
    138   1.82   thorpej 	u_int pi_magic;
    139   1.33       chs #endif
    140  1.134        ad #define	PI_MAGIC 0xdeaddeadU
    141    1.3        pk 	/* Other entries use only this list entry */
    142  1.102       chs 	LIST_ENTRY(pool_item)	pi_list;
    143    1.3        pk };
    144    1.3        pk 
    145   1.53   thorpej #define	POOL_NEEDS_CATCHUP(pp)						\
    146   1.53   thorpej 	((pp)->pr_nitems < (pp)->pr_minitems)
    147   1.53   thorpej 
    148   1.43   thorpej /*
    149   1.43   thorpej  * Pool cache management.
    150   1.43   thorpej  *
    151   1.43   thorpej  * Pool caches provide a way for constructed objects to be cached by the
    152   1.43   thorpej  * pool subsystem.  This can lead to performance improvements by avoiding
     153   1.43   thorpej  * needless object construction/destruction, both of which are deferred
     154   1.43   thorpej  * until absolutely necessary.
    155   1.43   thorpej  *
    156  1.134        ad  * Caches are grouped into cache groups.  Each cache group references up
    157  1.134        ad  * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
    158  1.134        ad  * object from the pool, it calls the object's constructor and places it
    159  1.134        ad  * into a cache group.  When a cache group frees an object back to the
    160  1.134        ad  * pool, it first calls the object's destructor.  This allows the object
    161  1.134        ad  * to persist in constructed form while freed to the cache.
    162  1.134        ad  *
    163  1.134        ad  * The pool references each cache, so that when a pool is drained by the
    164  1.134        ad  * pagedaemon, it can drain each individual cache as well.  Each time a
    165  1.134        ad  * cache is drained, the most idle cache group is freed to the pool in
    166  1.134        ad  * its entirety.
    167   1.43   thorpej  *
     168   1.43   thorpej  * Pool caches are laid on top of pools.  By layering them, we can avoid
    169   1.43   thorpej  * the complexity of cache management for pools which would not benefit
    170   1.43   thorpej  * from it.
    171   1.43   thorpej  */
    172   1.43   thorpej 
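/*
 * Illustrative sketch, not compiled here: layering a cache over a pool.
 * "struct frob", frob_ctor and frob_dtor are hypothetical, and the
 * pool_cache_init() argument list shown is an assumption based on the
 * API of this era.
 *
 *	static pool_cache_t frob_cache;
 *
 *	frob_cache = pool_cache_init(sizeof(struct frob), coherency_unit,
 *	    0, 0, "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
 *
 *	struct frob *f = pool_cache_get(frob_cache, PR_WAITOK);
 *	... use f, then return it still constructed ...
 *	pool_cache_put(frob_cache, f);
 */
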
    173  1.142        ad static struct pool pcg_normal_pool;
    174  1.142        ad static struct pool pcg_large_pool;
    175  1.134        ad static struct pool cache_pool;
    176  1.134        ad static struct pool cache_cpu_pool;
    177    1.3        pk 
    178  1.145        ad /* List of all caches. */
    179  1.145        ad TAILQ_HEAD(,pool_cache) pool_cache_head =
    180  1.145        ad     TAILQ_HEAD_INITIALIZER(pool_cache_head);
    181  1.145        ad 
    182  1.162        ad int pool_cache_disable;		/* global disable for caching */
    183  1.169      yamt static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
    184  1.145        ad 
    185  1.162        ad static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
    186  1.162        ad 				    void *);
    187  1.162        ad static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
    188  1.162        ad 				    void **, paddr_t *, int);
    189  1.134        ad static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
    190  1.134        ad static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
    191  1.134        ad static void	pool_cache_xcall(pool_cache_t);
    192    1.3        pk 
    193   1.42   thorpej static int	pool_catchup(struct pool *);
    194  1.128  christos static void	pool_prime_page(struct pool *, void *,
    195   1.55   thorpej 		    struct pool_item_header *);
    196   1.88       chs static void	pool_update_curpage(struct pool *);
    197   1.66   thorpej 
    198  1.113      yamt static int	pool_grow(struct pool *, int);
    199  1.117      yamt static void	*pool_allocator_alloc(struct pool *, int);
    200  1.117      yamt static void	pool_allocator_free(struct pool *, void *);
    201    1.3        pk 
    202   1.97      yamt static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
    203   1.88       chs 	void (*)(const char *, ...));
    204   1.42   thorpej static void pool_print1(struct pool *, const char *,
    205   1.42   thorpej 	void (*)(const char *, ...));
    206    1.3        pk 
    207   1.88       chs static int pool_chk_page(struct pool *, const char *,
    208   1.88       chs 			 struct pool_item_header *);
    209   1.88       chs 
    210    1.3        pk /*
    211   1.52   thorpej  * Pool log entry. An array of these is allocated in pool_init().
    212    1.3        pk  */
    213    1.3        pk struct pool_log {
    214    1.3        pk 	const char	*pl_file;
    215    1.3        pk 	long		pl_line;
    216    1.3        pk 	int		pl_action;
    217   1.25   thorpej #define	PRLOG_GET	1
    218   1.25   thorpej #define	PRLOG_PUT	2
    219    1.3        pk 	void		*pl_addr;
    220    1.1        pk };
    221    1.1        pk 
    222   1.86      matt #ifdef POOL_DIAGNOSTIC
    223    1.3        pk /* Number of entries in pool log buffers */
    224   1.17   thorpej #ifndef POOL_LOGSIZE
    225   1.17   thorpej #define	POOL_LOGSIZE	10
    226   1.17   thorpej #endif
    227   1.17   thorpej 
    228   1.17   thorpej int pool_logsize = POOL_LOGSIZE;
    229    1.1        pk 
    230  1.110     perry static inline void
    231   1.42   thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    232    1.3        pk {
    233    1.3        pk 	int n = pp->pr_curlogentry;
    234    1.3        pk 	struct pool_log *pl;
    235    1.3        pk 
    236   1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    237    1.3        pk 		return;
    238    1.3        pk 
    239    1.3        pk 	/*
    240    1.3        pk 	 * Fill in the current entry. Wrap around and overwrite
    241    1.3        pk 	 * the oldest entry if necessary.
    242    1.3        pk 	 */
    243    1.3        pk 	pl = &pp->pr_log[n];
    244    1.3        pk 	pl->pl_file = file;
    245    1.3        pk 	pl->pl_line = line;
    246    1.3        pk 	pl->pl_action = action;
    247    1.3        pk 	pl->pl_addr = v;
    248    1.3        pk 	if (++n >= pp->pr_logsize)
    249    1.3        pk 		n = 0;
    250    1.3        pk 	pp->pr_curlogentry = n;
    251    1.3        pk }
    252    1.3        pk 
    253    1.3        pk static void
    254   1.42   thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
    255   1.42   thorpej     void (*pr)(const char *, ...))
    256    1.3        pk {
    257    1.3        pk 	int i = pp->pr_logsize;
    258    1.3        pk 	int n = pp->pr_curlogentry;
    259    1.3        pk 
    260   1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    261    1.3        pk 		return;
    262    1.3        pk 
    263    1.3        pk 	/*
    264    1.3        pk 	 * Print all entries in this pool's log.
    265    1.3        pk 	 */
    266    1.3        pk 	while (i-- > 0) {
    267    1.3        pk 		struct pool_log *pl = &pp->pr_log[n];
    268    1.3        pk 		if (pl->pl_action != 0) {
    269   1.25   thorpej 			if (pi == NULL || pi == pl->pl_addr) {
    270   1.25   thorpej 				(*pr)("\tlog entry %d:\n", i);
    271   1.25   thorpej 				(*pr)("\t\taction = %s, addr = %p\n",
    272   1.25   thorpej 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    273   1.25   thorpej 				    pl->pl_addr);
    274   1.25   thorpej 				(*pr)("\t\tfile: %s at line %lu\n",
    275   1.25   thorpej 				    pl->pl_file, pl->pl_line);
    276   1.25   thorpej 			}
    277    1.3        pk 		}
    278    1.3        pk 		if (++n >= pp->pr_logsize)
    279    1.3        pk 			n = 0;
    280    1.3        pk 	}
    281    1.3        pk }
    282   1.25   thorpej 
    283  1.110     perry static inline void
    284   1.42   thorpej pr_enter(struct pool *pp, const char *file, long line)
    285   1.25   thorpej {
    286   1.25   thorpej 
    287   1.34   thorpej 	if (__predict_false(pp->pr_entered_file != NULL)) {
    288   1.25   thorpej 		printf("pool %s: reentrancy at file %s line %ld\n",
    289   1.25   thorpej 		    pp->pr_wchan, file, line);
    290   1.25   thorpej 		printf("         previous entry at file %s line %ld\n",
    291   1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    292   1.25   thorpej 		panic("pr_enter");
    293   1.25   thorpej 	}
    294   1.25   thorpej 
    295   1.25   thorpej 	pp->pr_entered_file = file;
    296   1.25   thorpej 	pp->pr_entered_line = line;
    297   1.25   thorpej }
    298   1.25   thorpej 
    299  1.110     perry static inline void
    300   1.42   thorpej pr_leave(struct pool *pp)
    301   1.25   thorpej {
    302   1.25   thorpej 
    303   1.34   thorpej 	if (__predict_false(pp->pr_entered_file == NULL)) {
    304   1.25   thorpej 		printf("pool %s not entered?\n", pp->pr_wchan);
    305   1.25   thorpej 		panic("pr_leave");
    306   1.25   thorpej 	}
    307   1.25   thorpej 
    308   1.25   thorpej 	pp->pr_entered_file = NULL;
    309   1.25   thorpej 	pp->pr_entered_line = 0;
    310   1.25   thorpej }
    311   1.25   thorpej 
    312  1.110     perry static inline void
    313   1.42   thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    314   1.25   thorpej {
    315   1.25   thorpej 
    316   1.25   thorpej 	if (pp->pr_entered_file != NULL)
    317   1.25   thorpej 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    318   1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    319   1.25   thorpej }
    320    1.3        pk #else
    321   1.25   thorpej #define	pr_log(pp, v, action, file, line)
    322   1.25   thorpej #define	pr_printlog(pp, pi, pr)
    323   1.25   thorpej #define	pr_enter(pp, file, line)
    324   1.25   thorpej #define	pr_leave(pp)
    325   1.25   thorpej #define	pr_enter_check(pp, pr)
    326   1.59   thorpej #endif /* POOL_DIAGNOSTIC */
    327    1.3        pk 
    328  1.135      yamt static inline unsigned int
    329   1.97      yamt pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    330   1.97      yamt     const void *v)
    331   1.97      yamt {
    332   1.97      yamt 	const char *cp = v;
    333  1.135      yamt 	unsigned int idx;
    334   1.97      yamt 
    335   1.97      yamt 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
    336  1.128  christos 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
    337   1.97      yamt 	KASSERT(idx < pp->pr_itemsperpage);
    338   1.97      yamt 	return idx;
    339   1.97      yamt }
    340   1.97      yamt 
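/*
 * Worked example for the index arithmetic used by the notouch routines
 * (illustrative numbers): with ph_page == 0x1000, ph_off == 64 and
 * pr_size == 128, the item at 0x1140 has index
 * (0x1140 - 0x1000 - 64) / 128 == 2, i.e. bitmap word 0, bit 2.
 */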
    341  1.110     perry static inline void
    342   1.97      yamt pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    343   1.97      yamt     void *obj)
    344   1.97      yamt {
    345  1.135      yamt 	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
    346  1.135      yamt 	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
    347  1.135      yamt 	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
    348   1.97      yamt 
    349  1.135      yamt 	KASSERT((*bitmap & mask) == 0);
    350  1.135      yamt 	*bitmap |= mask;
    351   1.97      yamt }
    352   1.97      yamt 
    353  1.110     perry static inline void *
    354   1.97      yamt pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
    355   1.97      yamt {
    356  1.135      yamt 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
    357  1.135      yamt 	unsigned int idx;
    358  1.135      yamt 	int i;
    359   1.97      yamt 
    360  1.135      yamt 	for (i = 0; ; i++) {
    361  1.135      yamt 		int bit;
    362   1.97      yamt 
    363  1.135      yamt 		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
    364  1.135      yamt 		bit = ffs32(bitmap[i]);
    365  1.135      yamt 		if (bit) {
    366  1.135      yamt 			pool_item_bitmap_t mask;
    367  1.135      yamt 
    368  1.135      yamt 			bit--;
    369  1.135      yamt 			idx = (i * BITMAP_SIZE) + bit;
    370  1.135      yamt 			mask = 1 << bit;
    371  1.135      yamt 			KASSERT((bitmap[i] & mask) != 0);
    372  1.135      yamt 			bitmap[i] &= ~mask;
    373  1.135      yamt 			break;
    374  1.135      yamt 		}
    375  1.135      yamt 	}
    376  1.135      yamt 	KASSERT(idx < pp->pr_itemsperpage);
    377  1.128  christos 	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
    378   1.97      yamt }
    379   1.97      yamt 
    380  1.135      yamt static inline void
    381  1.141      yamt pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
    382  1.135      yamt {
    383  1.135      yamt 	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
    384  1.135      yamt 	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
    385  1.135      yamt 	int i;
    386  1.135      yamt 
    387  1.135      yamt 	for (i = 0; i < n; i++) {
    388  1.135      yamt 		bitmap[i] = (pool_item_bitmap_t)-1;
    389  1.135      yamt 	}
    390  1.135      yamt }
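/*
 * Note on the bitmap convention used above: a set bit marks a free
 * item.  pr_item_notouch_init therefore marks every item free,
 * pr_item_notouch_get clears the bit for the item it hands out, and
 * pr_item_notouch_put sets it again on return.
 */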
    391  1.135      yamt 
    392  1.110     perry static inline int
    393   1.88       chs phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
    394   1.88       chs {
    395  1.121      yamt 
    396  1.121      yamt 	/*
     397  1.121      yamt 	 * we consider a pool_item_header with a smaller ph_page to be bigger.
     398  1.121      yamt 	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
    399  1.121      yamt 	 */
    400  1.121      yamt 
    401   1.88       chs 	if (a->ph_page < b->ph_page)
    402  1.121      yamt 		return (1);
    403  1.121      yamt 	else if (a->ph_page > b->ph_page)
    404   1.88       chs 		return (-1);
    405   1.88       chs 	else
    406   1.88       chs 		return (0);
    407   1.88       chs }
    408   1.88       chs 
    409   1.88       chs SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
    410   1.88       chs SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
    411   1.88       chs 
    412  1.141      yamt static inline struct pool_item_header *
    413  1.141      yamt pr_find_pagehead_noalign(struct pool *pp, void *v)
    414  1.141      yamt {
    415  1.141      yamt 	struct pool_item_header *ph, tmp;
    416  1.141      yamt 
    417  1.141      yamt 	tmp.ph_page = (void *)(uintptr_t)v;
    418  1.141      yamt 	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    419  1.141      yamt 	if (ph == NULL) {
    420  1.141      yamt 		ph = SPLAY_ROOT(&pp->pr_phtree);
    421  1.141      yamt 		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
    422  1.141      yamt 			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
    423  1.141      yamt 		}
    424  1.141      yamt 		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
    425  1.141      yamt 	}
    426  1.141      yamt 
    427  1.141      yamt 	return ph;
    428  1.141      yamt }
    429  1.141      yamt 
    430    1.3        pk /*
    431  1.121      yamt  * Return the pool page header based on item address.
    432    1.3        pk  */
    433  1.110     perry static inline struct pool_item_header *
    434  1.121      yamt pr_find_pagehead(struct pool *pp, void *v)
    435    1.3        pk {
    436   1.88       chs 	struct pool_item_header *ph, tmp;
    437    1.3        pk 
    438  1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
    439  1.141      yamt 		ph = pr_find_pagehead_noalign(pp, v);
    440  1.121      yamt 	} else {
    441  1.128  christos 		void *page =
    442  1.128  christos 		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
    443  1.121      yamt 
    444  1.121      yamt 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
    445  1.128  christos 			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
    446  1.121      yamt 		} else {
    447  1.121      yamt 			tmp.ph_page = page;
    448  1.121      yamt 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    449  1.121      yamt 		}
    450  1.121      yamt 	}
    451    1.3        pk 
    452  1.121      yamt 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
    453  1.128  christos 	    ((char *)ph->ph_page <= (char *)v &&
    454  1.128  christos 	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
    455   1.88       chs 	return ph;
    456    1.3        pk }
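/*
 * Illustrative mask arithmetic for the aligned case above: with a
 * 4096 byte backing page, pa_pagemask == ~0xfff, so an item at
 * 0x12345678 maps to the page at 0x12345000.
 */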
    457    1.3        pk 
    458  1.101   thorpej static void
    459  1.101   thorpej pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
    460  1.101   thorpej {
    461  1.101   thorpej 	struct pool_item_header *ph;
    462  1.101   thorpej 
    463  1.101   thorpej 	while ((ph = LIST_FIRST(pq)) != NULL) {
    464  1.101   thorpej 		LIST_REMOVE(ph, ph_pagelist);
    465  1.101   thorpej 		pool_allocator_free(pp, ph->ph_page);
    466  1.134        ad 		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    467  1.101   thorpej 			pool_put(pp->pr_phpool, ph);
    468  1.101   thorpej 	}
    469  1.101   thorpej }
    470  1.101   thorpej 
    471    1.3        pk /*
    472    1.3        pk  * Remove a page from the pool.
    473    1.3        pk  */
    474  1.110     perry static inline void
    475   1.61       chs pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    476   1.61       chs      struct pool_pagelist *pq)
    477    1.3        pk {
    478    1.3        pk 
    479  1.134        ad 	KASSERT(mutex_owned(&pp->pr_lock));
    480   1.91      yamt 
    481    1.3        pk 	/*
    482    1.7   thorpej 	 * If the page was idle, decrement the idle page count.
    483    1.3        pk 	 */
    484    1.6   thorpej 	if (ph->ph_nmissing == 0) {
    485    1.6   thorpej #ifdef DIAGNOSTIC
    486    1.6   thorpej 		if (pp->pr_nidle == 0)
    487    1.6   thorpej 			panic("pr_rmpage: nidle inconsistent");
    488   1.20   thorpej 		if (pp->pr_nitems < pp->pr_itemsperpage)
    489   1.20   thorpej 			panic("pr_rmpage: nitems inconsistent");
    490    1.6   thorpej #endif
    491    1.6   thorpej 		pp->pr_nidle--;
    492    1.6   thorpej 	}
    493    1.7   thorpej 
    494   1.20   thorpej 	pp->pr_nitems -= pp->pr_itemsperpage;
    495   1.20   thorpej 
    496    1.7   thorpej 	/*
    497  1.101   thorpej 	 * Unlink the page from the pool and queue it for release.
    498    1.7   thorpej 	 */
    499   1.88       chs 	LIST_REMOVE(ph, ph_pagelist);
    500   1.91      yamt 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    501   1.91      yamt 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
    502  1.101   thorpej 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
    503  1.101   thorpej 
    504    1.7   thorpej 	pp->pr_npages--;
    505    1.7   thorpej 	pp->pr_npagefree++;
    506    1.6   thorpej 
    507   1.88       chs 	pool_update_curpage(pp);
    508    1.3        pk }
    509    1.3        pk 
    510  1.126   thorpej static bool
    511  1.117      yamt pa_starved_p(struct pool_allocator *pa)
    512  1.117      yamt {
    513  1.117      yamt 
    514  1.117      yamt 	if (pa->pa_backingmap != NULL) {
    515  1.117      yamt 		return vm_map_starved_p(pa->pa_backingmap);
    516  1.117      yamt 	}
    517  1.127   thorpej 	return false;
    518  1.117      yamt }
    519  1.117      yamt 
    520  1.117      yamt static int
    521  1.124      yamt pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
    522  1.117      yamt {
    523  1.117      yamt 	struct pool *pp = obj;
    524  1.117      yamt 	struct pool_allocator *pa = pp->pr_alloc;
    525  1.117      yamt 
    526  1.117      yamt 	KASSERT(&pp->pr_reclaimerentry == ce);
    527  1.117      yamt 	pool_reclaim(pp);
    528  1.117      yamt 	if (!pa_starved_p(pa)) {
    529  1.117      yamt 		return CALLBACK_CHAIN_ABORT;
    530  1.117      yamt 	}
    531  1.117      yamt 	return CALLBACK_CHAIN_CONTINUE;
    532  1.117      yamt }
    533  1.117      yamt 
    534  1.117      yamt static void
    535  1.117      yamt pool_reclaim_register(struct pool *pp)
    536  1.117      yamt {
    537  1.117      yamt 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    538  1.117      yamt 	int s;
    539  1.117      yamt 
    540  1.117      yamt 	if (map == NULL) {
    541  1.117      yamt 		return;
    542  1.117      yamt 	}
    543  1.117      yamt 
    544  1.117      yamt 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    545  1.117      yamt 	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    546  1.117      yamt 	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
    547  1.117      yamt 	splx(s);
    548  1.117      yamt }
    549  1.117      yamt 
    550  1.117      yamt static void
    551  1.117      yamt pool_reclaim_unregister(struct pool *pp)
    552  1.117      yamt {
    553  1.117      yamt 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    554  1.117      yamt 	int s;
    555  1.117      yamt 
    556  1.117      yamt 	if (map == NULL) {
    557  1.117      yamt 		return;
    558  1.117      yamt 	}
    559  1.117      yamt 
    560  1.117      yamt 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    561  1.117      yamt 	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    562  1.117      yamt 	    &pp->pr_reclaimerentry);
    563  1.117      yamt 	splx(s);
    564  1.117      yamt }
    565  1.117      yamt 
    566  1.117      yamt static void
    567  1.117      yamt pa_reclaim_register(struct pool_allocator *pa)
    568  1.117      yamt {
    569  1.117      yamt 	struct vm_map *map = *pa->pa_backingmapptr;
    570  1.117      yamt 	struct pool *pp;
    571  1.117      yamt 
    572  1.117      yamt 	KASSERT(pa->pa_backingmap == NULL);
    573  1.117      yamt 	if (map == NULL) {
    574  1.117      yamt 		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
    575  1.117      yamt 		return;
    576  1.117      yamt 	}
    577  1.117      yamt 	pa->pa_backingmap = map;
    578  1.117      yamt 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
    579  1.117      yamt 		pool_reclaim_register(pp);
    580  1.117      yamt 	}
    581  1.117      yamt }
    582  1.117      yamt 
    583    1.3        pk /*
    584   1.94    simonb  * Initialize all the pools listed in the "pools" link set.
    585   1.94    simonb  */
    586   1.94    simonb void
    587  1.117      yamt pool_subsystem_init(void)
    588   1.94    simonb {
    589  1.117      yamt 	struct pool_allocator *pa;
    590   1.94    simonb 	__link_set_decl(pools, struct link_pool_init);
    591   1.94    simonb 	struct link_pool_init * const *pi;
    592   1.94    simonb 
    593  1.134        ad 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
    594  1.134        ad 	cv_init(&pool_busy, "poolbusy");
    595  1.134        ad 
    596   1.94    simonb 	__link_set_foreach(pi, pools)
    597   1.94    simonb 		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
    598   1.94    simonb 		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
    599  1.129        ad 		    (*pi)->palloc, (*pi)->ipl);
    600  1.117      yamt 
    601  1.117      yamt 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
    602  1.117      yamt 		KASSERT(pa->pa_backingmapptr != NULL);
    603  1.117      yamt 		KASSERT(*pa->pa_backingmapptr != NULL);
    604  1.117      yamt 		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
    605  1.117      yamt 		pa_reclaim_register(pa);
    606  1.117      yamt 	}
    607  1.134        ad 
    608  1.156        ad 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
    609  1.134        ad 	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
    610  1.134        ad 
    611  1.156        ad 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
    612  1.134        ad 	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
    613   1.94    simonb }
    614   1.94    simonb 
    615   1.94    simonb /*
    616    1.3        pk  * Initialize the given pool resource structure.
    617    1.3        pk  *
    618    1.3        pk  * We export this routine to allow other kernel parts to declare
    619    1.3        pk  * static pools that must be initialized before malloc() is available.
    620    1.3        pk  */
    621    1.3        pk void
    622   1.42   thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    623  1.129        ad     const char *wchan, struct pool_allocator *palloc, int ipl)
    624    1.3        pk {
    625  1.116    simonb 	struct pool *pp1;
    626   1.92     enami 	size_t trysize, phsize;
    627  1.134        ad 	int off, slack;
    628    1.3        pk 
    629  1.116    simonb #ifdef DEBUG
    630  1.116    simonb 	/*
    631  1.116    simonb 	 * Check that the pool hasn't already been initialised and
    632  1.116    simonb 	 * added to the list of all pools.
    633  1.116    simonb 	 */
    634  1.145        ad 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
    635  1.116    simonb 		if (pp == pp1)
    636  1.116    simonb 			panic("pool_init: pool %s already initialised",
    637  1.116    simonb 			    wchan);
    638  1.116    simonb 	}
    639  1.116    simonb #endif
    640  1.116    simonb 
    641   1.25   thorpej #ifdef POOL_DIAGNOSTIC
    642   1.25   thorpej 	/*
    643   1.25   thorpej 	 * Always log if POOL_DIAGNOSTIC is defined.
    644   1.25   thorpej 	 */
    645   1.25   thorpej 	if (pool_logsize != 0)
    646   1.25   thorpej 		flags |= PR_LOGGING;
    647   1.25   thorpej #endif
    648   1.25   thorpej 
    649   1.66   thorpej 	if (palloc == NULL)
    650   1.66   thorpej 		palloc = &pool_allocator_kmem;
    651  1.112     bjh21 #ifdef POOL_SUBPAGE
    652  1.112     bjh21 	if (size > palloc->pa_pagesz) {
    653  1.112     bjh21 		if (palloc == &pool_allocator_kmem)
    654  1.112     bjh21 			palloc = &pool_allocator_kmem_fullpage;
    655  1.112     bjh21 		else if (palloc == &pool_allocator_nointr)
    656  1.112     bjh21 			palloc = &pool_allocator_nointr_fullpage;
    657  1.112     bjh21 	}
    658   1.66   thorpej #endif /* POOL_SUBPAGE */
    659   1.66   thorpej 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
    660  1.112     bjh21 		if (palloc->pa_pagesz == 0)
    661   1.66   thorpej 			palloc->pa_pagesz = PAGE_SIZE;
    662   1.66   thorpej 
    663   1.66   thorpej 		TAILQ_INIT(&palloc->pa_list);
    664   1.66   thorpej 
    665  1.134        ad 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
    666   1.66   thorpej 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    667   1.66   thorpej 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    668  1.117      yamt 
    669  1.117      yamt 		if (palloc->pa_backingmapptr != NULL) {
    670  1.117      yamt 			pa_reclaim_register(palloc);
    671  1.117      yamt 		}
    672   1.66   thorpej 		palloc->pa_flags |= PA_INITIALIZED;
    673    1.4   thorpej 	}
    674    1.3        pk 
    675    1.3        pk 	if (align == 0)
    676    1.3        pk 		align = ALIGN(1);
    677   1.14   thorpej 
    678  1.120      yamt 	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
    679   1.14   thorpej 		size = sizeof(struct pool_item);
    680    1.3        pk 
    681   1.78   thorpej 	size = roundup(size, align);
    682   1.66   thorpej #ifdef DIAGNOSTIC
    683   1.66   thorpej 	if (size > palloc->pa_pagesz)
    684  1.121      yamt 		panic("pool_init: pool item size (%zu) too large", size);
    685   1.66   thorpej #endif
    686   1.35        pk 
    687    1.3        pk 	/*
    688    1.3        pk 	 * Initialize the pool structure.
    689    1.3        pk 	 */
    690   1.88       chs 	LIST_INIT(&pp->pr_emptypages);
    691   1.88       chs 	LIST_INIT(&pp->pr_fullpages);
    692   1.88       chs 	LIST_INIT(&pp->pr_partpages);
    693  1.134        ad 	pp->pr_cache = NULL;
    694    1.3        pk 	pp->pr_curpage = NULL;
    695    1.3        pk 	pp->pr_npages = 0;
    696    1.3        pk 	pp->pr_minitems = 0;
    697    1.3        pk 	pp->pr_minpages = 0;
    698    1.3        pk 	pp->pr_maxpages = UINT_MAX;
    699   1.20   thorpej 	pp->pr_roflags = flags;
    700   1.20   thorpej 	pp->pr_flags = 0;
    701   1.35        pk 	pp->pr_size = size;
    702    1.3        pk 	pp->pr_align = align;
    703    1.3        pk 	pp->pr_wchan = wchan;
    704   1.66   thorpej 	pp->pr_alloc = palloc;
    705   1.20   thorpej 	pp->pr_nitems = 0;
    706   1.20   thorpej 	pp->pr_nout = 0;
    707   1.20   thorpej 	pp->pr_hardlimit = UINT_MAX;
    708   1.20   thorpej 	pp->pr_hardlimit_warning = NULL;
    709   1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    710   1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    711   1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    712   1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    713   1.68   thorpej 	pp->pr_drain_hook = NULL;
    714   1.68   thorpej 	pp->pr_drain_hook_arg = NULL;
    715  1.125        ad 	pp->pr_freecheck = NULL;
    716    1.3        pk 
    717    1.3        pk 	/*
    718    1.3        pk 	 * Decide whether to put the page header off page to avoid
     719   1.92     enami 	 * wasting too large a part of the page, or to handle too big an item.
    720   1.92     enami 	 * Off-page page headers go on a hash table, so we can match
    721   1.92     enami 	 * a returned item with its header based on the page address.
     722   1.92     enami 	 * We use 1/16 of the page size and about 8 times the item
    723   1.92     enami 	 * size as the threshold (XXX: tune)
    724   1.92     enami 	 *
    725   1.92     enami 	 * However, we'll put the header into the page if we can put
    726   1.92     enami 	 * it without wasting any items.
    727   1.92     enami 	 *
    728   1.92     enami 	 * Silently enforce `0 <= ioff < align'.
    729    1.3        pk 	 */
    730   1.92     enami 	pp->pr_itemoffset = ioff %= align;
    731   1.92     enami 	/* See the comment below about reserved bytes. */
    732   1.92     enami 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
    733   1.92     enami 	phsize = ALIGN(sizeof(struct pool_item_header));
    734  1.121      yamt 	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
    735   1.97      yamt 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
    736   1.97      yamt 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
    737    1.3        pk 		/* Use the end of the page for the page header */
    738   1.20   thorpej 		pp->pr_roflags |= PR_PHINPAGE;
    739   1.92     enami 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
    740    1.2        pk 	} else {
    741    1.3        pk 		/* The page header will be taken from our page header pool */
    742    1.3        pk 		pp->pr_phoffset = 0;
    743   1.66   thorpej 		off = palloc->pa_pagesz;
    744   1.88       chs 		SPLAY_INIT(&pp->pr_phtree);
    745    1.2        pk 	}
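	/*
	 * Illustrative numbers for the heuristic above (the header size
	 * is hypothetical): with a 4096 byte page and a 64 byte aligned
	 * header, MIN(4096 / 16, 64 << 3) == 256, so items smaller than
	 * 256 bytes get an in-page header; larger items still do when
	 * reserving the header space costs no items.
	 */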
    746    1.1        pk 
    747    1.3        pk 	/*
    748    1.3        pk 	 * Alignment is to take place at `ioff' within the item. This means
    749    1.3        pk 	 * we must reserve up to `align - 1' bytes on the page to allow
    750    1.3        pk 	 * appropriate positioning of each item.
    751    1.3        pk 	 */
    752    1.3        pk 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    753   1.43   thorpej 	KASSERT(pp->pr_itemsperpage != 0);
    754   1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH)) {
    755   1.97      yamt 		int idx;
    756   1.97      yamt 
    757   1.97      yamt 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
    758   1.97      yamt 		    idx++) {
    759   1.97      yamt 			/* nothing */
    760   1.97      yamt 		}
    761   1.97      yamt 		if (idx >= PHPOOL_MAX) {
    762   1.97      yamt 			/*
     763   1.97      yamt 			 * if you see this panic, consider tweaking
    764   1.97      yamt 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
    765   1.97      yamt 			 */
    766   1.97      yamt 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
    767   1.97      yamt 			    pp->pr_wchan, pp->pr_itemsperpage);
    768   1.97      yamt 		}
    769   1.97      yamt 		pp->pr_phpool = &phpool[idx];
    770   1.97      yamt 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    771   1.97      yamt 		pp->pr_phpool = &phpool[0];
    772   1.97      yamt 	}
    773   1.97      yamt #if defined(DIAGNOSTIC)
    774   1.97      yamt 	else {
    775   1.97      yamt 		pp->pr_phpool = NULL;
    776   1.97      yamt 	}
    777   1.97      yamt #endif
    778    1.3        pk 
    779    1.3        pk 	/*
    780    1.3        pk 	 * Use the slack between the chunks and the page header
    781    1.3        pk 	 * for "cache coloring".
    782    1.3        pk 	 */
    783    1.3        pk 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    784    1.3        pk 	pp->pr_maxcolor = (slack / align) * align;
    785    1.3        pk 	pp->pr_curcolor = 0;
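	/*
	 * Illustrative numbers (hypothetical sizes): with off == 4096
	 * (header off page), pr_size == 320 and align == 8, we get
	 * pr_itemsperpage == 12, slack == 4096 - 12 * 320 == 256 and
	 * pr_maxcolor == 256, so item start offsets can cycle through
	 * 0, 8, 16, ..., 256 across successive pages.
	 */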
    786    1.3        pk 
    787    1.3        pk 	pp->pr_nget = 0;
    788    1.3        pk 	pp->pr_nfail = 0;
    789    1.3        pk 	pp->pr_nput = 0;
    790    1.3        pk 	pp->pr_npagealloc = 0;
    791    1.3        pk 	pp->pr_npagefree = 0;
    792    1.1        pk 	pp->pr_hiwat = 0;
    793    1.8   thorpej 	pp->pr_nidle = 0;
    794  1.134        ad 	pp->pr_refcnt = 0;
    795    1.3        pk 
    796   1.59   thorpej #ifdef POOL_DIAGNOSTIC
    797   1.25   thorpej 	if (flags & PR_LOGGING) {
    798   1.25   thorpej 		if (kmem_map == NULL ||
    799   1.25   thorpej 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    800   1.25   thorpej 		     M_TEMP, M_NOWAIT)) == NULL)
    801   1.20   thorpej 			pp->pr_roflags &= ~PR_LOGGING;
    802    1.3        pk 		pp->pr_curlogentry = 0;
    803    1.3        pk 		pp->pr_logsize = pool_logsize;
    804    1.3        pk 	}
    805   1.59   thorpej #endif
    806   1.25   thorpej 
    807   1.25   thorpej 	pp->pr_entered_file = NULL;
    808   1.25   thorpej 	pp->pr_entered_line = 0;
    809    1.3        pk 
    810  1.157        ad 	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
    811  1.134        ad 	cv_init(&pp->pr_cv, wchan);
    812  1.134        ad 	pp->pr_ipl = ipl;
    813    1.1        pk 
    814    1.3        pk 	/*
    815   1.43   thorpej 	 * Initialize private page header pool and cache magazine pool if we
    816   1.43   thorpej 	 * haven't done so yet.
    817   1.23   thorpej 	 * XXX LOCKING.
    818    1.3        pk 	 */
    819   1.97      yamt 	if (phpool[0].pr_size == 0) {
    820   1.97      yamt 		int idx;
    821   1.97      yamt 		for (idx = 0; idx < PHPOOL_MAX; idx++) {
    822   1.97      yamt 			static char phpool_names[PHPOOL_MAX][6+1+6+1];
    823   1.97      yamt 			int nelem;
    824   1.97      yamt 			size_t sz;
    825   1.97      yamt 
    826   1.97      yamt 			nelem = PHPOOL_FREELIST_NELEM(idx);
    827   1.97      yamt 			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
    828   1.97      yamt 			    "phpool-%d", nelem);
    829   1.97      yamt 			sz = sizeof(struct pool_item_header);
    830   1.97      yamt 			if (nelem) {
    831  1.135      yamt 				sz = offsetof(struct pool_item_header,
    832  1.135      yamt 				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
    833   1.97      yamt 			}
    834   1.97      yamt 			pool_init(&phpool[idx], sz, 0, 0, 0,
    835  1.129        ad 			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
    836   1.97      yamt 		}
    837   1.62     bjh21 #ifdef POOL_SUBPAGE
    838   1.62     bjh21 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    839  1.129        ad 		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
    840   1.62     bjh21 #endif
    841  1.142        ad 
    842  1.142        ad 		size = sizeof(pcg_t) +
    843  1.142        ad 		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
    844  1.156        ad 		pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
    845  1.142        ad 		    "pcgnormal", &pool_allocator_meta, IPL_VM);
    846  1.142        ad 
    847  1.142        ad 		size = sizeof(pcg_t) +
    848  1.142        ad 		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
    849  1.156        ad 		pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
    850  1.142        ad 		    "pcglarge", &pool_allocator_meta, IPL_VM);
    851    1.1        pk 	}
    852    1.1        pk 
    853  1.145        ad 	/* Insert into the list of all pools. */
    854  1.145        ad 	if (__predict_true(!cold))
    855  1.134        ad 		mutex_enter(&pool_head_lock);
    856  1.145        ad 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
    857  1.145        ad 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
    858  1.145        ad 			break;
    859  1.145        ad 	}
    860  1.145        ad 	if (pp1 == NULL)
    861  1.145        ad 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    862  1.145        ad 	else
    863  1.145        ad 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
    864  1.145        ad 	if (__predict_true(!cold))
    865  1.134        ad 		mutex_exit(&pool_head_lock);
    866  1.134        ad 
    867  1.167     skrll 	/* Insert this into the list of pools using this allocator. */
    868  1.145        ad 	if (__predict_true(!cold))
    869  1.134        ad 		mutex_enter(&palloc->pa_lock);
    870  1.145        ad 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    871  1.145        ad 	if (__predict_true(!cold))
    872  1.134        ad 		mutex_exit(&palloc->pa_lock);
    873   1.66   thorpej 
    874  1.117      yamt 	pool_reclaim_register(pp);
    875    1.1        pk }
    876    1.1        pk 
    877    1.1        pk /*
     878    1.1        pk  * Decommission a pool resource.
    879    1.1        pk  */
    880    1.1        pk void
    881   1.42   thorpej pool_destroy(struct pool *pp)
    882    1.1        pk {
    883  1.101   thorpej 	struct pool_pagelist pq;
    884    1.3        pk 	struct pool_item_header *ph;
    885   1.43   thorpej 
    886  1.101   thorpej 	/* Remove from global pool list */
    887  1.134        ad 	mutex_enter(&pool_head_lock);
    888  1.134        ad 	while (pp->pr_refcnt != 0)
    889  1.134        ad 		cv_wait(&pool_busy, &pool_head_lock);
    890  1.145        ad 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    891  1.101   thorpej 	if (drainpp == pp)
    892  1.101   thorpej 		drainpp = NULL;
    893  1.134        ad 	mutex_exit(&pool_head_lock);
    894  1.101   thorpej 
    895  1.101   thorpej 	/* Remove this pool from its allocator's list of pools. */
    896  1.117      yamt 	pool_reclaim_unregister(pp);
    897  1.134        ad 	mutex_enter(&pp->pr_alloc->pa_lock);
    898   1.66   thorpej 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    899  1.134        ad 	mutex_exit(&pp->pr_alloc->pa_lock);
    900   1.66   thorpej 
    901  1.134        ad 	mutex_enter(&pp->pr_lock);
    902  1.101   thorpej 
    903  1.134        ad 	KASSERT(pp->pr_cache == NULL);
    904    1.3        pk 
    905    1.3        pk #ifdef DIAGNOSTIC
    906   1.20   thorpej 	if (pp->pr_nout != 0) {
    907   1.25   thorpej 		pr_printlog(pp, NULL, printf);
    908   1.80    provos 		panic("pool_destroy: pool busy: still out: %u",
    909   1.20   thorpej 		    pp->pr_nout);
    910    1.3        pk 	}
    911    1.3        pk #endif
    912    1.1        pk 
    913  1.101   thorpej 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
    914  1.101   thorpej 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
    915  1.101   thorpej 
    916    1.3        pk 	/* Remove all pages */
    917  1.101   thorpej 	LIST_INIT(&pq);
    918   1.88       chs 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
    919  1.101   thorpej 		pr_rmpage(pp, ph, &pq);
    920  1.101   thorpej 
    921  1.134        ad 	mutex_exit(&pp->pr_lock);
    922    1.3        pk 
    923  1.101   thorpej 	pr_pagelist_free(pp, &pq);
    924    1.3        pk 
    925   1.59   thorpej #ifdef POOL_DIAGNOSTIC
    926   1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    927    1.3        pk 		free(pp->pr_log, M_TEMP);
    928   1.59   thorpej #endif
    929  1.134        ad 
    930  1.134        ad 	cv_destroy(&pp->pr_cv);
    931  1.134        ad 	mutex_destroy(&pp->pr_lock);
    932    1.1        pk }
    933    1.1        pk 
    934   1.68   thorpej void
    935   1.68   thorpej pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    936   1.68   thorpej {
    937   1.68   thorpej 
    938   1.68   thorpej 	/* XXX no locking -- must be used just after pool_init() */
    939   1.68   thorpej #ifdef DIAGNOSTIC
    940   1.68   thorpej 	if (pp->pr_drain_hook != NULL)
    941   1.68   thorpej 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
    942   1.68   thorpej #endif
    943   1.68   thorpej 	pp->pr_drain_hook = fn;
    944   1.68   thorpej 	pp->pr_drain_hook_arg = arg;
    945   1.68   thorpej }
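/*
 * Illustrative sketch (hypothetical callback): a subsystem that can
 * release cached objects might register a drain hook right after
 * pool_init.  The hook is invoked, with the caller's flags, when the
 * pool hits its hard limit (see pool_get below).
 *
 *	static void
 *	frob_drain(void *arg, int flags)
 *	{
 *		... release idle frobs back to frob_pool ...
 *	}
 *
 *	pool_set_drain_hook(&frob_pool, frob_drain, NULL);
 */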
    946   1.68   thorpej 
    947   1.88       chs static struct pool_item_header *
    948  1.128  christos pool_alloc_item_header(struct pool *pp, void *storage, int flags)
    949   1.55   thorpej {
    950   1.55   thorpej 	struct pool_item_header *ph;
    951   1.55   thorpej 
    952   1.55   thorpej 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    953  1.128  christos 		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
    954  1.134        ad 	else
    955   1.97      yamt 		ph = pool_get(pp->pr_phpool, flags);
    956   1.55   thorpej 
    957   1.55   thorpej 	return (ph);
    958   1.55   thorpej }
    959    1.1        pk 
    960    1.1        pk /*
    961  1.134        ad  * Grab an item from the pool.
    962    1.1        pk  */
    963    1.3        pk void *
    964   1.59   thorpej #ifdef POOL_DIAGNOSTIC
    965   1.42   thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
    966   1.56  sommerfe #else
    967   1.56  sommerfe pool_get(struct pool *pp, int flags)
    968   1.56  sommerfe #endif
    969    1.1        pk {
    970    1.1        pk 	struct pool_item *pi;
    971    1.3        pk 	struct pool_item_header *ph;
    972   1.55   thorpej 	void *v;
    973    1.1        pk 
    974    1.2        pk #ifdef DIAGNOSTIC
    975   1.95    atatat 	if (__predict_false(pp->pr_itemsperpage == 0))
    976   1.95    atatat 		panic("pool_get: pool %p: pr_itemsperpage is zero, "
    977   1.95    atatat 		    "pool not initialized?", pp);
    978   1.84   thorpej 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
    979   1.37  sommerfe 			    (flags & PR_WAITOK) != 0))
    980   1.77      matt 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
    981   1.58   thorpej 
    982  1.102       chs #endif /* DIAGNOSTIC */
    983   1.58   thorpej #ifdef LOCKDEBUG
    984  1.155        ad 	if (flags & PR_WAITOK) {
    985  1.154      yamt 		ASSERT_SLEEPABLE();
    986  1.155        ad 	}
    987   1.56  sommerfe #endif
    988    1.1        pk 
    989  1.134        ad 	mutex_enter(&pp->pr_lock);
    990   1.25   thorpej 	pr_enter(pp, file, line);
    991   1.20   thorpej 
    992   1.20   thorpej  startover:
    993   1.20   thorpej 	/*
    994   1.20   thorpej 	 * Check to see if we've reached the hard limit.  If we have,
    995   1.20   thorpej 	 * and we can wait, then wait until an item has been returned to
    996   1.20   thorpej 	 * the pool.
    997   1.20   thorpej 	 */
    998   1.20   thorpej #ifdef DIAGNOSTIC
    999   1.34   thorpej 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
   1000   1.25   thorpej 		pr_leave(pp);
   1001  1.134        ad 		mutex_exit(&pp->pr_lock);
   1002   1.20   thorpej 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
   1003   1.20   thorpej 	}
   1004   1.20   thorpej #endif
   1005   1.34   thorpej 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
   1006   1.68   thorpej 		if (pp->pr_drain_hook != NULL) {
   1007   1.68   thorpej 			/*
   1008   1.68   thorpej 			 * Since the drain hook is going to free things
   1009   1.68   thorpej 			 * back to the pool, unlock, call the hook, re-lock,
   1010   1.68   thorpej 			 * and check the hardlimit condition again.
   1011   1.68   thorpej 			 */
   1012   1.68   thorpej 			pr_leave(pp);
   1013  1.134        ad 			mutex_exit(&pp->pr_lock);
   1014   1.68   thorpej 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   1015  1.134        ad 			mutex_enter(&pp->pr_lock);
   1016   1.68   thorpej 			pr_enter(pp, file, line);
   1017   1.68   thorpej 			if (pp->pr_nout < pp->pr_hardlimit)
   1018   1.68   thorpej 				goto startover;
   1019   1.68   thorpej 		}
   1020   1.68   thorpej 
   1021   1.29  sommerfe 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
   1022   1.20   thorpej 			/*
   1023   1.20   thorpej 			 * XXX: A warning isn't logged in this case.  Should
   1024   1.20   thorpej 			 * it be?
   1025   1.20   thorpej 			 */
   1026   1.20   thorpej 			pp->pr_flags |= PR_WANTED;
   1027   1.25   thorpej 			pr_leave(pp);
   1028  1.134        ad 			cv_wait(&pp->pr_cv, &pp->pr_lock);
   1029   1.25   thorpej 			pr_enter(pp, file, line);
   1030   1.20   thorpej 			goto startover;
   1031   1.20   thorpej 		}
   1032   1.31   thorpej 
   1033   1.31   thorpej 		/*
   1034   1.31   thorpej 		 * Log a message that the hard limit has been hit.
   1035   1.31   thorpej 		 */
   1036   1.31   thorpej 		if (pp->pr_hardlimit_warning != NULL &&
   1037   1.31   thorpej 		    ratecheck(&pp->pr_hardlimit_warning_last,
   1038   1.31   thorpej 			      &pp->pr_hardlimit_ratecap))
   1039   1.31   thorpej 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
   1040   1.21   thorpej 
   1041   1.21   thorpej 		pp->pr_nfail++;
   1042   1.21   thorpej 
   1043   1.25   thorpej 		pr_leave(pp);
   1044  1.134        ad 		mutex_exit(&pp->pr_lock);
   1045   1.20   thorpej 		return (NULL);
   1046   1.20   thorpej 	}
   1047   1.20   thorpej 
   1048    1.3        pk 	/*
   1049    1.3        pk 	 * The convention we use is that if `curpage' is not NULL, then
   1050    1.3        pk 	 * it points at a non-empty bucket. In particular, `curpage'
   1051    1.3        pk 	 * never points at a page header which has PR_PHINPAGE set and
   1052    1.3        pk 	 * has no items in its bucket.
   1053    1.3        pk 	 */
   1054   1.20   thorpej 	if ((ph = pp->pr_curpage) == NULL) {
   1055  1.113      yamt 		int error;
   1056  1.113      yamt 
   1057   1.20   thorpej #ifdef DIAGNOSTIC
   1058   1.20   thorpej 		if (pp->pr_nitems != 0) {
   1059  1.134        ad 			mutex_exit(&pp->pr_lock);
   1060   1.20   thorpej 			printf("pool_get: %s: curpage NULL, nitems %u\n",
   1061   1.20   thorpej 			    pp->pr_wchan, pp->pr_nitems);
   1062   1.80    provos 			panic("pool_get: nitems inconsistent");
   1063   1.20   thorpej 		}
   1064   1.20   thorpej #endif
   1065   1.20   thorpej 
   1066   1.21   thorpej 		/*
   1067   1.21   thorpej 		 * Call the back-end page allocator for more memory.
   1068   1.21   thorpej 		 * Release the pool lock, as the back-end page allocator
   1069   1.21   thorpej 		 * may block.
   1070   1.21   thorpej 		 */
   1071   1.25   thorpej 		pr_leave(pp);
   1072  1.113      yamt 		error = pool_grow(pp, flags);
   1073  1.113      yamt 		pr_enter(pp, file, line);
   1074  1.113      yamt 		if (error != 0) {
   1075   1.21   thorpej 			/*
   1076   1.55   thorpej 			 * We were unable to allocate a page or item
   1077   1.55   thorpej 			 * header, but we released the lock during
   1078   1.55   thorpej 			 * allocation, so perhaps items were freed
   1079   1.55   thorpej 			 * back to the pool.  Check for this case.
   1080   1.21   thorpej 			 */
   1081   1.21   thorpej 			if (pp->pr_curpage != NULL)
   1082   1.21   thorpej 				goto startover;
   1083   1.15        pk 
   1084  1.117      yamt 			pp->pr_nfail++;
   1085   1.25   thorpej 			pr_leave(pp);
   1086  1.134        ad 			mutex_exit(&pp->pr_lock);
   1087  1.117      yamt 			return (NULL);
   1088    1.1        pk 		}
   1089    1.3        pk 
   1090   1.20   thorpej 		/* Start the allocation process over. */
   1091   1.20   thorpej 		goto startover;
   1092    1.3        pk 	}
   1093   1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1094   1.97      yamt #ifdef DIAGNOSTIC
   1095   1.97      yamt 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
   1096   1.97      yamt 			pr_leave(pp);
   1097  1.134        ad 			mutex_exit(&pp->pr_lock);
   1098   1.97      yamt 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1099   1.97      yamt 		}
   1100   1.97      yamt #endif
   1101   1.97      yamt 		v = pr_item_notouch_get(pp, ph);
   1102   1.97      yamt #ifdef POOL_DIAGNOSTIC
   1103   1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1104   1.97      yamt #endif
   1105   1.97      yamt 	} else {
   1106  1.102       chs 		v = pi = LIST_FIRST(&ph->ph_itemlist);
   1107   1.97      yamt 		if (__predict_false(v == NULL)) {
   1108   1.97      yamt 			pr_leave(pp);
   1109  1.134        ad 			mutex_exit(&pp->pr_lock);
   1110   1.97      yamt 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1111   1.97      yamt 		}
   1112   1.20   thorpej #ifdef DIAGNOSTIC
   1113   1.97      yamt 		if (__predict_false(pp->pr_nitems == 0)) {
   1114   1.97      yamt 			pr_leave(pp);
   1115  1.134        ad 			mutex_exit(&pp->pr_lock);
   1116   1.97      yamt 			printf("pool_get: %s: items on itemlist, nitems %u\n",
   1117   1.97      yamt 			    pp->pr_wchan, pp->pr_nitems);
   1118   1.97      yamt 			panic("pool_get: nitems inconsistent");
   1119   1.97      yamt 		}
   1120   1.65     enami #endif
   1121   1.56  sommerfe 
   1122   1.65     enami #ifdef POOL_DIAGNOSTIC
   1123   1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1124   1.65     enami #endif
   1125    1.3        pk 
   1126   1.65     enami #ifdef DIAGNOSTIC
   1127   1.97      yamt 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
   1128   1.97      yamt 			pr_printlog(pp, pi, printf);
   1129   1.97      yamt 			panic("pool_get(%s): free list modified: "
   1130   1.97      yamt 			    "magic=%x; page %p; item addr %p\n",
   1131   1.97      yamt 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
   1132   1.97      yamt 		}
   1133    1.3        pk #endif
   1134    1.3        pk 
   1135   1.97      yamt 		/*
   1136   1.97      yamt 		 * Remove from item list.
   1137   1.97      yamt 		 */
   1138  1.102       chs 		LIST_REMOVE(pi, pi_list);
   1139   1.97      yamt 	}
   1140   1.20   thorpej 	pp->pr_nitems--;
   1141   1.20   thorpej 	pp->pr_nout++;
   1142    1.6   thorpej 	if (ph->ph_nmissing == 0) {
   1143    1.6   thorpej #ifdef DIAGNOSTIC
   1144   1.34   thorpej 		if (__predict_false(pp->pr_nidle == 0))
   1145    1.6   thorpej 			panic("pool_get: nidle inconsistent");
   1146    1.6   thorpej #endif
   1147    1.6   thorpej 		pp->pr_nidle--;
   1148   1.88       chs 
   1149   1.88       chs 		/*
   1150   1.88       chs 		 * This page was previously empty.  Move it to the list of
   1151   1.88       chs 		 * partially-full pages.  This page is already curpage.
   1152   1.88       chs 		 */
   1153   1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1154   1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1155    1.6   thorpej 	}
   1156    1.3        pk 	ph->ph_nmissing++;
   1157   1.97      yamt 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
   1158   1.21   thorpej #ifdef DIAGNOSTIC
   1159   1.97      yamt 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
   1160  1.102       chs 		    !LIST_EMPTY(&ph->ph_itemlist))) {
   1161   1.25   thorpej 			pr_leave(pp);
   1162  1.134        ad 			mutex_exit(&pp->pr_lock);
   1163   1.21   thorpej 			panic("pool_get: %s: nmissing inconsistent",
   1164   1.21   thorpej 			    pp->pr_wchan);
   1165   1.21   thorpej 		}
   1166   1.21   thorpej #endif
   1167    1.3        pk 		/*
   1168   1.88       chs 		 * This page is now full.  Move it to the full list
   1169   1.88       chs 		 * and select a new current page.
   1170    1.3        pk 		 */
   1171   1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1172   1.88       chs 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
   1173   1.88       chs 		pool_update_curpage(pp);
   1174    1.1        pk 	}
   1175    1.3        pk 
   1176    1.3        pk 	pp->pr_nget++;
   1177  1.111  christos 	pr_leave(pp);
   1178   1.20   thorpej 
   1179   1.20   thorpej 	/*
   1180   1.20   thorpej 	 * If we have a low water mark and we are now below that low
   1181   1.20   thorpej 	 * water mark, add more items to the pool.
   1182   1.20   thorpej 	 */
   1183   1.53   thorpej 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1184   1.20   thorpej 		/*
   1185   1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1186   1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1187   1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1188   1.20   thorpej 		 */
   1189   1.20   thorpej 	}
   1190   1.20   thorpej 
   1191  1.134        ad 	mutex_exit(&pp->pr_lock);
   1192  1.125        ad 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
   1193  1.125        ad 	FREECHECK_OUT(&pp->pr_freecheck, v);
   1194    1.1        pk 	return (v);
   1195    1.1        pk }
   1196    1.1        pk 
   1197    1.1        pk /*
   1198   1.43   thorpej  * Internal version of pool_put().  Pool is already locked/entered.
   1199    1.1        pk  */
   1200   1.43   thorpej static void
   1201  1.101   thorpej pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
   1202    1.1        pk {
   1203    1.1        pk 	struct pool_item *pi = v;
   1204    1.3        pk 	struct pool_item_header *ph;
   1205    1.3        pk 
   1206  1.134        ad 	KASSERT(mutex_owned(&pp->pr_lock));
   1207  1.125        ad 	FREECHECK_IN(&pp->pr_freecheck, v);
   1208  1.134        ad 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
   1209   1.61       chs 
   1210   1.30   thorpej #ifdef DIAGNOSTIC
   1211   1.34   thorpej 	if (__predict_false(pp->pr_nout == 0)) {
   1212   1.30   thorpej 		printf("pool %s: putting with none out\n",
   1213   1.30   thorpej 		    pp->pr_wchan);
   1214   1.30   thorpej 		panic("pool_put");
   1215   1.30   thorpej 	}
   1216   1.30   thorpej #endif
   1217    1.3        pk 
   1218  1.121      yamt 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
   1219   1.25   thorpej 		pr_printlog(pp, NULL, printf);
   1220    1.3        pk 		panic("pool_put: %s: page header missing", pp->pr_wchan);
   1221    1.3        pk 	}
   1222   1.28   thorpej 
   1223    1.3        pk 	/*
   1224    1.3        pk 	 * Return to item list.
   1225    1.3        pk 	 */
   1226   1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1227   1.97      yamt 		pr_item_notouch_put(pp, ph, v);
   1228   1.97      yamt 	} else {
   1229    1.2        pk #ifdef DIAGNOSTIC
   1230   1.97      yamt 		pi->pi_magic = PI_MAGIC;
   1231    1.3        pk #endif
   1232   1.32       chs #ifdef DEBUG
   1233   1.97      yamt 		{
   1234   1.97      yamt 			int i, *ip = v;
   1235   1.32       chs 
   1236   1.97      yamt 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
   1237   1.97      yamt 				*ip++ = PI_MAGIC;
   1238   1.97      yamt 			}
   1239   1.32       chs 		}
   1240   1.32       chs #endif
   1241   1.32       chs 
   1242  1.102       chs 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1243   1.97      yamt 	}
   1244   1.79   thorpej 	KDASSERT(ph->ph_nmissing != 0);
   1245    1.3        pk 	ph->ph_nmissing--;
   1246    1.3        pk 	pp->pr_nput++;
   1247   1.20   thorpej 	pp->pr_nitems++;
   1248   1.20   thorpej 	pp->pr_nout--;
   1249    1.3        pk 
   1250    1.3        pk 	/* Cancel "pool empty" condition if it exists */
   1251    1.3        pk 	if (pp->pr_curpage == NULL)
   1252    1.3        pk 		pp->pr_curpage = ph;
   1253    1.3        pk 
   1254    1.3        pk 	if (pp->pr_flags & PR_WANTED) {
   1255    1.3        pk 		pp->pr_flags &= ~PR_WANTED;
   1256  1.134        ad 		cv_broadcast(&pp->pr_cv);
   1257    1.3        pk 	}
   1258    1.3        pk 
   1259    1.3        pk 	/*
   1260   1.88       chs 	 * If this page is now empty, do one of two things:
   1261   1.21   thorpej 	 *
   1262   1.88       chs 	 *	(1) If we have more pages than the page high water mark,
   1263   1.96   thorpej 	 *	    free the page back to the system.  ONLY CONSIDER
   1264   1.90   thorpej 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
   1265   1.90   thorpej 	 *	    CLAIM.
   1266   1.21   thorpej 	 *
   1267   1.88       chs 	 *	(2) Otherwise, move the page to the empty page list.
   1268   1.88       chs 	 *
   1269   1.88       chs 	 * Either way, select a new current page (so we use a partially-full
   1270   1.88       chs 	 * page if one is available).
   1271    1.3        pk 	 */
   1272    1.3        pk 	if (ph->ph_nmissing == 0) {
   1273    1.6   thorpej 		pp->pr_nidle++;
   1274   1.90   thorpej 		if (pp->pr_npages > pp->pr_minpages &&
   1275  1.152      yamt 		    pp->pr_npages > pp->pr_maxpages) {
   1276  1.101   thorpej 			pr_rmpage(pp, ph, pq);
   1277    1.3        pk 		} else {
   1278   1.88       chs 			LIST_REMOVE(ph, ph_pagelist);
   1279   1.88       chs 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1280    1.3        pk 
   1281   1.21   thorpej 			/*
   1282   1.21   thorpej 			 * Update the timestamp on the page.  A page must
   1283   1.21   thorpej 			 * be idle for some period of time before it can
   1284   1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
   1285   1.21   thorpej 			 * ping-pong'ing for memory.
   1286  1.151      yamt 			 *
   1287  1.151      yamt 			 * note for 64-bit time_t: truncating to 32-bit is not
   1288  1.151      yamt 			 * a problem for our usage.
   1289   1.21   thorpej 			 */
   1290  1.151      yamt 			ph->ph_time = time_uptime;
   1291    1.1        pk 		}
   1292   1.88       chs 		pool_update_curpage(pp);
   1293    1.1        pk 	}
   1294   1.88       chs 
   1295   1.21   thorpej 	/*
   1296   1.88       chs 	 * If the page was previously completely full, move it to the
   1297   1.88       chs 	 * partially-full list and make it the current page.  The next
   1298   1.88       chs 	 * allocation will get the item from this page, instead of
   1299   1.88       chs 	 * further fragmenting the pool.
   1300   1.21   thorpej 	 */
   1301   1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1302   1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1303   1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1304   1.21   thorpej 		pp->pr_curpage = ph;
   1305   1.21   thorpej 	}
   1306   1.43   thorpej }
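/*
 * Design note on the "pq" argument above: pool_do_put() never frees
 * pages itself.  Empty pages selected for release are collected on the
 * caller-supplied pagelist by pr_rmpage(), and the callers below hand
 * that list to pr_pagelist_free() only after dropping pr_lock, so the
 * back-end page allocator is never entered with the pool locked.
 */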
   1307   1.43   thorpej 
   1308   1.43   thorpej /*
    1309  1.134        ad  * Return a resource to the pool.
   1310   1.43   thorpej  */
   1311   1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1312   1.43   thorpej void
   1313   1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
   1314   1.43   thorpej {
   1315  1.101   thorpej 	struct pool_pagelist pq;
   1316  1.101   thorpej 
   1317  1.101   thorpej 	LIST_INIT(&pq);
   1318   1.43   thorpej 
   1319  1.134        ad 	mutex_enter(&pp->pr_lock);
   1320   1.43   thorpej 	pr_enter(pp, file, line);
   1321   1.43   thorpej 
   1322   1.56  sommerfe 	pr_log(pp, v, PRLOG_PUT, file, line);
   1323   1.56  sommerfe 
   1324  1.101   thorpej 	pool_do_put(pp, v, &pq);
   1325   1.21   thorpej 
   1326   1.25   thorpej 	pr_leave(pp);
   1327  1.134        ad 	mutex_exit(&pp->pr_lock);
   1328  1.101   thorpej 
   1329  1.102       chs 	pr_pagelist_free(pp, &pq);
   1330    1.1        pk }
   1331   1.57  sommerfe #undef pool_put
   1332   1.59   thorpej #endif /* POOL_DIAGNOSTIC */
   1333    1.1        pk 
   1334   1.56  sommerfe void
   1335   1.56  sommerfe pool_put(struct pool *pp, void *v)
   1336   1.56  sommerfe {
   1337  1.101   thorpej 	struct pool_pagelist pq;
   1338  1.101   thorpej 
   1339  1.101   thorpej 	LIST_INIT(&pq);
   1340   1.56  sommerfe 
   1341  1.134        ad 	mutex_enter(&pp->pr_lock);
   1342  1.101   thorpej 	pool_do_put(pp, v, &pq);
   1343  1.134        ad 	mutex_exit(&pp->pr_lock);
   1344   1.56  sommerfe 
   1345  1.102       chs 	pr_pagelist_free(pp, &pq);
   1346   1.56  sommerfe }
   1347   1.57  sommerfe 
   1348   1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1349   1.57  sommerfe #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1350   1.56  sommerfe #endif
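/*
 * Usage sketch (illustrative only, not part of this file): how a
 * typical subsystem drives the pool_get()/pool_put() pair above.  All
 * "foo" names are hypothetical; only the pool(9) calls are real.
 */
#if 0
static struct pool foo_pool;

void
foo_init(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
	    NULL, IPL_NONE);
}

struct foo *
foo_alloc(void)
{

	/* PR_WAITOK may sleep; use PR_NOWAIT from interrupt context. */
	return pool_get(&foo_pool, PR_WAITOK);
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);
}
#endif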
   1351   1.74   thorpej 
   1352   1.74   thorpej /*
   1353  1.113      yamt  * pool_grow: grow a pool by a page.
   1354  1.113      yamt  *
   1355  1.113      yamt  * => called with pool locked.
   1356  1.113      yamt  * => unlock and relock the pool.
   1357  1.113      yamt  * => return with pool locked.
   1358  1.113      yamt  */
   1359  1.113      yamt 
   1360  1.113      yamt static int
   1361  1.113      yamt pool_grow(struct pool *pp, int flags)
   1362  1.113      yamt {
   1363  1.113      yamt 	struct pool_item_header *ph = NULL;
   1364  1.113      yamt 	char *cp;
   1365  1.113      yamt 
   1366  1.134        ad 	mutex_exit(&pp->pr_lock);
   1367  1.113      yamt 	cp = pool_allocator_alloc(pp, flags);
   1368  1.113      yamt 	if (__predict_true(cp != NULL)) {
   1369  1.113      yamt 		ph = pool_alloc_item_header(pp, cp, flags);
   1370  1.113      yamt 	}
   1371  1.113      yamt 	if (__predict_false(cp == NULL || ph == NULL)) {
   1372  1.113      yamt 		if (cp != NULL) {
   1373  1.113      yamt 			pool_allocator_free(pp, cp);
   1374  1.113      yamt 		}
   1375  1.134        ad 		mutex_enter(&pp->pr_lock);
   1376  1.113      yamt 		return ENOMEM;
   1377  1.113      yamt 	}
   1378  1.113      yamt 
   1379  1.134        ad 	mutex_enter(&pp->pr_lock);
   1380  1.113      yamt 	pool_prime_page(pp, cp, ph);
   1381  1.113      yamt 	pp->pr_npagealloc++;
   1382  1.113      yamt 	return 0;
   1383  1.113      yamt }
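/*
 * Caller-side sketch of the contract above (illustrative only): since
 * pool_grow() drops pr_lock around the back-end allocator, any pool
 * state observed before the call may be stale afterwards and must be
 * re-checked, as pool_get() does with its "startover" label.
 */
#if 0
	KASSERT(mutex_owned(&pp->pr_lock));
	while (pp->pr_curpage == NULL) {
		/* pool_grow() drops and retakes pr_lock internally. */
		if (pool_grow(pp, PR_NOWAIT) != 0 &&
		    pp->pr_curpage == NULL) {
			/* Still empty and cannot grow; give up. */
			mutex_exit(&pp->pr_lock);
			return ENOMEM;
		}
	}
#endif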
   1384  1.113      yamt 
   1385  1.113      yamt /*
   1386   1.74   thorpej  * Add N items to the pool.
   1387   1.74   thorpej  */
   1388   1.74   thorpej int
   1389   1.74   thorpej pool_prime(struct pool *pp, int n)
   1390   1.74   thorpej {
   1391   1.75    simonb 	int newpages;
   1392  1.113      yamt 	int error = 0;
   1393   1.74   thorpej 
   1394  1.134        ad 	mutex_enter(&pp->pr_lock);
   1395   1.74   thorpej 
   1396   1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1397   1.74   thorpej 
   1398   1.74   thorpej 	while (newpages-- > 0) {
   1399  1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1400  1.113      yamt 		if (error) {
   1401   1.74   thorpej 			break;
   1402   1.74   thorpej 		}
   1403   1.74   thorpej 		pp->pr_minpages++;
   1404   1.74   thorpej 	}
   1405   1.74   thorpej 
   1406   1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1407   1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1408   1.74   thorpej 
   1409  1.134        ad 	mutex_exit(&pp->pr_lock);
   1410  1.113      yamt 	return error;
   1411   1.74   thorpej }
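/*
 * Usage sketch (illustrative only): pool_prime() is typically called
 * once after pool_init() by subsystems that cannot afford allocation
 * failure later, e.g. at interrupt time.  The count is hypothetical.
 */
#if 0
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
	    NULL, IPL_VM);
	/* Pre-allocate pages for at least 128 items; may fail ENOMEM. */
	if (pool_prime(&foo_pool, 128) != 0)
		printf("foo: could not prime pool\n");
#endif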
   1412   1.55   thorpej 
   1413   1.55   thorpej /*
    1414    1.3        pk  * Add a page's worth of items to the pool.
   1415   1.21   thorpej  *
   1416   1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1417    1.3        pk  */
   1418   1.55   thorpej static void
   1419  1.128  christos pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1420    1.3        pk {
   1421    1.3        pk 	struct pool_item *pi;
   1422  1.128  christos 	void *cp = storage;
   1423  1.125        ad 	const unsigned int align = pp->pr_align;
   1424  1.125        ad 	const unsigned int ioff = pp->pr_itemoffset;
   1425   1.55   thorpej 	int n;
   1426   1.36        pk 
   1427  1.134        ad 	KASSERT(mutex_owned(&pp->pr_lock));
   1428   1.91      yamt 
   1429   1.66   thorpej #ifdef DIAGNOSTIC
   1430  1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
   1431  1.150     skrll 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1432   1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1433   1.66   thorpej #endif
   1434    1.3        pk 
   1435    1.3        pk 	/*
   1436    1.3        pk 	 * Insert page header.
   1437    1.3        pk 	 */
   1438   1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1439  1.102       chs 	LIST_INIT(&ph->ph_itemlist);
   1440    1.3        pk 	ph->ph_page = storage;
   1441    1.3        pk 	ph->ph_nmissing = 0;
   1442  1.151      yamt 	ph->ph_time = time_uptime;
   1443   1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1444   1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1445    1.3        pk 
   1446    1.6   thorpej 	pp->pr_nidle++;
   1447    1.6   thorpej 
   1448    1.3        pk 	/*
   1449    1.3        pk 	 * Color this page.
   1450    1.3        pk 	 */
   1451  1.141      yamt 	ph->ph_off = pp->pr_curcolor;
   1452  1.141      yamt 	cp = (char *)cp + ph->ph_off;
   1453    1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1454    1.3        pk 		pp->pr_curcolor = 0;
   1455    1.3        pk 
   1456    1.3        pk 	/*
    1457    1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1458    1.3        pk 	 */
   1459    1.3        pk 	if (ioff != 0)
   1460  1.128  christos 		cp = (char *)cp + align - ioff;
   1461    1.3        pk 
   1462  1.125        ad 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1463  1.125        ad 
   1464    1.3        pk 	/*
   1465    1.3        pk 	 * Insert remaining chunks on the bucket list.
   1466    1.3        pk 	 */
   1467    1.3        pk 	n = pp->pr_itemsperpage;
   1468   1.20   thorpej 	pp->pr_nitems += n;
   1469    1.3        pk 
   1470   1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1471  1.141      yamt 		pr_item_notouch_init(pp, ph);
   1472   1.97      yamt 	} else {
   1473   1.97      yamt 		while (n--) {
   1474   1.97      yamt 			pi = (struct pool_item *)cp;
   1475   1.78   thorpej 
   1476   1.97      yamt 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1477    1.3        pk 
   1478   1.97      yamt 			/* Insert on page list */
   1479  1.102       chs 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1480    1.3        pk #ifdef DIAGNOSTIC
   1481   1.97      yamt 			pi->pi_magic = PI_MAGIC;
   1482    1.3        pk #endif
   1483  1.128  christos 			cp = (char *)cp + pp->pr_size;
   1484  1.125        ad 
   1485  1.125        ad 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1486   1.97      yamt 		}
   1487    1.3        pk 	}
   1488    1.3        pk 
   1489    1.3        pk 	/*
   1490    1.3        pk 	 * If the pool was depleted, point at the new page.
   1491    1.3        pk 	 */
   1492    1.3        pk 	if (pp->pr_curpage == NULL)
   1493    1.3        pk 		pp->pr_curpage = ph;
   1494    1.3        pk 
   1495    1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1496    1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1497    1.3        pk }
   1498    1.3        pk 
   1499   1.20   thorpej /*
    1500   1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark: it
    1501   1.88       chs  * brings pr_nitems back up to the low water mark.
   1502   1.20   thorpej  *
   1503   1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1504   1.20   thorpej  *
   1505   1.73   thorpej  * Note 2, we must be called with the pool already locked, and we return
   1506   1.20   thorpej  * with it locked.
   1507   1.20   thorpej  */
   1508   1.20   thorpej static int
   1509   1.42   thorpej pool_catchup(struct pool *pp)
   1510   1.20   thorpej {
   1511   1.20   thorpej 	int error = 0;
   1512   1.20   thorpej 
   1513   1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1514  1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1515  1.113      yamt 		if (error) {
   1516   1.20   thorpej 			break;
   1517   1.20   thorpej 		}
   1518   1.20   thorpej 	}
   1519  1.113      yamt 	return error;
   1520   1.20   thorpej }
   1521   1.20   thorpej 
   1522   1.88       chs static void
   1523   1.88       chs pool_update_curpage(struct pool *pp)
   1524   1.88       chs {
   1525   1.88       chs 
   1526   1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1527   1.88       chs 	if (pp->pr_curpage == NULL) {
   1528   1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1529   1.88       chs 	}
   1530  1.168      yamt 	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
   1531  1.168      yamt 	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
   1532   1.88       chs }
   1533   1.88       chs 
   1534    1.3        pk void
   1535   1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1536    1.3        pk {
   1537   1.15        pk 
   1538  1.134        ad 	mutex_enter(&pp->pr_lock);
   1539   1.21   thorpej 
   1540    1.3        pk 	pp->pr_minitems = n;
   1541   1.15        pk 	pp->pr_minpages = (n == 0)
   1542   1.15        pk 		? 0
   1543   1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1544   1.20   thorpej 
   1545   1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1546   1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1547   1.20   thorpej 		/*
   1548   1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1549   1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1550   1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1551   1.20   thorpej 		 */
   1552   1.20   thorpej 	}
   1553   1.21   thorpej 
   1554  1.134        ad 	mutex_exit(&pp->pr_lock);
   1555    1.3        pk }
   1556    1.3        pk 
   1557    1.3        pk void
   1558   1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1559    1.3        pk {
   1560   1.15        pk 
   1561  1.134        ad 	mutex_enter(&pp->pr_lock);
   1562   1.21   thorpej 
   1563   1.15        pk 	pp->pr_maxpages = (n == 0)
   1564   1.15        pk 		? 0
   1565   1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1566   1.21   thorpej 
   1567  1.134        ad 	mutex_exit(&pp->pr_lock);
   1568    1.3        pk }
   1569    1.3        pk 
   1570   1.20   thorpej void
   1571   1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1572   1.20   thorpej {
   1573   1.20   thorpej 
   1574  1.134        ad 	mutex_enter(&pp->pr_lock);
   1575   1.20   thorpej 
   1576   1.20   thorpej 	pp->pr_hardlimit = n;
   1577   1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1578   1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1579   1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1580   1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1581   1.20   thorpej 
   1582   1.20   thorpej 	/*
   1583   1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1584   1.21   thorpej 	 * release the lock.
   1585   1.20   thorpej 	 */
   1586   1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1587   1.20   thorpej 		? 0
   1588   1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1589   1.21   thorpej 
   1590  1.134        ad 	mutex_exit(&pp->pr_lock);
   1591   1.20   thorpej }
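/*
 * Tuning sketch (illustrative, hypothetical values) for the three
 * knobs above: the low water mark keeps pages for n items resident
 * across pool_reclaim(), the high water mark lets pool_put() release
 * empty pages beyond it, and the hard limit caps outstanding items,
 * logging warnmess at most once per ratecap seconds when it is hit.
 */
#if 0
	pool_setlowat(&foo_pool, 16);
	pool_sethiwat(&foo_pool, 1024);
	pool_sethardlimit(&foo_pool, 4096,
	    "WARNING: foo pool limit reached", 60);
#endif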
   1592    1.3        pk 
   1593    1.3        pk /*
   1594    1.3        pk  * Release all complete pages that have not been used recently.
   1595    1.3        pk  */
   1596   1.66   thorpej int
   1597   1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1598   1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1599   1.56  sommerfe #else
   1600   1.56  sommerfe pool_reclaim(struct pool *pp)
   1601   1.56  sommerfe #endif
   1602    1.3        pk {
   1603    1.3        pk 	struct pool_item_header *ph, *phnext;
   1604   1.61       chs 	struct pool_pagelist pq;
   1605  1.151      yamt 	uint32_t curtime;
   1606  1.134        ad 	bool klock;
   1607  1.134        ad 	int rv;
   1608    1.3        pk 
   1609   1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1610   1.68   thorpej 		/*
   1611   1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1612   1.68   thorpej 		 */
   1613   1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1614   1.68   thorpej 	}
   1615   1.68   thorpej 
   1616  1.134        ad 	/*
    1617  1.157        ad 	 * XXXSMP Take the kernel lock for soft-interrupt pools so that
    1618  1.157        ad 	 * we do not cause non-MPSAFE code to block.
   1619  1.134        ad 	 */
   1620  1.134        ad 	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
   1621  1.134        ad 	    pp->pr_ipl == IPL_SOFTSERIAL) {
   1622  1.134        ad 		KERNEL_LOCK(1, NULL);
   1623  1.134        ad 		klock = true;
   1624  1.134        ad 	} else
   1625  1.134        ad 		klock = false;
   1626  1.134        ad 
   1627  1.134        ad 	/* Reclaim items from the pool's cache (if any). */
   1628  1.134        ad 	if (pp->pr_cache != NULL)
   1629  1.134        ad 		pool_cache_invalidate(pp->pr_cache);
   1630  1.134        ad 
   1631  1.134        ad 	if (mutex_tryenter(&pp->pr_lock) == 0) {
   1632  1.134        ad 		if (klock) {
   1633  1.134        ad 			KERNEL_UNLOCK_ONE(NULL);
   1634  1.134        ad 		}
   1635   1.66   thorpej 		return (0);
   1636  1.134        ad 	}
   1637   1.25   thorpej 	pr_enter(pp, file, line);
   1638   1.68   thorpej 
   1639   1.88       chs 	LIST_INIT(&pq);
   1640   1.43   thorpej 
   1641  1.151      yamt 	curtime = time_uptime;
   1642   1.21   thorpej 
   1643   1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1644   1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1645    1.3        pk 
   1646    1.3        pk 		/* Check our minimum page claim */
   1647    1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1648    1.3        pk 			break;
   1649    1.3        pk 
   1650   1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1651  1.151      yamt 		if (curtime - ph->ph_time < pool_inactive_time
   1652  1.117      yamt 		    && !pa_starved_p(pp->pr_alloc))
   1653   1.88       chs 			continue;
   1654   1.21   thorpej 
   1655   1.88       chs 		/*
   1656   1.88       chs 		 * If freeing this page would put us below
   1657   1.88       chs 		 * the low water mark, stop now.
   1658   1.88       chs 		 */
   1659   1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1660   1.88       chs 		    pp->pr_minitems)
   1661   1.88       chs 			break;
   1662   1.21   thorpej 
   1663   1.88       chs 		pr_rmpage(pp, ph, &pq);
   1664    1.3        pk 	}
   1665    1.3        pk 
   1666   1.25   thorpej 	pr_leave(pp);
   1667  1.134        ad 	mutex_exit(&pp->pr_lock);
   1668  1.134        ad 
   1669  1.134        ad 	if (LIST_EMPTY(&pq))
   1670  1.134        ad 		rv = 0;
   1671  1.134        ad 	else {
   1672  1.134        ad 		pr_pagelist_free(pp, &pq);
   1673  1.134        ad 		rv = 1;
   1674  1.134        ad 	}
   1675  1.134        ad 
   1676  1.134        ad 	if (klock) {
   1677  1.134        ad 		KERNEL_UNLOCK_ONE(NULL);
   1678  1.134        ad 	}
   1679   1.66   thorpej 
   1680  1.134        ad 	return (rv);
   1681    1.3        pk }
   1682    1.3        pk 
   1683    1.3        pk /*
    1684  1.134        ad  * Drain pools, one at a time.  This is a two-stage process;
   1685  1.134        ad  * drain_start kicks off a cross call to drain CPU-level caches
   1686  1.134        ad  * if the pool has an associated pool_cache.  drain_end waits
   1687  1.134        ad  * for those cross calls to finish, and then drains the cache
   1688  1.134        ad  * (if any) and pool.
   1689  1.131        ad  *
   1690  1.134        ad  * Note, must never be called from interrupt context.
   1691    1.3        pk  */
   1692    1.3        pk void
   1693  1.134        ad pool_drain_start(struct pool **ppp, uint64_t *wp)
   1694    1.3        pk {
   1695    1.3        pk 	struct pool *pp;
   1696  1.134        ad 
   1697  1.145        ad 	KASSERT(!TAILQ_EMPTY(&pool_head));
   1698    1.3        pk 
   1699   1.61       chs 	pp = NULL;
   1700  1.134        ad 
   1701  1.134        ad 	/* Find next pool to drain, and add a reference. */
   1702  1.134        ad 	mutex_enter(&pool_head_lock);
   1703  1.134        ad 	do {
   1704  1.134        ad 		if (drainpp == NULL) {
   1705  1.145        ad 			drainpp = TAILQ_FIRST(&pool_head);
   1706  1.134        ad 		}
   1707  1.134        ad 		if (drainpp != NULL) {
   1708  1.134        ad 			pp = drainpp;
   1709  1.145        ad 			drainpp = TAILQ_NEXT(pp, pr_poollist);
   1710  1.134        ad 		}
   1711  1.134        ad 		/*
   1712  1.134        ad 		 * Skip completely idle pools.  We depend on at least
   1713  1.134        ad 		 * one pool in the system being active.
   1714  1.134        ad 		 */
   1715  1.134        ad 	} while (pp == NULL || pp->pr_npages == 0);
   1716  1.134        ad 	pp->pr_refcnt++;
   1717  1.134        ad 	mutex_exit(&pool_head_lock);
   1718  1.134        ad 
   1719  1.134        ad 	/* If there is a pool_cache, drain CPU level caches. */
   1720  1.134        ad 	*ppp = pp;
   1721  1.134        ad 	if (pp->pr_cache != NULL) {
   1722  1.134        ad 		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
   1723  1.134        ad 		    pp->pr_cache, NULL);
   1724  1.134        ad 	}
   1725  1.134        ad }
   1726  1.134        ad 
   1727  1.134        ad void
   1728  1.134        ad pool_drain_end(struct pool *pp, uint64_t where)
   1729  1.134        ad {
   1730  1.134        ad 
   1731  1.134        ad 	if (pp == NULL)
   1732  1.134        ad 		return;
   1733  1.134        ad 
   1734  1.134        ad 	KASSERT(pp->pr_refcnt > 0);
   1735  1.134        ad 
   1736  1.134        ad 	/* Wait for remote draining to complete. */
   1737  1.134        ad 	if (pp->pr_cache != NULL)
   1738  1.134        ad 		xc_wait(where);
   1739  1.134        ad 
    1740  1.134        ad 	/* Drain the cache (if any) and the pool. */
   1741  1.134        ad 	pool_reclaim(pp);
   1742  1.134        ad 
   1743  1.134        ad 	/* Finally, unlock the pool. */
   1744  1.134        ad 	mutex_enter(&pool_head_lock);
   1745  1.134        ad 	pp->pr_refcnt--;
   1746  1.134        ad 	cv_broadcast(&pool_busy);
   1747  1.134        ad 	mutex_exit(&pool_head_lock);
   1748    1.3        pk }
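/*
 * Usage sketch (illustrative only): the split API above lets a caller
 * such as the page daemon overlap the cross-call to the CPU-level
 * caches with other reclamation work before waiting on it.
 */
#if 0
	struct pool *pp = NULL;
	uint64_t where;

	pool_drain_start(&pp, &where);	/* pick a pool, start cross-call */
	/* ... reclaim other resources here ... */
	pool_drain_end(pp, where);	/* wait, then drain cache and pool */
#endif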
   1749    1.3        pk 
   1750    1.3        pk /*
   1751    1.3        pk  * Diagnostic helpers.
   1752    1.3        pk  */
   1753    1.3        pk void
   1754   1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1755   1.21   thorpej {
   1756   1.21   thorpej 
   1757   1.25   thorpej 	pool_print1(pp, modif, printf);
   1758   1.21   thorpej }
   1759   1.21   thorpej 
   1760   1.25   thorpej void
   1761  1.108      yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
   1762  1.108      yamt {
   1763  1.108      yamt 	struct pool *pp;
   1764  1.108      yamt 
   1765  1.145        ad 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   1766  1.108      yamt 		pool_printit(pp, modif, pr);
   1767  1.108      yamt 	}
   1768  1.108      yamt }
   1769  1.108      yamt 
   1770  1.108      yamt void
   1771   1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1772   1.25   thorpej {
   1773   1.25   thorpej 
   1774   1.25   thorpej 	if (pp == NULL) {
   1775   1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1776   1.25   thorpej 		return;
   1777   1.25   thorpej 	}
   1778   1.25   thorpej 
   1779   1.25   thorpej 	pool_print1(pp, modif, pr);
   1780   1.25   thorpej }
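/*
 * Note (an assumption about the caller): pool_printit() backs ddb(4)'s
 * "show pool" command.  The modifier characters parsed in pool_print1()
 * below select the output: 'l' prints the diagnostic log, 'p' the page
 * lists, and 'c' the cached object groups.
 */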
   1781   1.25   thorpej 
   1782   1.21   thorpej static void
   1783  1.124      yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1784   1.97      yamt     void (*pr)(const char *, ...))
   1785   1.88       chs {
   1786   1.88       chs 	struct pool_item_header *ph;
   1787   1.88       chs #ifdef DIAGNOSTIC
   1788   1.88       chs 	struct pool_item *pi;
   1789   1.88       chs #endif
   1790   1.88       chs 
   1791   1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1792  1.151      yamt 		(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
   1793  1.151      yamt 		    ph->ph_page, ph->ph_nmissing, ph->ph_time);
   1794   1.88       chs #ifdef DIAGNOSTIC
   1795   1.97      yamt 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1796  1.102       chs 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1797   1.97      yamt 				if (pi->pi_magic != PI_MAGIC) {
   1798   1.97      yamt 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1799   1.97      yamt 					    pi, pi->pi_magic);
   1800   1.97      yamt 				}
   1801   1.88       chs 			}
   1802   1.88       chs 		}
   1803   1.88       chs #endif
   1804   1.88       chs 	}
   1805   1.88       chs }
   1806   1.88       chs 
   1807   1.88       chs static void
   1808   1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1809    1.3        pk {
   1810   1.25   thorpej 	struct pool_item_header *ph;
   1811  1.134        ad 	pool_cache_t pc;
   1812  1.134        ad 	pcg_t *pcg;
   1813  1.134        ad 	pool_cache_cpu_t *cc;
   1814  1.134        ad 	uint64_t cpuhit, cpumiss;
   1815   1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1816   1.25   thorpej 	char c;
   1817   1.25   thorpej 
   1818   1.25   thorpej 	while ((c = *modif++) != '\0') {
   1819   1.25   thorpej 		if (c == 'l')
   1820   1.25   thorpej 			print_log = 1;
   1821   1.25   thorpej 		if (c == 'p')
   1822   1.25   thorpej 			print_pagelist = 1;
   1823   1.44   thorpej 		if (c == 'c')
   1824   1.44   thorpej 			print_cache = 1;
   1825   1.25   thorpej 	}
   1826   1.25   thorpej 
   1827  1.134        ad 	if ((pc = pp->pr_cache) != NULL) {
   1828  1.134        ad 		(*pr)("POOL CACHE");
   1829  1.134        ad 	} else {
   1830  1.134        ad 		(*pr)("POOL");
   1831  1.134        ad 	}
   1832  1.134        ad 
   1833  1.134        ad 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1834   1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1835   1.25   thorpej 	    pp->pr_roflags);
   1836   1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1837   1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1838   1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1839   1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1840   1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1841   1.25   thorpej 
   1842  1.134        ad 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
   1843   1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1844   1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1845   1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1846   1.25   thorpej 
   1847   1.25   thorpej 	if (print_pagelist == 0)
   1848   1.25   thorpej 		goto skip_pagelist;
   1849   1.25   thorpej 
   1850   1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1851   1.88       chs 		(*pr)("\n\tempty page list:\n");
   1852   1.97      yamt 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1853   1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1854   1.88       chs 		(*pr)("\n\tfull page list:\n");
   1855   1.97      yamt 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1856   1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1857   1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1858   1.97      yamt 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1859   1.88       chs 
   1860   1.25   thorpej 	if (pp->pr_curpage == NULL)
   1861   1.25   thorpej 		(*pr)("\tno current page\n");
   1862   1.25   thorpej 	else
   1863   1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1864   1.25   thorpej 
   1865   1.25   thorpej  skip_pagelist:
   1866   1.25   thorpej 	if (print_log == 0)
   1867   1.25   thorpej 		goto skip_log;
   1868   1.25   thorpej 
   1869   1.25   thorpej 	(*pr)("\n");
   1870   1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1871   1.25   thorpej 		(*pr)("\tno log\n");
   1872  1.122  christos 	else {
   1873   1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1874  1.122  christos 	}
   1875    1.3        pk 
   1876   1.25   thorpej  skip_log:
   1877   1.44   thorpej 
   1878  1.102       chs #define PR_GROUPLIST(pcg)						\
   1879  1.102       chs 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1880  1.142        ad 	for (i = 0; i < pcg->pcg_size; i++) {				\
   1881  1.102       chs 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1882  1.102       chs 		    POOL_PADDR_INVALID) {				\
   1883  1.102       chs 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1884  1.102       chs 			    pcg->pcg_objects[i].pcgo_va,		\
   1885  1.102       chs 			    (unsigned long long)			\
   1886  1.102       chs 			    pcg->pcg_objects[i].pcgo_pa);		\
   1887  1.102       chs 		} else {						\
   1888  1.102       chs 			(*pr)("\t\t\t%p\n",				\
   1889  1.102       chs 			    pcg->pcg_objects[i].pcgo_va);		\
   1890  1.102       chs 		}							\
   1891  1.102       chs 	}
   1892  1.102       chs 
   1893  1.134        ad 	if (pc != NULL) {
   1894  1.134        ad 		cpuhit = 0;
   1895  1.134        ad 		cpumiss = 0;
   1896  1.134        ad 		for (i = 0; i < MAXCPUS; i++) {
   1897  1.134        ad 			if ((cc = pc->pc_cpus[i]) == NULL)
   1898  1.134        ad 				continue;
   1899  1.134        ad 			cpuhit += cc->cc_hits;
   1900  1.134        ad 			cpumiss += cc->cc_misses;
   1901  1.134        ad 		}
   1902  1.134        ad 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
   1903  1.134        ad 		(*pr)("\tcache layer hits %llu misses %llu\n",
   1904  1.134        ad 		    pc->pc_hits, pc->pc_misses);
   1905  1.134        ad 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
   1906  1.134        ad 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
   1907  1.134        ad 		    pc->pc_contended);
   1908  1.134        ad 		(*pr)("\tcache layer empty groups %u full groups %u\n",
   1909  1.134        ad 		    pc->pc_nempty, pc->pc_nfull);
   1910  1.134        ad 		if (print_cache) {
   1911  1.134        ad 			(*pr)("\tfull cache groups:\n");
   1912  1.134        ad 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   1913  1.134        ad 			    pcg = pcg->pcg_next) {
   1914  1.134        ad 				PR_GROUPLIST(pcg);
   1915  1.134        ad 			}
   1916  1.134        ad 			(*pr)("\tempty cache groups:\n");
   1917  1.134        ad 			for (pcg = pc->pc_emptygroups; pcg != NULL;
   1918  1.134        ad 			    pcg = pcg->pcg_next) {
   1919  1.134        ad 				PR_GROUPLIST(pcg);
   1920  1.134        ad 			}
   1921  1.103       chs 		}
   1922   1.44   thorpej 	}
   1923  1.102       chs #undef PR_GROUPLIST
   1924   1.44   thorpej 
   1925   1.88       chs 	pr_enter_check(pp, pr);
   1926   1.88       chs }
   1927   1.88       chs 
   1928   1.88       chs static int
   1929   1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1930   1.88       chs {
   1931   1.88       chs 	struct pool_item *pi;
   1932  1.128  christos 	void *page;
   1933   1.88       chs 	int n;
   1934   1.88       chs 
   1935  1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1936  1.128  christos 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1937  1.121      yamt 		if (page != ph->ph_page &&
   1938  1.121      yamt 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1939  1.121      yamt 			if (label != NULL)
   1940  1.121      yamt 				printf("%s: ", label);
   1941  1.121      yamt 			printf("pool(%p:%s): page inconsistency: page %p;"
   1942  1.121      yamt 			       " at page head addr %p (p %p)\n", pp,
   1943  1.121      yamt 				pp->pr_wchan, ph->ph_page,
   1944  1.121      yamt 				ph, page);
   1945  1.121      yamt 			return 1;
   1946  1.121      yamt 		}
   1947   1.88       chs 	}
   1948    1.3        pk 
   1949   1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1950   1.97      yamt 		return 0;
   1951   1.97      yamt 
   1952  1.102       chs 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1953   1.88       chs 	     pi != NULL;
   1954  1.102       chs 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1955   1.88       chs 
   1956   1.88       chs #ifdef DIAGNOSTIC
   1957   1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1958   1.88       chs 			if (label != NULL)
   1959   1.88       chs 				printf("%s: ", label);
   1960   1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1961  1.121      yamt 			       " page %p; item ordinal %d; addr %p\n",
   1962   1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1963  1.121      yamt 				n, pi);
   1964   1.88       chs 			panic("pool");
   1965   1.88       chs 		}
   1966   1.88       chs #endif
   1967  1.121      yamt 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1968  1.121      yamt 			continue;
   1969  1.121      yamt 		}
   1970  1.128  christos 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1971   1.88       chs 		if (page == ph->ph_page)
   1972   1.88       chs 			continue;
   1973   1.88       chs 
   1974   1.88       chs 		if (label != NULL)
   1975   1.88       chs 			printf("%s: ", label);
   1976   1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1977   1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1978   1.88       chs 			pp->pr_wchan, ph->ph_page,
   1979   1.88       chs 			n, pi, page);
   1980   1.88       chs 		return 1;
   1981   1.88       chs 	}
   1982   1.88       chs 	return 0;
   1983    1.3        pk }
   1984    1.3        pk 
   1985   1.88       chs 
   1986    1.3        pk int
   1987   1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1988    1.3        pk {
   1989    1.3        pk 	struct pool_item_header *ph;
   1990    1.3        pk 	int r = 0;
   1991    1.3        pk 
   1992  1.134        ad 	mutex_enter(&pp->pr_lock);
   1993   1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1994   1.88       chs 		r = pool_chk_page(pp, label, ph);
   1995   1.88       chs 		if (r) {
   1996   1.88       chs 			goto out;
   1997   1.88       chs 		}
   1998   1.88       chs 	}
   1999   1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   2000   1.88       chs 		r = pool_chk_page(pp, label, ph);
   2001   1.88       chs 		if (r) {
   2002    1.3        pk 			goto out;
   2003    1.3        pk 		}
   2004   1.88       chs 	}
   2005   1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   2006   1.88       chs 		r = pool_chk_page(pp, label, ph);
   2007   1.88       chs 		if (r) {
   2008    1.3        pk 			goto out;
   2009    1.3        pk 		}
   2010    1.3        pk 	}
   2011   1.88       chs 
   2012    1.3        pk out:
   2013  1.134        ad 	mutex_exit(&pp->pr_lock);
   2014    1.3        pk 	return (r);
   2015   1.43   thorpej }
   2016   1.43   thorpej 
   2017   1.43   thorpej /*
   2018   1.43   thorpej  * pool_cache_init:
   2019   1.43   thorpej  *
   2020   1.43   thorpej  *	Initialize a pool cache.
   2021  1.134        ad  */
   2022  1.134        ad pool_cache_t
   2023  1.134        ad pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
   2024  1.134        ad     const char *wchan, struct pool_allocator *palloc, int ipl,
   2025  1.134        ad     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
   2026  1.134        ad {
   2027  1.134        ad 	pool_cache_t pc;
   2028  1.134        ad 
   2029  1.134        ad 	pc = pool_get(&cache_pool, PR_WAITOK);
   2030  1.134        ad 	if (pc == NULL)
   2031  1.134        ad 		return NULL;
   2032  1.134        ad 
   2033  1.134        ad 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
   2034  1.134        ad 	   palloc, ipl, ctor, dtor, arg);
   2035  1.134        ad 
   2036  1.134        ad 	return pc;
   2037  1.134        ad }
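/*
 * Usage sketch (illustrative only): a constructed-object cache.
 * foo_ctor() runs when a new object must be taken from the backing
 * pool and foo_dtor() when one is finally released back to it; objects
 * sitting in the cache stay constructed.  All "foo" names below are
 * hypothetical.
 */
#if 0
static int foo_ctor(void *, void *, int);
static void foo_dtor(void *, void *);
static pool_cache_t foo_cache;

void
foo_init(void)
{

	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
}

struct foo *
foo_alloc(void)
{

	return pool_cache_get(foo_cache, PR_WAITOK);
}

void
foo_free(struct foo *f)
{

	pool_cache_put(foo_cache, f);
}
#endif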
   2038  1.134        ad 
   2039  1.134        ad /*
   2040  1.134        ad  * pool_cache_bootstrap:
   2041   1.43   thorpej  *
   2042  1.134        ad  *	Kernel-private version of pool_cache_init().  The caller
   2043  1.134        ad  *	provides initial storage.
   2044   1.43   thorpej  */
   2045   1.43   thorpej void
   2046  1.134        ad pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
   2047  1.134        ad     u_int align_offset, u_int flags, const char *wchan,
   2048  1.134        ad     struct pool_allocator *palloc, int ipl,
   2049  1.134        ad     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
   2050   1.43   thorpej     void *arg)
   2051   1.43   thorpej {
   2052  1.134        ad 	CPU_INFO_ITERATOR cii;
   2053  1.145        ad 	pool_cache_t pc1;
   2054  1.134        ad 	struct cpu_info *ci;
   2055  1.134        ad 	struct pool *pp;
   2056  1.134        ad 
   2057  1.134        ad 	pp = &pc->pc_pool;
   2058  1.134        ad 	if (palloc == NULL && ipl == IPL_NONE)
   2059  1.134        ad 		palloc = &pool_allocator_nointr;
   2060  1.134        ad 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
   2061  1.157        ad 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
   2062   1.43   thorpej 
   2063  1.134        ad 	if (ctor == NULL) {
   2064  1.134        ad 		ctor = (int (*)(void *, void *, int))nullop;
   2065  1.134        ad 	}
   2066  1.134        ad 	if (dtor == NULL) {
   2067  1.134        ad 		dtor = (void (*)(void *, void *))nullop;
   2068  1.134        ad 	}
   2069   1.43   thorpej 
   2070  1.134        ad 	pc->pc_emptygroups = NULL;
   2071  1.134        ad 	pc->pc_fullgroups = NULL;
   2072  1.134        ad 	pc->pc_partgroups = NULL;
   2073   1.43   thorpej 	pc->pc_ctor = ctor;
   2074   1.43   thorpej 	pc->pc_dtor = dtor;
   2075   1.43   thorpej 	pc->pc_arg  = arg;
   2076  1.134        ad 	pc->pc_hits  = 0;
   2077   1.48   thorpej 	pc->pc_misses = 0;
   2078  1.134        ad 	pc->pc_nempty = 0;
   2079  1.134        ad 	pc->pc_npart = 0;
   2080  1.134        ad 	pc->pc_nfull = 0;
   2081  1.134        ad 	pc->pc_contended = 0;
   2082  1.134        ad 	pc->pc_refcnt = 0;
   2083  1.136      yamt 	pc->pc_freecheck = NULL;
   2084  1.134        ad 
   2085  1.142        ad 	if ((flags & PR_LARGECACHE) != 0) {
   2086  1.142        ad 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
   2087  1.163        ad 		pc->pc_pcgpool = &pcg_large_pool;
   2088  1.142        ad 	} else {
   2089  1.142        ad 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
   2090  1.163        ad 		pc->pc_pcgpool = &pcg_normal_pool;
   2091  1.142        ad 	}
   2092  1.142        ad 
   2093  1.134        ad 	/* Allocate per-CPU caches. */
   2094  1.134        ad 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
   2095  1.134        ad 	pc->pc_ncpu = 0;
   2096  1.139        ad 	if (ncpu < 2) {
   2097  1.137        ad 		/* XXX For sparc: boot CPU is not attached yet. */
   2098  1.137        ad 		pool_cache_cpu_init1(curcpu(), pc);
   2099  1.137        ad 	} else {
   2100  1.137        ad 		for (CPU_INFO_FOREACH(cii, ci)) {
   2101  1.137        ad 			pool_cache_cpu_init1(ci, pc);
   2102  1.137        ad 		}
   2103  1.134        ad 	}
   2104  1.145        ad 
   2105  1.145        ad 	/* Add to list of all pools. */
   2106  1.145        ad 	if (__predict_true(!cold))
   2107  1.134        ad 		mutex_enter(&pool_head_lock);
   2108  1.145        ad 	TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
   2109  1.145        ad 		if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
   2110  1.145        ad 			break;
   2111  1.145        ad 	}
   2112  1.145        ad 	if (pc1 == NULL)
   2113  1.145        ad 		TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
   2114  1.145        ad 	else
   2115  1.145        ad 		TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
   2116  1.145        ad 	if (__predict_true(!cold))
   2117  1.134        ad 		mutex_exit(&pool_head_lock);
   2118  1.145        ad 
   2119  1.145        ad 	membar_sync();
   2120  1.145        ad 	pp->pr_cache = pc;
   2121   1.43   thorpej }
   2122   1.43   thorpej 
   2123   1.43   thorpej /*
   2124   1.43   thorpej  * pool_cache_destroy:
   2125   1.43   thorpej  *
   2126   1.43   thorpej  *	Destroy a pool cache.
   2127   1.43   thorpej  */
   2128   1.43   thorpej void
   2129  1.134        ad pool_cache_destroy(pool_cache_t pc)
   2130   1.43   thorpej {
   2131  1.134        ad 	struct pool *pp = &pc->pc_pool;
   2132  1.134        ad 	pool_cache_cpu_t *cc;
   2133  1.134        ad 	pcg_t *pcg;
   2134  1.134        ad 	int i;
   2135  1.134        ad 
   2136  1.134        ad 	/* Remove it from the global list. */
   2137  1.134        ad 	mutex_enter(&pool_head_lock);
   2138  1.134        ad 	while (pc->pc_refcnt != 0)
   2139  1.134        ad 		cv_wait(&pool_busy, &pool_head_lock);
   2140  1.145        ad 	TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
   2141  1.134        ad 	mutex_exit(&pool_head_lock);
   2142   1.43   thorpej 
   2143   1.43   thorpej 	/* First, invalidate the entire cache. */
   2144   1.43   thorpej 	pool_cache_invalidate(pc);
   2145   1.43   thorpej 
   2146  1.134        ad 	/* Disassociate it from the pool. */
   2147  1.134        ad 	mutex_enter(&pp->pr_lock);
   2148  1.134        ad 	pp->pr_cache = NULL;
   2149  1.134        ad 	mutex_exit(&pp->pr_lock);
   2150  1.134        ad 
   2151  1.134        ad 	/* Destroy per-CPU data */
   2152  1.134        ad 	for (i = 0; i < MAXCPUS; i++) {
   2153  1.134        ad 		if ((cc = pc->pc_cpus[i]) == NULL)
   2154  1.134        ad 			continue;
   2155  1.162        ad 		if ((pcg = cc->cc_current) != &pcg_dummy) {
   2156  1.134        ad 			pcg->pcg_next = NULL;
   2157  1.134        ad 			pool_cache_invalidate_groups(pc, pcg);
   2158  1.134        ad 		}
   2159  1.162        ad 		if ((pcg = cc->cc_previous) != &pcg_dummy) {
   2160  1.134        ad 			pcg->pcg_next = NULL;
   2161  1.134        ad 			pool_cache_invalidate_groups(pc, pcg);
   2162  1.134        ad 		}
   2163  1.134        ad 		if (cc != &pc->pc_cpu0)
   2164  1.134        ad 			pool_put(&cache_cpu_pool, cc);
   2165  1.134        ad 	}
   2166  1.134        ad 
   2167  1.134        ad 	/* Finally, destroy it. */
   2168  1.134        ad 	mutex_destroy(&pc->pc_lock);
   2169  1.134        ad 	pool_destroy(pp);
   2170  1.134        ad 	pool_put(&cache_pool, pc);
   2171  1.134        ad }
   2172  1.134        ad 
   2173  1.134        ad /*
   2174  1.134        ad  * pool_cache_cpu_init1:
   2175  1.134        ad  *
   2176  1.134        ad  *	Called for each pool_cache whenever a new CPU is attached.
   2177  1.134        ad  */
   2178  1.134        ad static void
   2179  1.134        ad pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
   2180  1.134        ad {
   2181  1.134        ad 	pool_cache_cpu_t *cc;
   2182  1.137        ad 	int index;
   2183  1.134        ad 
   2184  1.137        ad 	index = ci->ci_index;
   2185  1.137        ad 
   2186  1.137        ad 	KASSERT(index < MAXCPUS);
   2187  1.134        ad 
   2188  1.137        ad 	if ((cc = pc->pc_cpus[index]) != NULL) {
   2189  1.137        ad 		KASSERT(cc->cc_cpuindex == index);
   2190  1.134        ad 		return;
   2191  1.134        ad 	}
   2192  1.134        ad 
   2193  1.134        ad 	/*
   2194  1.134        ad 	 * The first CPU is 'free'.  This needs to be the case for
    2195  1.134        ad 	 * bootstrap: we may not be able to allocate yet.
   2196  1.134        ad 	 */
   2197  1.134        ad 	if (pc->pc_ncpu == 0) {
   2198  1.134        ad 		cc = &pc->pc_cpu0;
   2199  1.134        ad 		pc->pc_ncpu = 1;
   2200  1.134        ad 	} else {
   2201  1.134        ad 		mutex_enter(&pc->pc_lock);
   2202  1.134        ad 		pc->pc_ncpu++;
   2203  1.134        ad 		mutex_exit(&pc->pc_lock);
   2204  1.134        ad 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
   2205  1.134        ad 	}
   2206  1.134        ad 
   2207  1.134        ad 	cc->cc_ipl = pc->pc_pool.pr_ipl;
   2208  1.134        ad 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
   2209  1.134        ad 	cc->cc_cache = pc;
   2210  1.137        ad 	cc->cc_cpuindex = index;
   2211  1.134        ad 	cc->cc_hits = 0;
   2212  1.134        ad 	cc->cc_misses = 0;
   2213  1.169      yamt 	cc->cc_current = __UNCONST(&pcg_dummy);
   2214  1.169      yamt 	cc->cc_previous = __UNCONST(&pcg_dummy);
   2215  1.134        ad 
   2216  1.137        ad 	pc->pc_cpus[index] = cc;
   2217   1.43   thorpej }
   2218   1.43   thorpej 
   2219  1.134        ad /*
   2220  1.134        ad  * pool_cache_cpu_init:
   2221  1.134        ad  *
   2222  1.134        ad  *	Called whenever a new CPU is attached.
   2223  1.134        ad  */
   2224  1.134        ad void
   2225  1.134        ad pool_cache_cpu_init(struct cpu_info *ci)
   2226   1.43   thorpej {
   2227  1.134        ad 	pool_cache_t pc;
   2228  1.134        ad 
   2229  1.134        ad 	mutex_enter(&pool_head_lock);
   2230  1.145        ad 	TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
   2231  1.134        ad 		pc->pc_refcnt++;
   2232  1.134        ad 		mutex_exit(&pool_head_lock);
   2233   1.43   thorpej 
   2234  1.134        ad 		pool_cache_cpu_init1(ci, pc);
   2235   1.43   thorpej 
   2236  1.134        ad 		mutex_enter(&pool_head_lock);
   2237  1.134        ad 		pc->pc_refcnt--;
   2238  1.134        ad 		cv_broadcast(&pool_busy);
   2239  1.134        ad 	}
   2240  1.134        ad 	mutex_exit(&pool_head_lock);
   2241   1.43   thorpej }
   2242   1.43   thorpej 
   2243  1.134        ad /*
   2244  1.134        ad  * pool_cache_reclaim:
   2245  1.134        ad  *
   2246  1.134        ad  *	Reclaim memory from a pool cache.
   2247  1.134        ad  */
   2248  1.134        ad bool
   2249  1.134        ad pool_cache_reclaim(pool_cache_t pc)
   2250   1.43   thorpej {
   2251   1.43   thorpej 
   2252  1.134        ad 	return pool_reclaim(&pc->pc_pool);
   2253  1.134        ad }
   2254   1.43   thorpej 
   2255  1.136      yamt static void
   2256  1.136      yamt pool_cache_destruct_object1(pool_cache_t pc, void *object)
   2257  1.136      yamt {
   2258  1.136      yamt 
   2259  1.136      yamt 	(*pc->pc_dtor)(pc->pc_arg, object);
   2260  1.136      yamt 	pool_put(&pc->pc_pool, object);
   2261  1.136      yamt }
   2262  1.136      yamt 
   2263  1.134        ad /*
   2264  1.134        ad  * pool_cache_destruct_object:
   2265  1.134        ad  *
   2266  1.134        ad  *	Force destruction of an object and its release back into
   2267  1.134        ad  *	the pool.
   2268  1.134        ad  */
   2269  1.134        ad void
   2270  1.134        ad pool_cache_destruct_object(pool_cache_t pc, void *object)
   2271  1.134        ad {
   2272  1.134        ad 
   2273  1.136      yamt 	FREECHECK_IN(&pc->pc_freecheck, object);
   2274  1.136      yamt 
   2275  1.136      yamt 	pool_cache_destruct_object1(pc, object);
   2276   1.43   thorpej }
   2277   1.43   thorpej 
   2278  1.134        ad /*
   2279  1.134        ad  * pool_cache_invalidate_groups:
   2280  1.134        ad  *
   2281  1.134        ad  *	Invalidate a chain of groups and destruct all objects.
   2282  1.134        ad  */
   2283  1.102       chs static void
   2284  1.134        ad pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
   2285  1.102       chs {
   2286  1.134        ad 	void *object;
   2287  1.134        ad 	pcg_t *next;
   2288  1.134        ad 	int i;
   2289  1.134        ad 
   2290  1.134        ad 	for (; pcg != NULL; pcg = next) {
   2291  1.134        ad 		next = pcg->pcg_next;
   2292  1.134        ad 
   2293  1.134        ad 		for (i = 0; i < pcg->pcg_avail; i++) {
   2294  1.134        ad 			object = pcg->pcg_objects[i].pcgo_va;
   2295  1.136      yamt 			pool_cache_destruct_object1(pc, object);
   2296  1.134        ad 		}
   2297  1.102       chs 
   2298  1.142        ad 		if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
   2299  1.142        ad 			pool_put(&pcg_large_pool, pcg);
   2300  1.142        ad 		} else {
   2301  1.142        ad 			KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
   2302  1.142        ad 			pool_put(&pcg_normal_pool, pcg);
   2303  1.142        ad 		}
   2304  1.102       chs 	}
   2305  1.102       chs }
   2306  1.102       chs 
   2307   1.43   thorpej /*
   2308  1.134        ad  * pool_cache_invalidate:
   2309   1.43   thorpej  *
   2310  1.134        ad  *	Invalidate a pool cache (destruct and release all of the
   2311  1.134        ad  *	cached objects).  Does not reclaim objects from the pool.
   2312   1.43   thorpej  */
   2313  1.134        ad void
   2314  1.134        ad pool_cache_invalidate(pool_cache_t pc)
   2315  1.134        ad {
   2316  1.134        ad 	pcg_t *full, *empty, *part;
   2317  1.134        ad 
   2318  1.134        ad 	mutex_enter(&pc->pc_lock);
   2319  1.134        ad 	full = pc->pc_fullgroups;
   2320  1.134        ad 	empty = pc->pc_emptygroups;
   2321  1.134        ad 	part = pc->pc_partgroups;
   2322  1.134        ad 	pc->pc_fullgroups = NULL;
   2323  1.134        ad 	pc->pc_emptygroups = NULL;
   2324  1.134        ad 	pc->pc_partgroups = NULL;
   2325  1.134        ad 	pc->pc_nfull = 0;
   2326  1.134        ad 	pc->pc_nempty = 0;
   2327  1.134        ad 	pc->pc_npart = 0;
   2328  1.134        ad 	mutex_exit(&pc->pc_lock);
   2329  1.134        ad 
   2330  1.134        ad 	pool_cache_invalidate_groups(pc, full);
   2331  1.134        ad 	pool_cache_invalidate_groups(pc, empty);
   2332  1.134        ad 	pool_cache_invalidate_groups(pc, part);
   2333  1.134        ad }
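
                          /*
                           * Usage sketch: pool_cache_invalidate() only destructs the
                           * cached objects; it does not return idle pages to the system.
                           * A caller that wants memory back can pair it with
                           * pool_cache_reclaim():
                           *
                           *	pool_cache_invalidate(pc);
                           *	(void)pool_cache_reclaim(pc);
                           */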
   2334  1.134        ad 
   2335  1.134        ad void
   2336  1.134        ad pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
   2337  1.134        ad {
   2338  1.134        ad 
   2339  1.134        ad 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
   2340  1.134        ad }
   2341  1.134        ad 
   2342  1.134        ad void
   2343  1.134        ad pool_cache_setlowat(pool_cache_t pc, int n)
   2344  1.134        ad {
   2345  1.134        ad 
   2346  1.134        ad 	pool_setlowat(&pc->pc_pool, n);
   2347  1.134        ad }
   2348  1.134        ad 
   2349  1.134        ad void
   2350  1.134        ad pool_cache_sethiwat(pool_cache_t pc, int n)
   2351  1.134        ad {
   2352  1.134        ad 
   2353  1.134        ad 	pool_sethiwat(&pc->pc_pool, n);
   2354  1.134        ad }
   2355  1.134        ad 
   2356  1.134        ad void
   2357  1.134        ad pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
   2358  1.134        ad {
   2359  1.134        ad 
   2360  1.134        ad 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
   2361  1.134        ad }
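
                          /*
                           * Tuning sketch (hypothetical values): the wrappers above apply
                           * the standard pool watermarks and hard limit to the pool that
                           * backs a cache, e.g.:
                           *
                           *	pool_cache_setlowat(pc, 16);
                           *	pool_cache_sethiwat(pc, 256);
                           *	pool_cache_sethardlimit(pc, 1024, "out of foo objects", 60);
                           */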
   2362  1.134        ad 
   2363  1.162        ad static bool __noinline
   2364  1.162        ad pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
   2365  1.134        ad 		    paddr_t *pap, int flags)
   2366   1.43   thorpej {
   2367  1.134        ad 	pcg_t *pcg, *cur;
   2368  1.134        ad 	uint64_t ncsw;
   2369  1.134        ad 	pool_cache_t pc;
   2370   1.43   thorpej 	void *object;
   2371   1.58   thorpej 
   2372  1.168      yamt 	KASSERT(cc->cc_current->pcg_avail == 0);
   2373  1.168      yamt 	KASSERT(cc->cc_previous->pcg_avail == 0);
   2374  1.168      yamt 
   2375  1.134        ad 	pc = cc->cc_cache;
   2376  1.134        ad 	cc->cc_misses++;
   2377   1.43   thorpej 
   2378  1.134        ad 	/*
    2379  1.134        ad 	 * Nothing was available locally.  Try to grab a group
   2380  1.134        ad 	 * from the cache.
   2381  1.134        ad 	 */
   2382  1.162        ad 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2383  1.134        ad 		ncsw = curlwp->l_ncsw;
   2384  1.134        ad 		mutex_enter(&pc->pc_lock);
   2385  1.134        ad 		pc->pc_contended++;
   2386   1.43   thorpej 
   2387  1.134        ad 		/*
   2388  1.134        ad 		 * If we context switched while locking, then
   2389  1.134        ad 		 * our view of the per-CPU data is invalid:
   2390  1.134        ad 		 * retry.
   2391  1.134        ad 		 */
   2392  1.134        ad 		if (curlwp->l_ncsw != ncsw) {
   2393  1.134        ad 			mutex_exit(&pc->pc_lock);
   2394  1.162        ad 			return true;
   2395   1.43   thorpej 		}
   2396  1.102       chs 	}
   2397   1.43   thorpej 
   2398  1.162        ad 	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
   2399   1.43   thorpej 		/*
   2400  1.134        ad 		 * If there's a full group, release our empty
   2401  1.134        ad 		 * group back to the cache.  Install the full
   2402  1.134        ad 		 * group as cc_current and return.
   2403   1.43   thorpej 		 */
   2404  1.162        ad 		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
   2405  1.134        ad 			KASSERT(cur->pcg_avail == 0);
   2406  1.134        ad 			cur->pcg_next = pc->pc_emptygroups;
   2407  1.134        ad 			pc->pc_emptygroups = cur;
   2408  1.134        ad 			pc->pc_nempty++;
   2409   1.87   thorpej 		}
   2410  1.142        ad 		KASSERT(pcg->pcg_avail == pcg->pcg_size);
   2411  1.134        ad 		cc->cc_current = pcg;
   2412  1.134        ad 		pc->pc_fullgroups = pcg->pcg_next;
   2413  1.134        ad 		pc->pc_hits++;
   2414  1.134        ad 		pc->pc_nfull--;
   2415  1.134        ad 		mutex_exit(&pc->pc_lock);
   2416  1.162        ad 		return true;
   2417  1.134        ad 	}
   2418  1.134        ad 
   2419  1.134        ad 	/*
    2420  1.134        ad 	 * Nothing available locally or in cache.  Fall back to
    2421  1.134        ad 	 * the pool: fetch a new object from it and run the
    2422  1.134        ad 	 * constructor.
   2423  1.134        ad 	 */
   2424  1.134        ad 	pc->pc_misses++;
   2425  1.134        ad 	mutex_exit(&pc->pc_lock);
   2426  1.162        ad 	splx(s);
   2427  1.134        ad 
   2428  1.134        ad 	object = pool_get(&pc->pc_pool, flags);
   2429  1.134        ad 	*objectp = object;
   2430  1.162        ad 	if (__predict_false(object == NULL))
   2431  1.162        ad 		return false;
   2432  1.125        ad 
   2433  1.162        ad 	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
   2434  1.134        ad 		pool_put(&pc->pc_pool, object);
   2435  1.134        ad 		*objectp = NULL;
   2436  1.162        ad 		return false;
   2437   1.43   thorpej 	}
   2438   1.43   thorpej 
   2439  1.134        ad 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
   2440  1.134        ad 	    (pc->pc_pool.pr_align - 1)) == 0);
   2441   1.43   thorpej 
   2442  1.134        ad 	if (pap != NULL) {
   2443  1.134        ad #ifdef POOL_VTOPHYS
   2444  1.134        ad 		*pap = POOL_VTOPHYS(object);
   2445  1.134        ad #else
   2446  1.134        ad 		*pap = POOL_PADDR_INVALID;
   2447  1.134        ad #endif
   2448  1.102       chs 	}
   2449   1.43   thorpej 
   2450  1.125        ad 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2451  1.162        ad 	return false;
   2452   1.43   thorpej }
   2453   1.43   thorpej 
   2454   1.43   thorpej /*
   2455  1.134        ad  * pool_cache_get{,_paddr}:
   2456   1.43   thorpej  *
   2457  1.134        ad  *	Get an object from a pool cache (optionally returning
   2458  1.134        ad  *	the physical address of the object).
   2459   1.43   thorpej  */
   2460  1.134        ad void *
   2461  1.134        ad pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
   2462   1.43   thorpej {
   2463  1.134        ad 	pool_cache_cpu_t *cc;
   2464  1.134        ad 	pcg_t *pcg;
   2465  1.134        ad 	void *object;
   2466   1.60   thorpej 	int s;
   2467   1.43   thorpej 
   2468  1.134        ad #ifdef LOCKDEBUG
   2469  1.155        ad 	if (flags & PR_WAITOK) {
   2470  1.154      yamt 		ASSERT_SLEEPABLE();
   2471  1.155        ad 	}
   2472  1.134        ad #endif
   2473  1.125        ad 
   2474  1.162        ad 	/* Lock out interrupts and disable preemption. */
   2475  1.162        ad 	s = splvm();
   2476  1.165      yamt 	while (/* CONSTCOND */ true) {
    2477  1.134        ad 		/* Try to allocate an object from the current group. */
   2478  1.162        ad 		cc = pc->pc_cpus[curcpu()->ci_index];
   2479  1.162        ad 		KASSERT(cc->cc_cache == pc);
    2480  1.134        ad 		pcg = cc->cc_current;
   2481  1.162        ad 		if (__predict_true(pcg->pcg_avail > 0)) {
   2482  1.134        ad 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
   2483  1.162        ad 			if (__predict_false(pap != NULL))
   2484  1.134        ad 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
   2485  1.148      yamt #if defined(DIAGNOSTIC)
   2486  1.134        ad 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
   2487  1.163        ad 			KASSERT(pcg->pcg_avail < pcg->pcg_size);
   2488  1.134        ad 			KASSERT(object != NULL);
   2489  1.163        ad #endif
   2490  1.134        ad 			cc->cc_hits++;
   2491  1.162        ad 			splx(s);
   2492  1.134        ad 			FREECHECK_OUT(&pc->pc_freecheck, object);
   2493  1.134        ad 			return object;
   2494   1.43   thorpej 		}
   2495   1.43   thorpej 
   2496   1.43   thorpej 		/*
   2497  1.134        ad 		 * That failed.  If the previous group isn't empty, swap
   2498  1.134        ad 		 * it with the current group and allocate from there.
   2499   1.43   thorpej 		 */
   2500  1.134        ad 		pcg = cc->cc_previous;
   2501  1.162        ad 		if (__predict_true(pcg->pcg_avail > 0)) {
   2502  1.134        ad 			cc->cc_previous = cc->cc_current;
   2503  1.134        ad 			cc->cc_current = pcg;
   2504  1.134        ad 			continue;
   2505   1.43   thorpej 		}
   2506   1.43   thorpej 
   2507  1.134        ad 		/*
   2508  1.134        ad 		 * Can't allocate from either group: try the slow path.
   2509  1.134        ad 		 * If get_slow() allocated an object for us, or if
   2510  1.162        ad 		 * no more objects are available, it will return false.
   2511  1.134        ad 		 * Otherwise, we need to retry.
   2512  1.134        ad 		 */
   2513  1.165      yamt 		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
   2514  1.165      yamt 			break;
   2515  1.165      yamt 	}
   2516   1.43   thorpej 
   2517  1.134        ad 	return object;
   2518   1.51   thorpej }
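
                          /*
                           * Usage sketch: callers that do not need the physical address
                           * use pool_cache_get(), which passes a NULL pap here.  For a
                           * hypothetical cache:
                           *
                           *	obj = pool_cache_get(foo_cache, PR_NOWAIT);
                           *	if (obj == NULL)
                           *		return ENOMEM;
                           *
                           * PR_WAITOK callers may sleep here; hence the
                           * ASSERT_SLEEPABLE() check above.
                           */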
   2519   1.51   thorpej 
   2520  1.162        ad static bool __noinline
   2521  1.162        ad pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
   2522   1.51   thorpej {
   2523  1.163        ad 	pcg_t *pcg, *cur;
   2524  1.134        ad 	uint64_t ncsw;
   2525  1.134        ad 	pool_cache_t pc;
   2526   1.51   thorpej 
   2527  1.168      yamt 	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
   2528  1.168      yamt 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
   2529  1.168      yamt 
   2530  1.134        ad 	pc = cc->cc_cache;
   2531  1.134        ad 	cc->cc_misses++;
   2532   1.43   thorpej 
   2533  1.162        ad 	/* Lock the cache. */
   2534  1.162        ad 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
   2535  1.164        ad 		ncsw = curlwp->l_ncsw;
   2536  1.134        ad 		mutex_enter(&pc->pc_lock);
   2537  1.134        ad 		pc->pc_contended++;
   2538  1.162        ad 
   2539  1.163        ad 		/*
   2540  1.163        ad 		 * If we context switched while locking, then our view of
   2541  1.163        ad 		 * the per-CPU data is invalid: retry.
   2542  1.163        ad 		 */
   2543  1.163        ad 		if (__predict_false(curlwp->l_ncsw != ncsw)) {
   2544  1.163        ad 			mutex_exit(&pc->pc_lock);
   2545  1.163        ad 			return true;
   2546  1.163        ad 		}
   2547  1.162        ad 	}
   2548  1.102       chs 
    2549  1.163        ad 	/* If there are no empty groups in the cache, allocate one. */
   2550  1.163        ad 	if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) {
   2551  1.163        ad 		if (__predict_true(!pool_cache_disable)) {
   2552  1.163        ad 			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
   2553  1.163        ad 		}
   2554  1.163        ad 		if (__predict_true(pcg != NULL)) {
   2555  1.163        ad 			pcg->pcg_avail = 0;
   2556  1.163        ad 			pcg->pcg_size = pc->pc_pcgsize;
   2557  1.163        ad 		}
   2558  1.163        ad 	} else {
   2559  1.163        ad 		pc->pc_emptygroups = pcg->pcg_next;
   2560  1.163        ad 		pc->pc_nempty--;
   2561  1.134        ad 	}
   2562  1.130        ad 
   2563  1.162        ad 	/*
    2564  1.162        ad 	 * If there's an empty group, release our full group back
    2565  1.162        ad 	 * to the cache.  Install the empty group on the local CPU
   2566  1.162        ad 	 * and return.
   2567  1.162        ad 	 */
   2568  1.163        ad 	if (pcg != NULL) {
   2569  1.134        ad 		KASSERT(pcg->pcg_avail == 0);
   2570  1.162        ad 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
   2571  1.146        ad 			cc->cc_previous = pcg;
   2572  1.146        ad 		} else {
   2573  1.162        ad 			cur = cc->cc_current;
   2574  1.162        ad 			if (__predict_true(cur != &pcg_dummy)) {
   2575  1.163        ad 				KASSERT(cur->pcg_avail == cur->pcg_size);
   2576  1.146        ad 				cur->pcg_next = pc->pc_fullgroups;
   2577  1.146        ad 				pc->pc_fullgroups = cur;
   2578  1.146        ad 				pc->pc_nfull++;
   2579  1.146        ad 			}
   2580  1.146        ad 			cc->cc_current = pcg;
   2581  1.146        ad 		}
   2582  1.163        ad 		pc->pc_hits++;
   2583  1.134        ad 		mutex_exit(&pc->pc_lock);
   2584  1.162        ad 		return true;
   2585  1.102       chs 	}
   2586  1.105  christos 
   2587  1.134        ad 	/*
    2588  1.162        ad 	 * No empty group was available in the cache and we could
    2589  1.162        ad 	 * not allocate one.  Destruct the object and release it
    2590  1.162        ad 	 * back to the pool here and now.
   2591  1.134        ad 	 */
   2592  1.134        ad 	pc->pc_misses++;
   2593  1.134        ad 	mutex_exit(&pc->pc_lock);
   2594  1.162        ad 	splx(s);
   2595  1.162        ad 	pool_cache_destruct_object(pc, object);
   2596  1.105  christos 
   2597  1.162        ad 	return false;
   2598  1.134        ad }
   2599  1.102       chs 
   2600   1.43   thorpej /*
   2601  1.134        ad  * pool_cache_put{,_paddr}:
   2602   1.43   thorpej  *
    2603  1.134        ad  *	Put an object back into the pool cache (optionally caching the
   2604  1.134        ad  *	physical address of the object).
   2605   1.43   thorpej  */
   2606  1.101   thorpej void
   2607  1.134        ad pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
   2608   1.43   thorpej {
   2609  1.134        ad 	pool_cache_cpu_t *cc;
   2610  1.134        ad 	pcg_t *pcg;
   2611  1.134        ad 	int s;
   2612  1.101   thorpej 
   2613  1.134        ad 	FREECHECK_IN(&pc->pc_freecheck, object);
   2614  1.101   thorpej 
   2615  1.162        ad 	/* Lock out interrupts and disable preemption. */
   2616  1.162        ad 	s = splvm();
   2617  1.165      yamt 	while (/* CONSTCOND */ true) {
   2618  1.134        ad 		/* If the current group isn't full, release it there. */
   2619  1.162        ad 		cc = pc->pc_cpus[curcpu()->ci_index];
   2620  1.162        ad 		KASSERT(cc->cc_cache == pc);
    2621  1.134        ad 		pcg = cc->cc_current;
   2622  1.162        ad 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2623  1.134        ad 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
   2624  1.134        ad 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
   2625  1.134        ad 			pcg->pcg_avail++;
   2626  1.134        ad 			cc->cc_hits++;
   2627  1.162        ad 			splx(s);
   2628  1.134        ad 			return;
   2629  1.134        ad 		}
   2630   1.43   thorpej 
   2631  1.134        ad 		/*
   2632  1.162        ad 		 * That failed.  If the previous group isn't full, swap
   2633  1.134        ad 		 * it with the current group and try again.
   2634  1.134        ad 		 */
   2635  1.134        ad 		pcg = cc->cc_previous;
   2636  1.162        ad 		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
   2637  1.134        ad 			cc->cc_previous = cc->cc_current;
   2638  1.134        ad 			cc->cc_current = pcg;
   2639  1.134        ad 			continue;
   2640  1.134        ad 		}
   2641   1.43   thorpej 
   2642  1.134        ad 		/*
   2643  1.134        ad 		 * Can't free to either group: try the slow path.
   2644  1.134        ad 		 * If put_slow() releases the object for us, it
   2645  1.162        ad 		 * will return false.  Otherwise we need to retry.
   2646  1.134        ad 		 */
   2647  1.165      yamt 		if (!pool_cache_put_slow(cc, s, object))
   2648  1.165      yamt 			break;
   2649  1.165      yamt 	}
   2650   1.43   thorpej }
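
                          /*
                           * Usage sketch: the _paddr variants let a caller that already
                           * paid for a virtual-to-physical translation (e.g. to program
                           * a DMA engine) stash the result in the group entry, so that a
                           * later pool_cache_get_paddr() returns it without another
                           * lookup:
                           *
                           *	obj = pool_cache_get_paddr(pc, PR_WAITOK, &pa);
                           *	... use obj and pa ...
                           *	pool_cache_put_paddr(pc, obj, pa);
                           */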
   2651   1.43   thorpej 
   2652   1.43   thorpej /*
   2653  1.134        ad  * pool_cache_xcall:
   2654   1.43   thorpej  *
   2655  1.134        ad  *	Transfer objects from the per-CPU cache to the global cache.
   2656  1.134        ad  *	Run within a cross-call thread.
   2657   1.43   thorpej  */
   2658   1.43   thorpej static void
   2659  1.134        ad pool_cache_xcall(pool_cache_t pc)
   2660   1.43   thorpej {
   2661  1.134        ad 	pool_cache_cpu_t *cc;
   2662  1.134        ad 	pcg_t *prev, *cur, **list;
   2663  1.162        ad 	int s;
   2664  1.134        ad 
   2665  1.162        ad 	s = splvm();
   2666  1.162        ad 	mutex_enter(&pc->pc_lock);
   2667  1.162        ad 	cc = pc->pc_cpus[curcpu()->ci_index];
   2668  1.134        ad 	cur = cc->cc_current;
   2669  1.169      yamt 	cc->cc_current = __UNCONST(&pcg_dummy);
   2670  1.134        ad 	prev = cc->cc_previous;
   2671  1.169      yamt 	cc->cc_previous = __UNCONST(&pcg_dummy);
   2672  1.162        ad 	if (cur != &pcg_dummy) {
   2673  1.142        ad 		if (cur->pcg_avail == cur->pcg_size) {
   2674  1.134        ad 			list = &pc->pc_fullgroups;
   2675  1.134        ad 			pc->pc_nfull++;
   2676  1.134        ad 		} else if (cur->pcg_avail == 0) {
   2677  1.134        ad 			list = &pc->pc_emptygroups;
   2678  1.134        ad 			pc->pc_nempty++;
   2679  1.134        ad 		} else {
   2680  1.134        ad 			list = &pc->pc_partgroups;
   2681  1.134        ad 			pc->pc_npart++;
   2682  1.134        ad 		}
   2683  1.134        ad 		cur->pcg_next = *list;
   2684  1.134        ad 		*list = cur;
   2685  1.134        ad 	}
   2686  1.162        ad 	if (prev != &pcg_dummy) {
   2687  1.142        ad 		if (prev->pcg_avail == prev->pcg_size) {
   2688  1.134        ad 			list = &pc->pc_fullgroups;
   2689  1.134        ad 			pc->pc_nfull++;
   2690  1.134        ad 		} else if (prev->pcg_avail == 0) {
   2691  1.134        ad 			list = &pc->pc_emptygroups;
   2692  1.134        ad 			pc->pc_nempty++;
   2693  1.134        ad 		} else {
   2694  1.134        ad 			list = &pc->pc_partgroups;
   2695  1.134        ad 			pc->pc_npart++;
   2696  1.134        ad 		}
   2697  1.134        ad 		prev->pcg_next = *list;
   2698  1.134        ad 		*list = prev;
   2699  1.134        ad 	}
   2700  1.134        ad 	mutex_exit(&pc->pc_lock);
   2701  1.134        ad 	splx(s);
   2702    1.3        pk }
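
                          /*
                           * Sketch of how this is driven, assuming the xcall(9)
                           * interface: the caller schedules the transfer on the CPU
                           * whose per-CPU data is to be drained and waits for it to
                           * complete:
                           *
                           *	where = xc_unicast(0, (xcfunc_t)pool_cache_xcall,
                           *	    pc, NULL, ci);
                           *	xc_wait(where);
                           */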
   2703   1.66   thorpej 
   2704   1.66   thorpej /*
   2705   1.66   thorpej  * Pool backend allocators.
   2706   1.66   thorpej  *
   2707   1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   2708   1.66   thorpej  * and any additional draining that might be needed.
   2709   1.66   thorpej  *
   2710   1.66   thorpej  * We provide two standard allocators:
   2711   1.66   thorpej  *
   2712   1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   2713   1.66   thorpej  *
   2714   1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   2715   1.66   thorpej  *	in interrupt context.
   2716   1.66   thorpej  */
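
                          /*
                           * A pool can also supply its own allocator.  A minimal sketch
                           * with hypothetical names (pa_pagesz of 0 selects the default
                           * page size):
                           *
                           *	static struct pool_allocator foo_allocator = {
                           *		.pa_alloc = foo_page_alloc,
                           *		.pa_free = foo_page_free,
                           *		.pa_pagesz = 0,
                           *	};
                           *
                           *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
                           *	    "foopl", &foo_allocator, IPL_NONE);
                           */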
   2717   1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   2718   1.66   thorpej void	pool_page_free(struct pool *, void *);
   2719   1.66   thorpej 
   2720  1.112     bjh21 #ifdef POOL_SUBPAGE
   2721  1.112     bjh21 struct pool_allocator pool_allocator_kmem_fullpage = {
   2722  1.112     bjh21 	pool_page_alloc, pool_page_free, 0,
   2723  1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2724  1.112     bjh21 };
   2725  1.112     bjh21 #else
   2726   1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   2727   1.66   thorpej 	pool_page_alloc, pool_page_free, 0,
   2728  1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2729   1.66   thorpej };
   2730  1.112     bjh21 #endif
   2731   1.66   thorpej 
   2732   1.66   thorpej void	*pool_page_alloc_nointr(struct pool *, int);
   2733   1.66   thorpej void	pool_page_free_nointr(struct pool *, void *);
   2734   1.66   thorpej 
   2735  1.112     bjh21 #ifdef POOL_SUBPAGE
   2736  1.112     bjh21 struct pool_allocator pool_allocator_nointr_fullpage = {
   2737  1.112     bjh21 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2738  1.117      yamt 	.pa_backingmapptr = &kernel_map,
   2739  1.112     bjh21 };
   2740  1.112     bjh21 #else
   2741   1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   2742   1.66   thorpej 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2743  1.117      yamt 	.pa_backingmapptr = &kernel_map,
   2744   1.66   thorpej };
   2745  1.112     bjh21 #endif
   2746   1.66   thorpej 
   2747   1.66   thorpej #ifdef POOL_SUBPAGE
   2748   1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   2749   1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   2750   1.66   thorpej 
   2751  1.112     bjh21 struct pool_allocator pool_allocator_kmem = {
   2752  1.112     bjh21 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2753  1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2754  1.112     bjh21 };
   2755  1.112     bjh21 
   2756  1.112     bjh21 void	*pool_subpage_alloc_nointr(struct pool *, int);
   2757  1.112     bjh21 void	pool_subpage_free_nointr(struct pool *, void *);
   2758  1.112     bjh21 
   2759  1.112     bjh21 struct pool_allocator pool_allocator_nointr = {
   2760  1.112     bjh21 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2761  1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2762   1.66   thorpej };
   2763   1.66   thorpej #endif /* POOL_SUBPAGE */
   2764   1.66   thorpej 
   2765  1.117      yamt static void *
   2766  1.117      yamt pool_allocator_alloc(struct pool *pp, int flags)
   2767   1.66   thorpej {
   2768  1.117      yamt 	struct pool_allocator *pa = pp->pr_alloc;
   2769   1.66   thorpej 	void *res;
   2770   1.66   thorpej 
   2771  1.117      yamt 	res = (*pa->pa_alloc)(pp, flags);
   2772  1.117      yamt 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2773   1.66   thorpej 		/*
   2774  1.117      yamt 		 * We only run the drain hook here if PR_NOWAIT.
   2775  1.117      yamt 		 * In other cases, the hook will be run in
   2776  1.117      yamt 		 * pool_reclaim().
   2777   1.66   thorpej 		 */
   2778  1.117      yamt 		if (pp->pr_drain_hook != NULL) {
   2779  1.117      yamt 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2780  1.117      yamt 			res = (*pa->pa_alloc)(pp, flags);
   2781   1.66   thorpej 		}
   2782  1.117      yamt 	}
   2783  1.117      yamt 	return res;
   2784   1.66   thorpej }
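
                          /*
                           * Drain hook sketch (hypothetical subsystem): a layer caching
                           * objects above the pool can register a hook matching the
                           * retry above, so a failing PR_NOWAIT allocation can shed that
                           * cache:
                           *
                           *	static void
                           *	foo_drain(void *arg, int flags)
                           *	{
                           *		struct foo_softc *sc = arg;
                           *
                           *		... release some of sc's cached objects ...
                           *	}
                           *
                           *	pool_set_drain_hook(&foo_pool, foo_drain, sc);
                           */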
   2785   1.66   thorpej 
   2786  1.117      yamt static void
   2787   1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2788   1.66   thorpej {
   2789   1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2790   1.66   thorpej 
   2791   1.66   thorpej 	(*pa->pa_free)(pp, v);
   2792   1.66   thorpej }
   2793   1.66   thorpej 
   2794   1.66   thorpej void *
   2795  1.124      yamt pool_page_alloc(struct pool *pp, int flags)
   2796   1.66   thorpej {
   2797  1.127   thorpej 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2798   1.66   thorpej 
   2799  1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
   2800   1.66   thorpej }
   2801   1.66   thorpej 
   2802   1.66   thorpej void
   2803  1.124      yamt pool_page_free(struct pool *pp, void *v)
   2804   1.66   thorpej {
   2805   1.66   thorpej 
   2806   1.98      yamt 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2807   1.98      yamt }
   2808   1.98      yamt 
   2809   1.98      yamt static void *
   2810  1.124      yamt pool_page_alloc_meta(struct pool *pp, int flags)
   2811   1.98      yamt {
   2812  1.127   thorpej 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2813   1.98      yamt 
   2814  1.100      yamt 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
   2815   1.98      yamt }
   2816   1.98      yamt 
   2817   1.98      yamt static void
   2818  1.124      yamt pool_page_free_meta(struct pool *pp, void *v)
   2819   1.98      yamt {
   2820   1.98      yamt 
   2821  1.100      yamt 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
   2822   1.66   thorpej }
   2823   1.66   thorpej 
   2824   1.66   thorpej #ifdef POOL_SUBPAGE
   2825   1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2826   1.66   thorpej void *
   2827   1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2828   1.66   thorpej {
   2829  1.134        ad 	return pool_get(&psppool, flags);
   2830   1.66   thorpej }
   2831   1.66   thorpej 
   2832   1.66   thorpej void
   2833   1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2834   1.66   thorpej {
   2835   1.66   thorpej 	pool_put(&psppool, v);
   2836   1.66   thorpej }
   2837   1.66   thorpej 
   2838   1.66   thorpej /* We don't provide a real nointr allocator.  Maybe later. */
   2839   1.66   thorpej void *
   2840  1.112     bjh21 pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2841   1.66   thorpej {
   2842   1.66   thorpej 
   2843   1.66   thorpej 	return (pool_subpage_alloc(pp, flags));
   2844   1.66   thorpej }
   2845   1.66   thorpej 
   2846   1.66   thorpej void
   2847  1.112     bjh21 pool_subpage_free_nointr(struct pool *pp, void *v)
   2848   1.66   thorpej {
   2849   1.66   thorpej 
   2850   1.66   thorpej 	pool_subpage_free(pp, v);
   2851   1.66   thorpej }
    2852  1.112     bjh21 #endif /* POOL_SUBPAGE */

   2853   1.66   thorpej void *
   2854  1.124      yamt pool_page_alloc_nointr(struct pool *pp, int flags)
   2855   1.66   thorpej {
   2856  1.127   thorpej 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2857   1.66   thorpej 
   2858  1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
   2859   1.66   thorpej }
   2860   1.66   thorpej 
   2861   1.66   thorpej void
   2862  1.124      yamt pool_page_free_nointr(struct pool *pp, void *v)
   2863   1.66   thorpej {
   2864   1.66   thorpej 
   2865   1.98      yamt 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2866   1.66   thorpej }
   2867  1.141      yamt 
   2868  1.141      yamt #if defined(DDB)
   2869  1.141      yamt static bool
   2870  1.141      yamt pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2871  1.141      yamt {
   2872  1.141      yamt 
   2873  1.141      yamt 	return (uintptr_t)ph->ph_page <= addr &&
   2874  1.141      yamt 	    addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
   2875  1.141      yamt }
   2876  1.141      yamt 
   2877  1.143      yamt static bool
   2878  1.143      yamt pool_in_item(struct pool *pp, void *item, uintptr_t addr)
   2879  1.143      yamt {
   2880  1.143      yamt 
   2881  1.143      yamt 	return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
   2882  1.143      yamt }
   2883  1.143      yamt 
   2884  1.143      yamt static bool
   2885  1.143      yamt pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
   2886  1.143      yamt {
   2887  1.143      yamt 	int i;
   2888  1.143      yamt 
   2889  1.143      yamt 	if (pcg == NULL) {
   2890  1.143      yamt 		return false;
   2891  1.143      yamt 	}
   2892  1.144      yamt 	for (i = 0; i < pcg->pcg_avail; i++) {
   2893  1.143      yamt 		if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
   2894  1.143      yamt 			return true;
   2895  1.143      yamt 		}
   2896  1.143      yamt 	}
   2897  1.143      yamt 	return false;
   2898  1.143      yamt }
   2899  1.143      yamt 
   2900  1.143      yamt static bool
   2901  1.143      yamt pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
   2902  1.143      yamt {
   2903  1.143      yamt 
   2904  1.143      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
   2905  1.143      yamt 		unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
   2906  1.143      yamt 		pool_item_bitmap_t *bitmap =
   2907  1.143      yamt 		    ph->ph_bitmap + (idx / BITMAP_SIZE);
   2908  1.143      yamt 		pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
   2909  1.143      yamt 
   2910  1.143      yamt 		return (*bitmap & mask) == 0;
   2911  1.143      yamt 	} else {
   2912  1.143      yamt 		struct pool_item *pi;
   2913  1.143      yamt 
   2914  1.143      yamt 		LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   2915  1.143      yamt 			if (pool_in_item(pp, pi, addr)) {
   2916  1.143      yamt 				return false;
   2917  1.143      yamt 			}
   2918  1.143      yamt 		}
   2919  1.143      yamt 		return true;
   2920  1.143      yamt 	}
   2921  1.143      yamt }
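
                          /*
                           * Worked example for the PR_NOTOUCH case, assuming the 32-bit
                           * pool_item_bitmap_t used by this file: for item index 37,
                           * idx / BITMAP_SIZE == 1 selects the second bitmap word, and
                           * 1 << (37 & BITMAP_MASK) == 1 << 5 selects the bit within it.
                           * Free items have their bit set, so a clear bit means the item
                           * is allocated.
                           */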
   2922  1.143      yamt 
   2923  1.141      yamt void
   2924  1.141      yamt pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   2925  1.141      yamt {
   2926  1.141      yamt 	struct pool *pp;
   2927  1.141      yamt 
   2928  1.145        ad 	TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
   2929  1.141      yamt 		struct pool_item_header *ph;
   2930  1.141      yamt 		uintptr_t item;
   2931  1.143      yamt 		bool allocated = true;
   2932  1.143      yamt 		bool incache = false;
   2933  1.143      yamt 		bool incpucache = false;
   2934  1.143      yamt 		char cpucachestr[32];
   2935  1.141      yamt 
   2936  1.141      yamt 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
   2937  1.141      yamt 			LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   2938  1.141      yamt 				if (pool_in_page(pp, ph, addr)) {
   2939  1.141      yamt 					goto found;
   2940  1.141      yamt 				}
   2941  1.141      yamt 			}
   2942  1.141      yamt 			LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   2943  1.141      yamt 				if (pool_in_page(pp, ph, addr)) {
   2944  1.143      yamt 					allocated =
   2945  1.143      yamt 					    pool_allocated(pp, ph, addr);
   2946  1.143      yamt 					goto found;
   2947  1.143      yamt 				}
   2948  1.143      yamt 			}
   2949  1.143      yamt 			LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   2950  1.143      yamt 				if (pool_in_page(pp, ph, addr)) {
   2951  1.143      yamt 					allocated = false;
   2952  1.141      yamt 					goto found;
   2953  1.141      yamt 				}
   2954  1.141      yamt 			}
   2955  1.141      yamt 			continue;
   2956  1.141      yamt 		} else {
   2957  1.141      yamt 			ph = pr_find_pagehead_noalign(pp, (void *)addr);
   2958  1.141      yamt 			if (ph == NULL || !pool_in_page(pp, ph, addr)) {
   2959  1.141      yamt 				continue;
   2960  1.141      yamt 			}
   2961  1.143      yamt 			allocated = pool_allocated(pp, ph, addr);
   2962  1.141      yamt 		}
   2963  1.141      yamt found:
   2964  1.143      yamt 		if (allocated && pp->pr_cache) {
   2965  1.143      yamt 			pool_cache_t pc = pp->pr_cache;
   2966  1.143      yamt 			struct pool_cache_group *pcg;
   2967  1.143      yamt 			int i;
   2968  1.143      yamt 
   2969  1.143      yamt 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   2970  1.143      yamt 			    pcg = pcg->pcg_next) {
   2971  1.143      yamt 				if (pool_in_cg(pp, pcg, addr)) {
   2972  1.143      yamt 					incache = true;
   2973  1.143      yamt 					goto print;
   2974  1.143      yamt 				}
   2975  1.143      yamt 			}
   2976  1.143      yamt 			for (i = 0; i < MAXCPUS; i++) {
   2977  1.143      yamt 				pool_cache_cpu_t *cc;
   2978  1.143      yamt 
   2979  1.143      yamt 				if ((cc = pc->pc_cpus[i]) == NULL) {
   2980  1.143      yamt 					continue;
   2981  1.143      yamt 				}
   2982  1.143      yamt 				if (pool_in_cg(pp, cc->cc_current, addr) ||
   2983  1.143      yamt 				    pool_in_cg(pp, cc->cc_previous, addr)) {
   2984  1.143      yamt 					struct cpu_info *ci =
   2985  1.143      yamt 					    cpu_lookup_byindex(i);
   2986  1.143      yamt 
   2987  1.143      yamt 					incpucache = true;
   2988  1.143      yamt 					snprintf(cpucachestr,
   2989  1.143      yamt 					    sizeof(cpucachestr),
   2990  1.143      yamt 					    "cached by CPU %u",
   2991  1.153    martin 					    ci->ci_index);
   2992  1.143      yamt 					goto print;
   2993  1.143      yamt 				}
   2994  1.143      yamt 			}
   2995  1.143      yamt 		}
   2996  1.143      yamt print:
   2997  1.141      yamt 		item = (uintptr_t)ph->ph_page + ph->ph_off;
   2998  1.141      yamt 		item = item + rounddown(addr - item, pp->pr_size);
   2999  1.143      yamt 		(*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
    3000  1.141      yamt 	    (void *)addr, (void *)item, (size_t)(addr - item),
   3001  1.143      yamt 		    pp->pr_wchan,
   3002  1.143      yamt 		    incpucache ? cpucachestr :
   3003  1.143      yamt 		    incache ? "cached" : allocated ? "allocated" : "free");
   3004  1.141      yamt 	}
   3005  1.141      yamt }
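
                          /*
                           * Example ddb(4) interaction, with illustrative values:
                           *
                           *	db> whatis 0xc1234567
                           *	0xc1234567 is 0xc1234560+7 in POOL 'buf1k' (allocated)
                           *
                           * The final word may instead be "free", "cached", or
                           * "cached by CPU n" for objects sitting in a per-CPU group.
                           */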
   3006  1.141      yamt #endif /* defined(DDB) */
   3007