subr_pool.c revision 1.128.2.7
/*	$NetBSD: subr_pool.c,v 1.128.2.7 2007/09/01 12:55:15 ad Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.7 2007/09/01 12:55:15 ad Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
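
/*
 * Illustrative example (not compiled): typical client usage of this
 * interface.  "struct example" and the pool name are hypothetical; the
 * pool_init()/pool_get()/pool_put() signatures match their definitions
 * in this file.
 */
#if 0
struct example {
	int	ex_datum;
};

static struct pool example_pool;

void
example_subsystem_init(void)
{

	/* Carve "struct example"-sized items out of kernel pages. */
	pool_init(&example_pool, sizeof(struct example), 0, 0, 0,
	    "examplepl", &pool_allocator_nointr, IPL_NONE);
}

struct example *
example_alloc(void)
{

	/* PR_WAITOK: may sleep until an item can be provided. */
	return pool_get(&example_pool, PR_WAITOK);
}

void
example_free(struct example *ex)
{

	pool_put(&example_pool, ex);
}
#endif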

/* List of all pools */
LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);

/* List of all caches. */
LIST_HEAD(,pool_cache) pool_cache_head =
    LIST_HEAD_INITIALIZER(pool_cache_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
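
/*
 * Illustration: PHPOOL_FREELIST_NELEM(0) == 0, so phpool[0] serves bare
 * page headers with no item freelist; indices 1..PHPOOL_MAX-1 serve
 * headers whose PR_NOTOUCH freelists hold 2, 4, 8, ... 128 entries.
 */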

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static SLIST_HEAD(, pool_allocator) pa_deferinitq =
    SLIST_HEAD_INITIALIZER(pa_deferinitq);

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta,
	.pa_backingmapptr = &kmem_map,
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

typedef uint8_t pool_item_freelist_t;
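
/*
 * Note: with a uint8_t index, the two values reserved below as
 * PR_INDEX_USED (255) and PR_INDEX_EOL (254) leave at most 254
 * addressable items per page; the KASSERT at the top of pool_init()
 * checks this bound against PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1).
 */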

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			uint16_t
				phu_off;	/* start offset in page */
			pool_item_freelist_t
				phu_firstfree;	/* first free item */
			/*
			 * XXX it might be better to use
			 * a simple bitmap and ffs(3)
			 */
		} phu_notouch;
	} ph_u;
	uint16_t		ph_nmissing;	/* # of chunks in use */
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_off		ph_u.phu_notouch.phu_off
#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

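/*
 * Example: after pool_setlowat(pp, 16), POOL_NEEDS_CATCHUP() stays true
 * until pool_catchup() has grown the pool back to at least 16 items.
 */
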
/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
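
/*
 * Illustrative sketch (not compiled): a client of the cache layer.  The
 * pool_cache_init()/pool_cache_get() entry points and the ctor/dtor
 * signatures below are assumed to follow this file's pool_cache_t
 * usage; "struct foo" and all foo_* names are hypothetical.
 */
#if 0
struct foo { int f_cookie; };
static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	/* Expensive one-time construction, amortized across reuse. */
	f->f_cookie = 0;
	return 0;
}

static void
foo_dtor(void *arg, void *obj)
{

	/* Teardown mirroring foo_ctor(), run only on final release. */
}

void
foo_subsystem_init(void)
{

	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
}

void *
foo_alloc(void)
{

	/* Likely returns a still-constructed object from a cache group. */
	return pool_cache_get(foo_cache, PR_WAITOK);
}
#endif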

static struct pool pcgpool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
					     void *, paddr_t);
static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
					     void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;
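
/*
 * With the default POOL_LOGSIZE of 10, pr_log() below fills slots 0..9
 * and then wraps, so pr_curlogentry always names the slot that the next
 * get/put event will overwrite.
 */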

static inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static inline int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

#define	PR_FREELIST_ALIGN(p) \
	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
#define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
#define	PR_INDEX_USED	((pool_item_freelist_t)-1)
#define	PR_INDEX_EOL	((pool_item_freelist_t)-2)

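/*
 * Worked example of the PR_NOTOUCH freelist encoding, for a
 * hypothetical four-item page with items 0 and 2 free and 1 and 3
 * allocated: ph_firstfree == 0 and the index array reads
 * { 2, PR_INDEX_USED, PR_INDEX_EOL, PR_INDEX_USED }, i.e. slot 0
 * chains to slot 2, which ends the list.  pr_item_notouch_get() below
 * pops the head of that chain; pr_item_notouch_put() pushes a freed
 * index back on.
 */
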
static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] == PR_INDEX_USED);
	freelist[idx] = ph->ph_firstfree;
	ph->ph_firstfree = idx;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	int idx = ph->ph_firstfree;
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] != PR_INDEX_USED);
	ph->ph_firstfree = freelist[idx];
	freelist[idx] = PR_INDEX_USED;

	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider a pool_item_header with a smaller ph_page address
	 * to be "bigger".  (This unnatural ordering is for the benefit
	 * of pr_find_pagehead.)
	 */

	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		tmp.ph_page = (void *)(uintptr_t)v;
		ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		if (ph == NULL) {
			ph = SPLAY_ROOT(&pp->pr_phtree);
			if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
				ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
			}
			KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
		}
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}
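
/*
 * Example: with 4096-byte pages and PR_PHINPAGE clear, an item at
 * address 0x1208 is masked with pa_pagemask down to page 0x1000, and
 * that address is looked up in pr_phtree.  PR_NOALIGN pools cannot
 * mask, so the tree is searched for the nearest header whose page
 * address is at or below the item; the reversed phtree_compare()
 * ordering above makes that neighbour reachable via SPLAY_NEXT().
 */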

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

static bool
pa_starved_p(struct pool_allocator *pa)
{

	if (pa->pa_backingmap != NULL) {
		return vm_map_starved_p(pa->pa_backingmap);
	}
	return false;
}

static int
pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
{
	struct pool *pp = obj;
	struct pool_allocator *pa = pp->pr_alloc;

	KASSERT(&pp->pr_reclaimerentry == ce);
	pool_reclaim(pp);
	if (!pa_starved_p(pa)) {
		return CALLBACK_CHAIN_ABORT;
	}
	return CALLBACK_CHAIN_CONTINUE;
}

static void
pool_reclaim_register(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
	splx(s);
}

static void
pool_reclaim_unregister(struct pool *pp)
{
	struct vm_map *map = pp->pr_alloc->pa_backingmap;
	int s;

	if (map == NULL) {
		return;
	}

	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
	    &pp->pr_reclaimerentry);
	splx(s);
}

static void
pa_reclaim_register(struct pool_allocator *pa)
{
	struct vm_map *map = *pa->pa_backingmapptr;
	struct pool *pp;

	KASSERT(pa->pa_backingmap == NULL);
	if (map == NULL) {
		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
		return;
	}
	pa->pa_backingmap = map;
	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		pool_reclaim_register(pp);
	}
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	struct pool_allocator *pa;
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc, (*pi)->ipl);

	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
		KASSERT(pa->pa_backingmapptr != NULL);
		KASSERT(*pa->pa_backingmapptr != NULL);
		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
		pa_reclaim_register(pa);
	}

	pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
}
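
/*
 * Illustrative sketch (not compiled): how an entry lands in the "pools"
 * link set walked above.  <sys/pool.h>'s POOL_INIT() convenience macro
 * produces records to this effect; the expansion shown here is
 * paraphrased from the pool_init() argument order, not copied, and
 * "example_pool"/"struct example" are hypothetical.
 */
#if 0
struct pool example_pool;
static struct link_pool_init example_pool_init = {
	&example_pool, sizeof(struct example), 0, 0, 0,
	"examplepl", &pool_allocator_nointr, IPL_NONE
};
__link_set_add_rodata(pools, example_pool_init);
#endif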

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
#ifdef DEBUG
	struct pool *pp1;
#endif
	size_t trysize, phsize;
	int off, slack;

	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));

#ifdef DEBUG
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	LIST_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("pool_init: pool %s already initialised",
			    wchan);
	}
#endif

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;

		if (palloc->pa_backingmapptr != NULL) {
			pa_reclaim_register(palloc);
		}
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%zu) too large", size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid
	 * wasting too large a part of the page or too big an item.
	 * Off-page page headers go on a splay tree keyed by page
	 * address, so we can match a returned item with its header.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the threshold (XXX: tune).
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
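	/*
	 * Worked example with illustrative numbers: assume 4096-byte
	 * pages, align == ioff == 0 (so trysize == 4096 below) and an
	 * aligned header size of, say, 40 bytes:
	 *
	 *   - 512-byte items: 4096/512 == 8 but (4096-40)/512 == 7, so
	 *     an in-page header would cost a whole item and the header
	 *     goes off page, into phpool.
	 *   - 400-byte items: 4096/400 == (4096-40)/400 == 10, so the
	 *     header fits in the slack and stays in the page.
	 */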
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = PR_FREELIST_ALIGN(sz)
				    + nelem * sizeof(pool_item_freelist_t);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif
		pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
		    "cachegrp", &pool_allocator_meta, IPL_VM);
	}

	if (__predict_true(!cold)) {
		/* Insert into the list of all pools. */
		mutex_enter(&pool_head_lock);
		LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
		mutex_exit(&pool_head_lock);

		/* Insert this into the list of pools using this allocator. */
		mutex_enter(&palloc->pa_lock);
		TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
		mutex_exit(&palloc->pa_lock);
	} else {
		LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
		TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	}

	pool_reclaim_register(pp);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	LIST_REMOVE(pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	pool_reclaim_unregister(pp);
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
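
/*
 * Illustrative sketch (not compiled): a client that keeps private
 * caches can register a drain hook, which pool_get() and pool_reclaim()
 * invoke when the pool hits its hard limit or is reclaimed.  All foo_*
 * names are hypothetical; the hook signature is the one declared above.
 */
#if 0
static void
foo_drain(void *arg, int flags)
{

	/* Release reconstructible foo state back to foo_pool here. */
}

void
foo_attach(void)
{

	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
}
#endif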

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#endif /* DIAGNOSTIC */
#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
#endif

	mutex_enter(&pp->pr_lock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			cv_wait(&pp->pr_cv, &pp->pr_lock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		mutex_exit(&pp->pr_lock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			mutex_exit(&pp->pr_lock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		error = pool_grow(pp, flags);
		pr_enter(pp, file, line);
		if (error != 0) {
			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			return (NULL);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			mutex_exit(&pp->pr_lock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
   1074       1.20   thorpej #ifdef DIAGNOSTIC
   1075       1.97      yamt 		if (__predict_false(pp->pr_nitems == 0)) {
   1076       1.97      yamt 			pr_leave(pp);
   1077  1.128.2.2        ad 			mutex_exit(&pp->pr_lock);
   1078       1.97      yamt 			printf("pool_get: %s: items on itemlist, nitems %u\n",
   1079       1.97      yamt 			    pp->pr_wchan, pp->pr_nitems);
   1080       1.97      yamt 			panic("pool_get: nitems inconsistent");
   1081       1.97      yamt 		}
   1082       1.65     enami #endif
   1083       1.56  sommerfe 
   1084       1.65     enami #ifdef POOL_DIAGNOSTIC
   1085       1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1086       1.65     enami #endif
   1087        1.3        pk 
   1088       1.65     enami #ifdef DIAGNOSTIC
   1089       1.97      yamt 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
   1090       1.97      yamt 			pr_printlog(pp, pi, printf);
   1091       1.97      yamt 			panic("pool_get(%s): free list modified: "
   1092       1.97      yamt 			    "magic=%x; page %p; item addr %p\n",
   1093       1.97      yamt 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
   1094       1.97      yamt 		}
   1095        1.3        pk #endif
   1096        1.3        pk 
   1097       1.97      yamt 		/*
   1098       1.97      yamt 		 * Remove from item list.
   1099       1.97      yamt 		 */
   1100      1.102       chs 		LIST_REMOVE(pi, pi_list);
   1101       1.97      yamt 	}
   1102       1.20   thorpej 	pp->pr_nitems--;
   1103       1.20   thorpej 	pp->pr_nout++;
   1104        1.6   thorpej 	if (ph->ph_nmissing == 0) {
   1105        1.6   thorpej #ifdef DIAGNOSTIC
   1106       1.34   thorpej 		if (__predict_false(pp->pr_nidle == 0))
   1107        1.6   thorpej 			panic("pool_get: nidle inconsistent");
   1108        1.6   thorpej #endif
   1109        1.6   thorpej 		pp->pr_nidle--;
   1110       1.88       chs 
   1111       1.88       chs 		/*
   1112       1.88       chs 		 * This page was previously empty.  Move it to the list of
   1113       1.88       chs 		 * partially-full pages.  This page is already curpage.
   1114       1.88       chs 		 */
   1115       1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1116       1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1117        1.6   thorpej 	}
   1118        1.3        pk 	ph->ph_nmissing++;
   1119       1.97      yamt 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
   1120       1.21   thorpej #ifdef DIAGNOSTIC
   1121       1.97      yamt 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
   1122      1.102       chs 		    !LIST_EMPTY(&ph->ph_itemlist))) {
   1123       1.25   thorpej 			pr_leave(pp);
   1124  1.128.2.2        ad 			mutex_exit(&pp->pr_lock);
   1125       1.21   thorpej 			panic("pool_get: %s: nmissing inconsistent",
   1126       1.21   thorpej 			    pp->pr_wchan);
   1127       1.21   thorpej 		}
   1128       1.21   thorpej #endif
   1129        1.3        pk 		/*
   1130       1.88       chs 		 * This page is now full.  Move it to the full list
   1131       1.88       chs 		 * and select a new current page.
   1132        1.3        pk 		 */
   1133       1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1134       1.88       chs 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
   1135       1.88       chs 		pool_update_curpage(pp);
   1136        1.1        pk 	}
   1137        1.3        pk 
   1138        1.3        pk 	pp->pr_nget++;
   1139      1.111  christos 	pr_leave(pp);
   1140       1.20   thorpej 
   1141       1.20   thorpej 	/*
   1142       1.20   thorpej 	 * If we have a low water mark and we are now below that low
   1143       1.20   thorpej 	 * water mark, add more items to the pool.
   1144       1.20   thorpej 	 */
   1145       1.53   thorpej 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1146       1.20   thorpej 		/*
   1147       1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1148       1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1149       1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1150       1.20   thorpej 		 */
   1151       1.20   thorpej 	}
   1152       1.20   thorpej 
   1153  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1154      1.125        ad 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
   1155      1.125        ad 	FREECHECK_OUT(&pp->pr_freecheck, v);
   1156        1.1        pk 	return (v);
   1157        1.1        pk }
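/*
 * Usage sketch: the common pool_get()/pool_put() cycle.  "struct foo",
 * foo_pool and foo_init() are hypothetical names; the flags and
 * signatures are the ones used above.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	if (f != NULL) {
 *		foo_init(f);
 *		...
 *		pool_put(&foo_pool, f);
 *	}
 *
 * Note that even PR_WAITOK allocations can fail (pool_grow() may not
 * be able to obtain a page), so the NULL check is not redundant;
 * PR_NOWAIT callers must always expect NULL.
 */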
   1158        1.1        pk 
   1159        1.1        pk /*
   1160       1.43   thorpej  * Internal version of pool_put().  Pool is already locked/entered.
   1161        1.1        pk  */
   1162       1.43   thorpej static void
   1163      1.101   thorpej pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
   1164        1.1        pk {
   1165        1.1        pk 	struct pool_item *pi = v;
   1166        1.3        pk 	struct pool_item_header *ph;
   1167        1.3        pk 
   1168  1.128.2.2        ad 	KASSERT(mutex_owned(&pp->pr_lock));
   1169      1.125        ad 	FREECHECK_IN(&pp->pr_freecheck, v);
   1170  1.128.2.5        ad 	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
   1171       1.61       chs 
   1172       1.30   thorpej #ifdef DIAGNOSTIC
   1173       1.34   thorpej 	if (__predict_false(pp->pr_nout == 0)) {
   1174       1.30   thorpej 		printf("pool %s: putting with none out\n",
   1175       1.30   thorpej 		    pp->pr_wchan);
   1176       1.30   thorpej 		panic("pool_put");
   1177       1.30   thorpej 	}
   1178       1.30   thorpej #endif
   1179        1.3        pk 
   1180      1.121      yamt 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
   1181       1.25   thorpej 		pr_printlog(pp, NULL, printf);
   1182        1.3        pk 		panic("pool_put: %s: page header missing", pp->pr_wchan);
   1183        1.3        pk 	}
   1184       1.28   thorpej 
   1185        1.3        pk 	/*
   1186        1.3        pk 	 * Return to item list.
   1187        1.3        pk 	 */
   1188       1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1189       1.97      yamt 		pr_item_notouch_put(pp, ph, v);
   1190       1.97      yamt 	} else {
   1191        1.2        pk #ifdef DIAGNOSTIC
   1192       1.97      yamt 		pi->pi_magic = PI_MAGIC;
   1193        1.3        pk #endif
   1194       1.32       chs #ifdef DEBUG
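		/*
		 * Fill the freed item with PI_MAGIC so later modification
		 * of supposedly free memory can be detected.
		 */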
   1195       1.97      yamt 		{
   1196       1.97      yamt 			int i, *ip = v;
   1197       1.32       chs 
   1198       1.97      yamt 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
   1199       1.97      yamt 				*ip++ = PI_MAGIC;
   1200       1.97      yamt 			}
   1201       1.32       chs 		}
   1202       1.32       chs #endif
   1203       1.32       chs 
   1204      1.102       chs 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1205       1.97      yamt 	}
   1206       1.79   thorpej 	KDASSERT(ph->ph_nmissing != 0);
   1207        1.3        pk 	ph->ph_nmissing--;
   1208        1.3        pk 	pp->pr_nput++;
   1209       1.20   thorpej 	pp->pr_nitems++;
   1210       1.20   thorpej 	pp->pr_nout--;
   1211        1.3        pk 
   1212        1.3        pk 	/* Cancel "pool empty" condition if it exists */
   1213        1.3        pk 	if (pp->pr_curpage == NULL)
   1214        1.3        pk 		pp->pr_curpage = ph;
   1215        1.3        pk 
   1216        1.3        pk 	if (pp->pr_flags & PR_WANTED) {
   1217        1.3        pk 		pp->pr_flags &= ~PR_WANTED;
   1218       1.15        pk 		if (ph->ph_nmissing == 0)
   1219       1.15        pk 			pp->pr_nidle++;
   1220  1.128.2.4        ad 		cv_broadcast(&pp->pr_cv);
   1221        1.3        pk 		return;
   1222        1.3        pk 	}
   1223        1.3        pk 
   1224        1.3        pk 	/*
   1225       1.88       chs 	 * If this page is now empty, do one of two things:
   1226       1.21   thorpej 	 *
   1227       1.88       chs 	 *	(1) If we have more pages than the page high water mark,
   1228       1.96   thorpej 	 *	    free the page back to the system.  ONLY CONSIDER
   1229       1.90   thorpej 	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
   1230       1.90   thorpej 	 *	    CLAIM.
   1231       1.21   thorpej 	 *
   1232       1.88       chs 	 *	(2) Otherwise, move the page to the empty page list.
   1233       1.88       chs 	 *
   1234       1.88       chs 	 * Either way, select a new current page (so we use a partially-full
   1235       1.88       chs 	 * page if one is available).
   1236        1.3        pk 	 */
   1237        1.3        pk 	if (ph->ph_nmissing == 0) {
   1238        1.6   thorpej 		pp->pr_nidle++;
   1239       1.90   thorpej 		if (pp->pr_npages > pp->pr_minpages &&
   1240       1.90   thorpej 		    (pp->pr_npages > pp->pr_maxpages ||
   1241      1.117      yamt 		     pa_starved_p(pp->pr_alloc))) {
   1242      1.101   thorpej 			pr_rmpage(pp, ph, pq);
   1243        1.3        pk 		} else {
   1244       1.88       chs 			LIST_REMOVE(ph, ph_pagelist);
   1245       1.88       chs 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1246        1.3        pk 
   1247       1.21   thorpej 			/*
   1248       1.21   thorpej 			 * Update the timestamp on the page.  A page must
   1249       1.21   thorpej 			 * be idle for some period of time before it can
   1250       1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
   1251       1.21   thorpej 			 * ping-pong'ing for memory.
   1252       1.21   thorpej 			 */
   1253      1.118    kardel 			getmicrotime(&ph->ph_time);
   1254        1.1        pk 		}
   1255       1.88       chs 		pool_update_curpage(pp);
   1256        1.1        pk 	}
   1257       1.88       chs 
   1258       1.21   thorpej 	/*
   1259       1.88       chs 	 * If the page was previously completely full, move it to the
   1260       1.88       chs 	 * partially-full list and make it the current page.  The next
   1261       1.88       chs 	 * allocation will get the item from this page, instead of
   1262       1.88       chs 	 * further fragmenting the pool.
   1263       1.21   thorpej 	 */
   1264       1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1265       1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1266       1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1267       1.21   thorpej 		pp->pr_curpage = ph;
   1268       1.21   thorpej 	}
   1269       1.43   thorpej }
   1270       1.43   thorpej 
   1271       1.43   thorpej /*
    1272       1.43   thorpej  * Return resource to the pool.
   1273       1.43   thorpej  */
   1274       1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1275       1.43   thorpej void
   1276       1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
   1277       1.43   thorpej {
   1278      1.101   thorpej 	struct pool_pagelist pq;
   1279      1.101   thorpej 
   1280      1.101   thorpej 	LIST_INIT(&pq);
   1281       1.43   thorpej 
   1282  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1283       1.43   thorpej 	pr_enter(pp, file, line);
   1284       1.43   thorpej 
   1285       1.56  sommerfe 	pr_log(pp, v, PRLOG_PUT, file, line);
   1286       1.56  sommerfe 
   1287      1.101   thorpej 	pool_do_put(pp, v, &pq);
   1288       1.21   thorpej 
   1289       1.25   thorpej 	pr_leave(pp);
   1290  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1291      1.101   thorpej 
   1292      1.102       chs 	pr_pagelist_free(pp, &pq);
   1293        1.1        pk }
   1294       1.57  sommerfe #undef pool_put
   1295       1.59   thorpej #endif /* POOL_DIAGNOSTIC */
   1296        1.1        pk 
   1297       1.56  sommerfe void
   1298       1.56  sommerfe pool_put(struct pool *pp, void *v)
   1299       1.56  sommerfe {
   1300      1.101   thorpej 	struct pool_pagelist pq;
   1301      1.101   thorpej 
   1302      1.101   thorpej 	LIST_INIT(&pq);
   1303       1.56  sommerfe 
   1304  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1305      1.101   thorpej 	pool_do_put(pp, v, &pq);
   1306  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1307       1.56  sommerfe 
   1308      1.102       chs 	pr_pagelist_free(pp, &pq);
   1309       1.56  sommerfe }
   1310       1.57  sommerfe 
   1311       1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1312       1.57  sommerfe #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1313       1.56  sommerfe #endif
   1314       1.74   thorpej 
   1315       1.74   thorpej /*
   1316      1.113      yamt  * pool_grow: grow a pool by a page.
   1317      1.113      yamt  *
   1318      1.113      yamt  * => called with pool locked.
    1319      1.113      yamt  * => unlocks and relocks the pool around the allocator call.
   1320      1.113      yamt  * => return with pool locked.
   1321      1.113      yamt  */
   1322      1.113      yamt 
   1323      1.113      yamt static int
   1324      1.113      yamt pool_grow(struct pool *pp, int flags)
   1325      1.113      yamt {
   1326      1.113      yamt 	struct pool_item_header *ph = NULL;
   1327      1.113      yamt 	char *cp;
   1328      1.113      yamt 
   1329  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1330      1.113      yamt 	cp = pool_allocator_alloc(pp, flags);
   1331      1.113      yamt 	if (__predict_true(cp != NULL)) {
   1332      1.113      yamt 		ph = pool_alloc_item_header(pp, cp, flags);
   1333      1.113      yamt 	}
   1334      1.113      yamt 	if (__predict_false(cp == NULL || ph == NULL)) {
   1335      1.113      yamt 		if (cp != NULL) {
   1336      1.113      yamt 			pool_allocator_free(pp, cp);
   1337      1.113      yamt 		}
   1338  1.128.2.2        ad 		mutex_enter(&pp->pr_lock);
   1339      1.113      yamt 		return ENOMEM;
   1340      1.113      yamt 	}
   1341      1.113      yamt 
   1342  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1343      1.113      yamt 	pool_prime_page(pp, cp, ph);
   1344      1.113      yamt 	pp->pr_npagealloc++;
   1345      1.113      yamt 	return 0;
   1346      1.113      yamt }
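/*
 * Since pool_grow() drops pr_lock around the allocator call, callers
 * must not trust pool state observed before the call.  A sketch of the
 * pattern used by pool_get() above -- even on failure, re-check
 * pr_curpage, because items may have been freed while unlocked:
 *
 *	error = pool_grow(pp, flags);
 *	if (error != 0) {
 *		if (pp->pr_curpage != NULL)
 *			goto startover;
 *		... fail ...
 *	}
 */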
   1347      1.113      yamt 
   1348      1.113      yamt /*
   1349       1.74   thorpej  * Add N items to the pool.
   1350       1.74   thorpej  */
   1351       1.74   thorpej int
   1352       1.74   thorpej pool_prime(struct pool *pp, int n)
   1353       1.74   thorpej {
   1354       1.75    simonb 	int newpages;
   1355      1.113      yamt 	int error = 0;
   1356       1.74   thorpej 
   1357  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1358       1.74   thorpej 
   1359       1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1360       1.74   thorpej 
   1361       1.74   thorpej 	while (newpages-- > 0) {
   1362      1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1363      1.113      yamt 		if (error) {
   1364       1.74   thorpej 			break;
   1365       1.74   thorpej 		}
   1366       1.74   thorpej 		pp->pr_minpages++;
   1367       1.74   thorpej 	}
   1368       1.74   thorpej 
   1369       1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1370       1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1371       1.74   thorpej 
   1372  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1373      1.113      yamt 	return error;
   1374       1.74   thorpej }
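/*
 * Usage sketch (foo_pool is hypothetical): pre-allocating items for a
 * pool that will later be drawn from in contexts that cannot wait.
 *
 *	if (pool_prime(&foo_pool, 64) != 0)
 *		printf("foo_pool: could not prime\n");
 *
 * The request is rounded up to whole pages, and pr_minpages is raised
 * so that pool_reclaim() will not release the primed pages.
 */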
   1375       1.55   thorpej 
   1376       1.55   thorpej /*
   1377        1.3        pk  * Add a page worth of items to the pool.
   1378       1.21   thorpej  *
   1379       1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1380        1.3        pk  */
   1381       1.55   thorpej static void
   1382      1.128  christos pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
   1383        1.3        pk {
   1384        1.3        pk 	struct pool_item *pi;
   1385      1.128  christos 	void *cp = storage;
   1386      1.125        ad 	const unsigned int align = pp->pr_align;
   1387      1.125        ad 	const unsigned int ioff = pp->pr_itemoffset;
   1388       1.55   thorpej 	int n;
   1389       1.36        pk 
   1390  1.128.2.2        ad 	KASSERT(mutex_owned(&pp->pr_lock));
   1391       1.91      yamt 
   1392       1.66   thorpej #ifdef DIAGNOSTIC
   1393      1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
   1394      1.121      yamt 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1395       1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1396       1.66   thorpej #endif
   1397        1.3        pk 
   1398        1.3        pk 	/*
   1399        1.3        pk 	 * Insert page header.
   1400        1.3        pk 	 */
   1401       1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1402      1.102       chs 	LIST_INIT(&ph->ph_itemlist);
   1403        1.3        pk 	ph->ph_page = storage;
   1404        1.3        pk 	ph->ph_nmissing = 0;
   1405      1.118    kardel 	getmicrotime(&ph->ph_time);
   1406       1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1407       1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1408        1.3        pk 
   1409        1.6   thorpej 	pp->pr_nidle++;
   1410        1.6   thorpej 
   1411        1.3        pk 	/*
   1412        1.3        pk 	 * Color this page.
   1413        1.3        pk 	 */
   1414      1.128  christos 	cp = (char *)cp + pp->pr_curcolor;
   1415        1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1416        1.3        pk 		pp->pr_curcolor = 0;
   1417        1.3        pk 
   1418        1.3        pk 	/*
    1419        1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1420        1.3        pk 	 */
   1421        1.3        pk 	if (ioff != 0)
   1422      1.128  christos 		cp = (char *)cp + align - ioff;
   1423        1.3        pk 
   1424      1.125        ad 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1425      1.125        ad 
   1426        1.3        pk 	/*
   1427        1.3        pk 	 * Insert remaining chunks on the bucket list.
   1428        1.3        pk 	 */
   1429        1.3        pk 	n = pp->pr_itemsperpage;
   1430       1.20   thorpej 	pp->pr_nitems += n;
   1431        1.3        pk 
   1432       1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1433       1.99      yamt 		pool_item_freelist_t *freelist = PR_FREELIST(ph);
   1434       1.97      yamt 		int i;
   1435       1.97      yamt 
   1436      1.128  christos 		ph->ph_off = (char *)cp - (char *)storage;
   1437       1.97      yamt 		ph->ph_firstfree = 0;
   1438       1.97      yamt 		for (i = 0; i < n - 1; i++)
   1439       1.97      yamt 			freelist[i] = i + 1;
   1440       1.97      yamt 		freelist[n - 1] = PR_INDEX_EOL;
   1441       1.97      yamt 	} else {
   1442       1.97      yamt 		while (n--) {
   1443       1.97      yamt 			pi = (struct pool_item *)cp;
   1444       1.78   thorpej 
   1445       1.97      yamt 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1446        1.3        pk 
   1447       1.97      yamt 			/* Insert on page list */
   1448      1.102       chs 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1449        1.3        pk #ifdef DIAGNOSTIC
   1450       1.97      yamt 			pi->pi_magic = PI_MAGIC;
   1451        1.3        pk #endif
   1452      1.128  christos 			cp = (char *)cp + pp->pr_size;
   1453      1.125        ad 
   1454      1.125        ad 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1455       1.97      yamt 		}
   1456        1.3        pk 	}
   1457        1.3        pk 
   1458        1.3        pk 	/*
   1459        1.3        pk 	 * If the pool was depleted, point at the new page.
   1460        1.3        pk 	 */
   1461        1.3        pk 	if (pp->pr_curpage == NULL)
   1462        1.3        pk 		pp->pr_curpage = ph;
   1463        1.3        pk 
   1464        1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1465        1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1466        1.3        pk }
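/*
 * A worked example of the coloring arithmetic above, with illustrative
 * numbers: suppose align = 64, ioff = 0 and pr_curcolor = 128 when a
 * page is primed.  The first item is placed at storage + 128, and
 * pr_curcolor advances to 192 for the next page, wrapping to 0 once it
 * would exceed pr_maxcolor.  Successive pages thus start their items
 * at different cache-line offsets, spreading accesses across the CPU
 * cache.
 */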
   1467        1.3        pk 
   1468       1.20   thorpej /*
   1469       1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark.  This
    1470       1.88       chs  * Used by pool_get() when nitems drops below the low water mark.  Grows
    1471       1.20   thorpej  * the pool until pr_nitems catches up with the low water mark.
   1472       1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1473       1.20   thorpej  *
   1474       1.73   thorpej  * Note 2, we must be called with the pool already locked, and we return
   1475       1.20   thorpej  * with it locked.
   1476       1.20   thorpej  */
   1477       1.20   thorpej static int
   1478       1.42   thorpej pool_catchup(struct pool *pp)
   1479       1.20   thorpej {
   1480       1.20   thorpej 	int error = 0;
   1481       1.20   thorpej 
   1482       1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1483      1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1484      1.113      yamt 		if (error) {
   1485       1.20   thorpej 			break;
   1486       1.20   thorpej 		}
   1487       1.20   thorpej 	}
   1488      1.113      yamt 	return error;
   1489       1.20   thorpej }
   1490       1.20   thorpej 
   1491       1.88       chs static void
   1492       1.88       chs pool_update_curpage(struct pool *pp)
   1493       1.88       chs {
   1494       1.88       chs 
   1495       1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1496       1.88       chs 	if (pp->pr_curpage == NULL) {
   1497       1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1498       1.88       chs 	}
   1499       1.88       chs }
   1500       1.88       chs 
   1501        1.3        pk void
   1502       1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1503        1.3        pk {
   1504       1.15        pk 
   1505  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1506       1.21   thorpej 
   1507        1.3        pk 	pp->pr_minitems = n;
   1508       1.15        pk 	pp->pr_minpages = (n == 0)
   1509       1.15        pk 		? 0
   1510       1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1511       1.20   thorpej 
   1512       1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1513       1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1514       1.20   thorpej 		/*
   1515       1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1516       1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1517       1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1518       1.20   thorpej 		 */
   1519       1.20   thorpej 	}
   1520       1.21   thorpej 
   1521  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1522        1.3        pk }
   1523        1.3        pk 
   1524        1.3        pk void
   1525       1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1526        1.3        pk {
   1527       1.15        pk 
   1528  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1529       1.21   thorpej 
   1530       1.15        pk 	pp->pr_maxpages = (n == 0)
   1531       1.15        pk 		? 0
   1532       1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1533       1.21   thorpej 
   1534  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1535        1.3        pk }
   1536        1.3        pk 
   1537       1.20   thorpej void
   1538       1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1539       1.20   thorpej {
   1540       1.20   thorpej 
   1541  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1542       1.20   thorpej 
   1543       1.20   thorpej 	pp->pr_hardlimit = n;
   1544       1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1545       1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1546       1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1547       1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1548       1.20   thorpej 
   1549       1.20   thorpej 	/*
   1550       1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1551       1.21   thorpej 	 * release the lock.
   1552       1.20   thorpej 	 */
   1553       1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1554       1.20   thorpej 		? 0
   1555       1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1556       1.21   thorpej 
   1557  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1558       1.20   thorpej }
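/*
 * Usage sketch (values and pool name are hypothetical): a typical
 * watermark setup using the three functions above.
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 256);
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 *
 * The low water mark keeps at least 16 items' worth of pages on hand,
 * the high water mark allows idle pages beyond 256 items to be freed,
 * and the hard limit caps outstanding items at 1024, logging the
 * warning at most once every 60 seconds (rate-limited by ratecheck()).
 */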
   1559        1.3        pk 
   1560        1.3        pk /*
   1561        1.3        pk  * Release all complete pages that have not been used recently.
   1562        1.3        pk  */
   1563       1.66   thorpej int
   1564       1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1565       1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1566       1.56  sommerfe #else
   1567       1.56  sommerfe pool_reclaim(struct pool *pp)
   1568       1.56  sommerfe #endif
   1569        1.3        pk {
   1570        1.3        pk 	struct pool_item_header *ph, *phnext;
   1571       1.61       chs 	struct pool_pagelist pq;
   1572      1.102       chs 	struct timeval curtime, diff;
   1573        1.3        pk 
   1574       1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1575       1.68   thorpej 		/*
   1576       1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1577       1.68   thorpej 		 */
   1578       1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1579       1.68   thorpej 	}
   1580       1.68   thorpej 
   1581  1.128.2.2        ad 	if (mutex_tryenter(&pp->pr_lock) == 0)
   1582       1.66   thorpej 		return (0);
   1583       1.25   thorpej 	pr_enter(pp, file, line);
   1584       1.68   thorpej 
   1585       1.88       chs 	LIST_INIT(&pq);
   1586        1.3        pk 
   1587       1.43   thorpej 	/*
   1588       1.43   thorpej 	 * Reclaim items from the pool's caches.
   1589       1.43   thorpej 	 */
   1590  1.128.2.7        ad 	if (pp->pr_cache != NULL)
   1591  1.128.2.7        ad 		pool_cache_invalidate(pp->pr_cache);
   1592       1.43   thorpej 
   1593      1.118    kardel 	getmicrotime(&curtime);
   1594       1.21   thorpej 
   1595       1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1596       1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1597        1.3        pk 
   1598        1.3        pk 		/* Check our minimum page claim */
   1599        1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1600        1.3        pk 			break;
   1601        1.3        pk 
   1602       1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1603       1.88       chs 		timersub(&curtime, &ph->ph_time, &diff);
   1604      1.117      yamt 		if (diff.tv_sec < pool_inactive_time
   1605      1.117      yamt 		    && !pa_starved_p(pp->pr_alloc))
   1606       1.88       chs 			continue;
   1607       1.21   thorpej 
   1608       1.88       chs 		/*
   1609       1.88       chs 		 * If freeing this page would put us below
   1610       1.88       chs 		 * the low water mark, stop now.
   1611       1.88       chs 		 */
   1612       1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1613       1.88       chs 		    pp->pr_minitems)
   1614       1.88       chs 			break;
   1615       1.21   thorpej 
   1616       1.88       chs 		pr_rmpage(pp, ph, &pq);
   1617        1.3        pk 	}
   1618        1.3        pk 
   1619       1.25   thorpej 	pr_leave(pp);
   1620  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1621  1.128.2.7        ad 	if (LIST_EMPTY(&pq))
   1622      1.102       chs 		return 0;
   1623       1.66   thorpej 
   1624      1.101   thorpej 	pr_pagelist_free(pp, &pq);
   1625  1.128.2.7        ad 
   1626       1.66   thorpej 	return (1);
   1627        1.3        pk }
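/*
 * Sketch of a drain hook (foo_drain and its private cache are
 * hypothetical); pool_set_drain_hook(), defined earlier in this file,
 * registers the hook consulted above and in the hard-limit path of
 * pool_get().  The hook always runs with the pool unlocked and is
 * passed the PR_* flags of the original request.
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... release privately cached items via pool_put() ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */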
   1628        1.3        pk 
   1629        1.3        pk /*
   1630        1.3        pk  * Drain pools, one at a time.
   1631       1.21   thorpej  *
   1632       1.21   thorpej  * Note, we must never be called from an interrupt context.
   1633        1.3        pk  */
   1634        1.3        pk void
   1635      1.124      yamt pool_drain(void *arg)
   1636        1.3        pk {
   1637        1.3        pk 	struct pool *pp;
   1638        1.3        pk 
   1639       1.61       chs 	pp = NULL;
   1640  1.128.2.7        ad 
   1641  1.128.2.7        ad 	/* Find next pool to drain, and add a reference. */
   1642  1.128.2.2        ad 	mutex_enter(&pool_head_lock);
   1643       1.61       chs 	if (drainpp == NULL) {
   1644      1.102       chs 		drainpp = LIST_FIRST(&pool_head);
   1645       1.61       chs 	}
   1646  1.128.2.7        ad 	if (drainpp != NULL) {
   1647       1.61       chs 		pp = drainpp;
   1648      1.102       chs 		drainpp = LIST_NEXT(pp, pr_poollist);
   1649       1.61       chs 	}
   1650  1.128.2.7        ad 	if (pp != NULL)
   1651  1.128.2.7        ad 		pp->pr_refcnt++;
   1652  1.128.2.2        ad 	mutex_exit(&pool_head_lock);
   1653  1.128.2.7        ad 
   1654  1.128.2.7        ad 	/* If we have a candidate, drain it and unlock. */
   1655  1.128.2.7        ad 	if (pp != NULL) {
   1656      1.115  christos 		pool_reclaim(pp);
   1657  1.128.2.7        ad 		mutex_enter(&pool_head_lock);
   1658  1.128.2.7        ad 		pp->pr_refcnt--;
   1659  1.128.2.7        ad 		cv_broadcast(&pool_busy);
   1660  1.128.2.7        ad 		mutex_exit(&pool_head_lock);
   1661  1.128.2.7        ad 	}
   1662        1.3        pk }
   1663        1.3        pk 
   1664        1.3        pk /*
   1665        1.3        pk  * Diagnostic helpers.
   1666        1.3        pk  */
   1667        1.3        pk void
   1668       1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1669       1.21   thorpej {
   1670       1.21   thorpej 
   1671       1.25   thorpej 	pool_print1(pp, modif, printf);
   1672       1.21   thorpej }
   1673       1.21   thorpej 
   1674       1.25   thorpej void
   1675      1.108      yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
   1676      1.108      yamt {
   1677      1.108      yamt 	struct pool *pp;
   1678      1.108      yamt 
   1679      1.108      yamt 	LIST_FOREACH(pp, &pool_head, pr_poollist) {
   1680      1.108      yamt 		pool_printit(pp, modif, pr);
   1681      1.108      yamt 	}
   1682      1.108      yamt }
   1683      1.108      yamt 
   1684      1.108      yamt void
   1685       1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1686       1.25   thorpej {
   1687       1.25   thorpej 
   1688       1.25   thorpej 	if (pp == NULL) {
   1689       1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1690       1.25   thorpej 		return;
   1691       1.25   thorpej 	}
   1692       1.25   thorpej 
   1693       1.25   thorpej 	pool_print1(pp, modif, pr);
   1694       1.25   thorpej }
   1695       1.25   thorpej 
   1696       1.21   thorpej static void
   1697      1.124      yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1698       1.97      yamt     void (*pr)(const char *, ...))
   1699       1.88       chs {
   1700       1.88       chs 	struct pool_item_header *ph;
   1701       1.88       chs #ifdef DIAGNOSTIC
   1702       1.88       chs 	struct pool_item *pi;
   1703       1.88       chs #endif
   1704       1.88       chs 
   1705       1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1706       1.88       chs 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1707       1.88       chs 		    ph->ph_page, ph->ph_nmissing,
   1708       1.88       chs 		    (u_long)ph->ph_time.tv_sec,
   1709       1.88       chs 		    (u_long)ph->ph_time.tv_usec);
   1710       1.88       chs #ifdef DIAGNOSTIC
   1711       1.97      yamt 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1712      1.102       chs 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1713       1.97      yamt 				if (pi->pi_magic != PI_MAGIC) {
   1714       1.97      yamt 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1715       1.97      yamt 					    pi, pi->pi_magic);
   1716       1.97      yamt 				}
   1717       1.88       chs 			}
   1718       1.88       chs 		}
   1719       1.88       chs #endif
   1720       1.88       chs 	}
   1721       1.88       chs }
   1722       1.88       chs 
   1723       1.88       chs static void
   1724       1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1725        1.3        pk {
   1726       1.25   thorpej 	struct pool_item_header *ph;
   1727  1.128.2.7        ad 	pool_cache_t pc;
   1728  1.128.2.7        ad 	pcg_t *pcg;
   1729  1.128.2.7        ad 	pool_cache_cpu_t *cc;
   1730  1.128.2.7        ad 	uint64_t cpuhit, cpumiss;
   1731       1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1732       1.25   thorpej 	char c;
   1733       1.25   thorpej 
   1734       1.25   thorpej 	while ((c = *modif++) != '\0') {
   1735       1.25   thorpej 		if (c == 'l')
   1736       1.25   thorpej 			print_log = 1;
   1737       1.25   thorpej 		if (c == 'p')
   1738       1.25   thorpej 			print_pagelist = 1;
   1739       1.44   thorpej 		if (c == 'c')
   1740       1.44   thorpej 			print_cache = 1;
   1741       1.25   thorpej 	}
   1742       1.25   thorpej 
   1743  1.128.2.7        ad 	if ((pc = pp->pr_cache) != NULL) {
   1744  1.128.2.7        ad 		(*pr)("POOL CACHE");
   1745  1.128.2.7        ad 	} else {
   1746  1.128.2.7        ad 		(*pr)("POOL");
   1747  1.128.2.7        ad 	}
   1748  1.128.2.7        ad 
   1749  1.128.2.7        ad 	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1750       1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1751       1.25   thorpej 	    pp->pr_roflags);
   1752       1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1753       1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1754       1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1755       1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1756       1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1757       1.25   thorpej 
   1758  1.128.2.7        ad 	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
   1759       1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1760       1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1761       1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1762       1.25   thorpej 
   1763       1.25   thorpej 	if (print_pagelist == 0)
   1764       1.25   thorpej 		goto skip_pagelist;
   1765       1.25   thorpej 
   1766       1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1767       1.88       chs 		(*pr)("\n\tempty page list:\n");
   1768       1.97      yamt 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1769       1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1770       1.88       chs 		(*pr)("\n\tfull page list:\n");
   1771       1.97      yamt 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1772       1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1773       1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1774       1.97      yamt 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1775       1.88       chs 
   1776       1.25   thorpej 	if (pp->pr_curpage == NULL)
   1777       1.25   thorpej 		(*pr)("\tno current page\n");
   1778       1.25   thorpej 	else
   1779       1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1780       1.25   thorpej 
   1781       1.25   thorpej  skip_pagelist:
   1782       1.25   thorpej 	if (print_log == 0)
   1783       1.25   thorpej 		goto skip_log;
   1784       1.25   thorpej 
   1785       1.25   thorpej 	(*pr)("\n");
   1786       1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1787       1.25   thorpej 		(*pr)("\tno log\n");
   1788      1.122  christos 	else {
   1789       1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1790      1.122  christos 	}
   1791        1.3        pk 
   1792       1.25   thorpej  skip_log:
   1793       1.44   thorpej 
   1794      1.102       chs #define PR_GROUPLIST(pcg)						\
   1795      1.102       chs 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1796      1.102       chs 	for (i = 0; i < PCG_NOBJECTS; i++) {				\
   1797      1.102       chs 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1798      1.102       chs 		    POOL_PADDR_INVALID) {				\
   1799      1.102       chs 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1800      1.102       chs 			    pcg->pcg_objects[i].pcgo_va,		\
   1801      1.102       chs 			    (unsigned long long)			\
   1802      1.102       chs 			    pcg->pcg_objects[i].pcgo_pa);		\
   1803      1.102       chs 		} else {						\
   1804      1.102       chs 			(*pr)("\t\t\t%p\n",				\
   1805      1.102       chs 			    pcg->pcg_objects[i].pcgo_va);		\
   1806      1.102       chs 		}							\
   1807      1.102       chs 	}
   1808      1.102       chs 
   1809  1.128.2.7        ad 	if (pc != NULL) {
   1810  1.128.2.7        ad 		cpuhit = 0;
   1811  1.128.2.7        ad 		cpumiss = 0;
   1812  1.128.2.7        ad 		for (i = 0; i < MAXCPUS; i++) {
   1813  1.128.2.7        ad 			if ((cc = pc->pc_cpus[i]) == NULL)
   1814  1.128.2.7        ad 				continue;
   1815  1.128.2.7        ad 			cpuhit += cc->cc_hits;
   1816  1.128.2.7        ad 			cpumiss += cc->cc_misses;
   1817  1.128.2.7        ad 		}
   1818  1.128.2.7        ad 		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
   1819  1.128.2.7        ad 		(*pr)("\tcache layer hits %llu misses %llu\n",
   1820  1.128.2.7        ad 		    pc->pc_hits, pc->pc_misses);
   1821  1.128.2.7        ad 		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
   1822  1.128.2.7        ad 		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
   1823  1.128.2.7        ad 		    pc->pc_contended);
   1824  1.128.2.7        ad 		(*pr)("\tcache layer empty groups %u full groups %u\n",
   1825  1.128.2.7        ad 		    pc->pc_nempty, pc->pc_nfull);
   1826  1.128.2.7        ad 		if (print_cache) {
   1827  1.128.2.7        ad 			(*pr)("\tfull cache groups:\n");
   1828  1.128.2.7        ad 			for (pcg = pc->pc_fullgroups; pcg != NULL;
   1829  1.128.2.7        ad 			    pcg = pcg->pcg_next) {
   1830  1.128.2.7        ad 				PR_GROUPLIST(pcg);
   1831  1.128.2.7        ad 			}
   1832  1.128.2.7        ad 			(*pr)("\tempty cache groups:\n");
   1833  1.128.2.7        ad 			for (pcg = pc->pc_emptygroups; pcg != NULL;
   1834  1.128.2.7        ad 			    pcg = pcg->pcg_next) {
   1835  1.128.2.7        ad 				PR_GROUPLIST(pcg);
   1836  1.128.2.7        ad 			}
   1837      1.103       chs 		}
   1838       1.44   thorpej 	}
   1839      1.102       chs #undef PR_GROUPLIST
   1840       1.44   thorpej 
   1841       1.88       chs 	pr_enter_check(pp, pr);
   1842       1.88       chs }
   1843       1.88       chs 
   1844       1.88       chs static int
   1845       1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1846       1.88       chs {
   1847       1.88       chs 	struct pool_item *pi;
   1848      1.128  christos 	void *page;
   1849       1.88       chs 	int n;
   1850       1.88       chs 
   1851      1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1852      1.128  christos 		page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1853      1.121      yamt 		if (page != ph->ph_page &&
   1854      1.121      yamt 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1855      1.121      yamt 			if (label != NULL)
   1856      1.121      yamt 				printf("%s: ", label);
   1857      1.121      yamt 			printf("pool(%p:%s): page inconsistency: page %p;"
   1858      1.121      yamt 			       " at page head addr %p (p %p)\n", pp,
   1859      1.121      yamt 				pp->pr_wchan, ph->ph_page,
   1860      1.121      yamt 				ph, page);
   1861      1.121      yamt 			return 1;
   1862      1.121      yamt 		}
   1863       1.88       chs 	}
   1864        1.3        pk 
   1865       1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1866       1.97      yamt 		return 0;
   1867       1.97      yamt 
   1868      1.102       chs 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1869       1.88       chs 	     pi != NULL;
   1870      1.102       chs 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1871       1.88       chs 
   1872       1.88       chs #ifdef DIAGNOSTIC
   1873       1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1874       1.88       chs 			if (label != NULL)
   1875       1.88       chs 				printf("%s: ", label);
   1876       1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1877      1.121      yamt 			       " page %p; item ordinal %d; addr %p\n",
   1878       1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1879      1.121      yamt 				n, pi);
   1880       1.88       chs 			panic("pool");
   1881       1.88       chs 		}
   1882       1.88       chs #endif
   1883      1.121      yamt 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1884      1.121      yamt 			continue;
   1885      1.121      yamt 		}
   1886      1.128  christos 		page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1887       1.88       chs 		if (page == ph->ph_page)
   1888       1.88       chs 			continue;
   1889       1.88       chs 
   1890       1.88       chs 		if (label != NULL)
   1891       1.88       chs 			printf("%s: ", label);
   1892       1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1893       1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1894       1.88       chs 			pp->pr_wchan, ph->ph_page,
   1895       1.88       chs 			n, pi, page);
   1896       1.88       chs 		return 1;
   1897       1.88       chs 	}
   1898       1.88       chs 	return 0;
   1899        1.3        pk }
   1900        1.3        pk 
   1901       1.88       chs 
   1902        1.3        pk int
   1903       1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1904        1.3        pk {
   1905        1.3        pk 	struct pool_item_header *ph;
   1906        1.3        pk 	int r = 0;
   1907        1.3        pk 
   1908  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   1909       1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1910       1.88       chs 		r = pool_chk_page(pp, label, ph);
   1911       1.88       chs 		if (r) {
   1912       1.88       chs 			goto out;
   1913       1.88       chs 		}
   1914       1.88       chs 	}
   1915       1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1916       1.88       chs 		r = pool_chk_page(pp, label, ph);
   1917       1.88       chs 		if (r) {
   1918        1.3        pk 			goto out;
   1919        1.3        pk 		}
   1920       1.88       chs 	}
   1921       1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1922       1.88       chs 		r = pool_chk_page(pp, label, ph);
   1923       1.88       chs 		if (r) {
   1924        1.3        pk 			goto out;
   1925        1.3        pk 		}
   1926        1.3        pk 	}
   1927       1.88       chs 
   1928        1.3        pk out:
   1929  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   1930        1.3        pk 	return (r);
   1931       1.43   thorpej }
   1932       1.43   thorpej 
   1933       1.43   thorpej /*
   1934       1.43   thorpej  * pool_cache_init:
   1935       1.43   thorpej  *
   1936       1.43   thorpej  *	Initialize a pool cache.
   1937  1.128.2.7        ad  */
   1938  1.128.2.7        ad pool_cache_t
   1939  1.128.2.7        ad pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
   1940  1.128.2.7        ad     const char *wchan, struct pool_allocator *palloc, int ipl,
   1941  1.128.2.7        ad     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
   1942  1.128.2.7        ad {
   1943  1.128.2.7        ad 	pool_cache_t pc;
   1944  1.128.2.7        ad 
   1945  1.128.2.7        ad 	pc = pool_get(&cache_pool, PR_WAITOK);
   1946  1.128.2.7        ad 	if (pc == NULL)
   1947  1.128.2.7        ad 		return NULL;
   1948  1.128.2.7        ad 
   1949  1.128.2.7        ad 	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
   1950  1.128.2.7        ad 	   palloc, ipl, ctor, dtor, arg);
   1951  1.128.2.7        ad 
   1952  1.128.2.7        ad 	return pc;
   1953  1.128.2.7        ad }
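/*
 * Usage sketch (names are hypothetical): creating and using a pool
 * cache.  foo_ctor/foo_dtor must match the ctor/dtor signatures taken
 * above; both may also be NULL.
 *
 *	pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
 *	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 *
 * Constructed objects are recycled through the per-CPU and global
 * group lists, so the constructor typically runs far less often than
 * pool_cache_get() is called.
 */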
   1954  1.128.2.7        ad 
   1955  1.128.2.7        ad /*
   1956  1.128.2.7        ad  * pool_cache_bootstrap:
   1957       1.43   thorpej  *
   1958  1.128.2.7        ad  *	Kernel-private version of pool_cache_init().  The caller
   1959  1.128.2.7        ad  *	provides initial storage.
   1960       1.43   thorpej  */
   1961       1.43   thorpej void
   1962  1.128.2.7        ad pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
   1963  1.128.2.7        ad     u_int align_offset, u_int flags, const char *wchan,
   1964  1.128.2.7        ad     struct pool_allocator *palloc, int ipl,
   1965  1.128.2.7        ad     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
   1966       1.43   thorpej     void *arg)
   1967       1.43   thorpej {
   1968  1.128.2.7        ad 	CPU_INFO_ITERATOR cii;
   1969  1.128.2.7        ad 	struct cpu_info *ci;
   1970  1.128.2.7        ad 	struct pool *pp;
   1971       1.43   thorpej 
   1972  1.128.2.7        ad 	pp = &pc->pc_pool;
   1973  1.128.2.7        ad 	if (palloc == NULL && ipl == IPL_NONE)
   1974  1.128.2.7        ad 		palloc = &pool_allocator_nointr;
   1975  1.128.2.7        ad 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
   1976       1.43   thorpej 
   1977  1.128.2.7        ad 	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);
   1978       1.43   thorpej 
   1979  1.128.2.7        ad 	pc->pc_emptygroups = NULL;
   1980  1.128.2.7        ad 	pc->pc_fullgroups = NULL;
   1981       1.43   thorpej 	pc->pc_ctor = ctor;
   1982       1.43   thorpej 	pc->pc_dtor = dtor;
   1983       1.43   thorpej 	pc->pc_arg  = arg;
   1984  1.128.2.7        ad 	pc->pc_hits  = 0;
   1985       1.48   thorpej 	pc->pc_misses = 0;
   1986  1.128.2.7        ad 	pc->pc_nempty = 0;
   1987  1.128.2.7        ad 	pc->pc_nfull = 0;
   1988  1.128.2.7        ad 	pc->pc_contended = 0;
   1989  1.128.2.7        ad 	pc->pc_refcnt = 0;
   1990  1.128.2.7        ad 
   1991  1.128.2.7        ad 	/* Allocate per-CPU caches. */
   1992  1.128.2.7        ad 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
   1993  1.128.2.7        ad 	pc->pc_ncpu = 0;
   1994  1.128.2.7        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
   1995  1.128.2.7        ad 		pool_cache_cpu_init1(ci, pc);
   1996  1.128.2.7        ad 	}
   1997  1.128.2.7        ad 
   1998  1.128.2.2        ad 	if (__predict_true(!cold)) {
   1999  1.128.2.2        ad 		mutex_enter(&pp->pr_lock);
   2000  1.128.2.7        ad 		pp->pr_cache = pc;
   2001  1.128.2.2        ad 		mutex_exit(&pp->pr_lock);
   2002  1.128.2.7        ad 		mutex_enter(&pool_head_lock);
   2003  1.128.2.7        ad 		LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
   2004  1.128.2.7        ad 		mutex_exit(&pool_head_lock);
   2005  1.128.2.7        ad 	} else {
   2006  1.128.2.7        ad 		pp->pr_cache = pc;
   2007  1.128.2.7        ad 		LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
   2008  1.128.2.7        ad 	}
   2009       1.43   thorpej }
   2010       1.43   thorpej 
   2011       1.43   thorpej /*
   2012       1.43   thorpej  * pool_cache_destroy:
   2013       1.43   thorpej  *
   2014       1.43   thorpej  *	Destroy a pool cache.
   2015       1.43   thorpej  */
   2016       1.43   thorpej void
   2017  1.128.2.7        ad pool_cache_destroy(pool_cache_t pc)
   2018       1.43   thorpej {
   2019  1.128.2.7        ad 	struct pool *pp = &pc->pc_pool;
   2020  1.128.2.7        ad 	pool_cache_cpu_t *cc;
   2021  1.128.2.7        ad 	pcg_t *pcg;
   2022  1.128.2.7        ad 	int i;
   2023  1.128.2.7        ad 
   2024  1.128.2.7        ad 	/* Remove it from the global list. */
   2025  1.128.2.7        ad 	mutex_enter(&pool_head_lock);
   2026  1.128.2.7        ad 	while (pc->pc_refcnt != 0)
   2027  1.128.2.7        ad 		cv_wait(&pool_busy, &pool_head_lock);
   2028  1.128.2.7        ad 	LIST_REMOVE(pc, pc_cachelist);
   2029  1.128.2.7        ad 	mutex_exit(&pool_head_lock);
   2030       1.43   thorpej 
   2031       1.43   thorpej 	/* First, invalidate the entire cache. */
   2032       1.43   thorpej 	pool_cache_invalidate(pc);
   2033       1.43   thorpej 
   2034  1.128.2.7        ad 	/* Disassociate it from the pool. */
   2035  1.128.2.2        ad 	mutex_enter(&pp->pr_lock);
   2036  1.128.2.7        ad 	pp->pr_cache = NULL;
   2037  1.128.2.2        ad 	mutex_exit(&pp->pr_lock);
   2038  1.128.2.2        ad 
   2039  1.128.2.7        ad 	/* Destroy per-CPU data */
   2040  1.128.2.7        ad 	for (i = 0; i < MAXCPUS; i++) {
   2041  1.128.2.7        ad 		if ((cc = pc->pc_cpus[i]) == NULL)
   2042  1.128.2.7        ad 			continue;
   2043  1.128.2.7        ad 		if ((pcg = cc->cc_current) != NULL) {
   2044  1.128.2.7        ad 			pcg->pcg_next = NULL;
   2045  1.128.2.7        ad 			pool_cache_invalidate_groups(pc, pcg);
   2046  1.128.2.7        ad 		}
   2047  1.128.2.7        ad 		if ((pcg = cc->cc_previous) != NULL) {
   2048  1.128.2.7        ad 			pcg->pcg_next = NULL;
   2049  1.128.2.7        ad 			pool_cache_invalidate_groups(pc, pcg);
   2050  1.128.2.7        ad 		}
   2051  1.128.2.7        ad 		if (cc != &pc->pc_cpu0)
   2052  1.128.2.7        ad 			pool_put(&cache_cpu_pool, cc);
   2053  1.128.2.7        ad 	}
   2054  1.128.2.7        ad 
   2055  1.128.2.7        ad 	/* Finally, destroy it. */
   2056  1.128.2.2        ad 	mutex_destroy(&pc->pc_lock);
   2057  1.128.2.7        ad 	pool_destroy(pp);
   2058  1.128.2.7        ad 	pool_put(&cache_pool, pc);
   2059  1.128.2.7        ad }
   2060  1.128.2.7        ad 
   2061  1.128.2.7        ad /*
   2062  1.128.2.7        ad  * pool_cache_cpu_init1:
   2063  1.128.2.7        ad  *
   2064  1.128.2.7        ad  *	Called for each pool_cache whenever a new CPU is attached.
   2065  1.128.2.7        ad  */
   2066  1.128.2.7        ad static void
   2067  1.128.2.7        ad pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
   2068  1.128.2.7        ad {
   2069  1.128.2.7        ad 	pool_cache_cpu_t *cc;
   2070  1.128.2.7        ad 
   2071  1.128.2.7        ad 	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
   2072  1.128.2.7        ad 
   2073  1.128.2.7        ad 	if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
    2074  1.128.2.7        ad 		KASSERT(cc->cc_cpu == ci);
   2075  1.128.2.7        ad 		return;
   2076  1.128.2.7        ad 	}
   2077  1.128.2.7        ad 
   2078  1.128.2.7        ad 	/*
   2079  1.128.2.7        ad 	 * The first CPU is 'free'.  This needs to be the case for
   2080  1.128.2.7        ad 	 * bootstrap - we may not be able to allocate yet.
   2081  1.128.2.7        ad 	 */
   2082  1.128.2.7        ad 	if (pc->pc_ncpu == 0) {
   2083  1.128.2.7        ad 		cc = &pc->pc_cpu0;
   2084  1.128.2.7        ad 		pc->pc_ncpu = 1;
   2085  1.128.2.7        ad 	} else {
   2086  1.128.2.7        ad 		mutex_enter(&pc->pc_lock);
   2087  1.128.2.7        ad 		pc->pc_ncpu++;
   2088  1.128.2.7        ad 		mutex_exit(&pc->pc_lock);
   2089  1.128.2.7        ad 		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
   2090  1.128.2.7        ad 	}
   2091  1.128.2.7        ad 
   2092  1.128.2.7        ad 	cc->cc_ipl = pc->pc_pool.pr_ipl;
   2093  1.128.2.7        ad 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
   2094  1.128.2.7        ad 	cc->cc_cache = pc;
   2095  1.128.2.7        ad 	cc->cc_cpu = ci;
   2096  1.128.2.7        ad 	cc->cc_hits = 0;
   2097  1.128.2.7        ad 	cc->cc_misses = 0;
   2098  1.128.2.7        ad 	cc->cc_current = NULL;
   2099  1.128.2.7        ad 	cc->cc_previous = NULL;
   2100  1.128.2.7        ad 	cc->cc_busy = NULL;
   2101  1.128.2.7        ad 
   2102  1.128.2.7        ad 	pc->pc_cpus[ci->ci_index] = cc;
   2103  1.128.2.7        ad }
   2104  1.128.2.7        ad 
   2105  1.128.2.7        ad /*
   2106  1.128.2.7        ad  * pool_cache_cpu_init:
   2107  1.128.2.7        ad  *
   2108  1.128.2.7        ad  *	Called whenever a new CPU is attached.
   2109  1.128.2.7        ad  */
   2110  1.128.2.7        ad void
   2111  1.128.2.7        ad pool_cache_cpu_init(struct cpu_info *ci)
   2112  1.128.2.7        ad {
   2113  1.128.2.7        ad 	pool_cache_t pc;
   2114  1.128.2.7        ad 
   2115  1.128.2.7        ad 	mutex_enter(&pool_head_lock);
   2116  1.128.2.7        ad 	LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
   2117  1.128.2.7        ad 		pc->pc_refcnt++;
   2118  1.128.2.7        ad 		mutex_exit(&pool_head_lock);
   2119  1.128.2.7        ad 
   2120  1.128.2.7        ad 		pool_cache_cpu_init1(ci, pc);
   2121  1.128.2.7        ad 
   2122  1.128.2.7        ad 		mutex_enter(&pool_head_lock);
   2123  1.128.2.7        ad 		pc->pc_refcnt--;
   2124  1.128.2.7        ad 		cv_broadcast(&pool_busy);
   2125  1.128.2.7        ad 	}
   2126  1.128.2.7        ad 	mutex_exit(&pool_head_lock);
   2127  1.128.2.7        ad }
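
                              /*
                               * Caller sketch: this routine is expected to be invoked once for
                               * each CPU from the attach path, after the new CPU's cpu_info has
                               * been set up.  The attach function named below is hypothetical;
                               * this file does not define it:
                               *
                               *	void
                               *	example_cpu_attach(struct cpu_info *ci)
                               *	{
                               *
                               *		pool_cache_cpu_init(ci);
                               *	}
                               */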
   2128  1.128.2.7        ad 
   2129  1.128.2.7        ad /*
   2130  1.128.2.7        ad  * pool_cache_reclaim:
   2131  1.128.2.7        ad  *
   2132  1.128.2.7        ad  *	Reclaim memory from a pool cache.
   2133  1.128.2.7        ad  */
   2134  1.128.2.7        ad bool
   2135  1.128.2.7        ad pool_cache_reclaim(pool_cache_t pc)
   2136  1.128.2.7        ad {
   2137  1.128.2.7        ad 
   2138  1.128.2.7        ad 	return pool_reclaim(&pc->pc_pool);
   2139       1.43   thorpej }
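
                              /*
                               * Usage sketch ("foo_cache" is a hypothetical cache handle): a
                               * caller responding to memory pressure can drain the backing pool
                               * and test whether anything was actually released:
                               *
                               *	if (!pool_cache_reclaim(foo_cache))
                               *		printf("foo: nothing reclaimed\n");
                               */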
   2140       1.43   thorpej 
   2141      1.110     perry static inline void *
   2142  1.128.2.7        ad pcg_get(pcg_t *pcg, paddr_t *pap)
   2143       1.43   thorpej {
   2144       1.43   thorpej 	void *object;
   2145       1.43   thorpej 	u_int idx;
   2146       1.43   thorpej 
   2147       1.43   thorpej 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   2148       1.45   thorpej 	KASSERT(pcg->pcg_avail != 0);
   2149       1.43   thorpej 
   2150  1.128.2.7        ad 	idx = --pcg->pcg_avail;
   2151       1.87   thorpej 	object = pcg->pcg_objects[idx].pcgo_va;
   2152       1.87   thorpej 	if (pap != NULL)
   2153       1.87   thorpej 		*pap = pcg->pcg_objects[idx].pcgo_pa;
   2154  1.128.2.7        ad 
   2155  1.128.2.7        ad #ifdef DIAGNOSTIC
   2156       1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = NULL;
   2157  1.128.2.7        ad 	KASSERT(object != NULL);
   2158  1.128.2.7        ad #endif
   2159       1.43   thorpej 
   2160       1.43   thorpej 	return (object);
   2161       1.43   thorpej }
   2162       1.43   thorpej 
   2163      1.110     perry static inline void
   2164  1.128.2.7        ad pcg_put(pcg_t *pcg, void *object, paddr_t pa)
   2165       1.43   thorpej {
   2166       1.43   thorpej 	u_int idx;
   2167       1.43   thorpej 
   2168       1.43   thorpej 	idx = pcg->pcg_avail++;
   2169       1.43   thorpej 
   2170  1.128.2.7        ad 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   2171       1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
   2172  1.128.2.7        ad 
   2173       1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = object;
   2174       1.87   thorpej 	pcg->pcg_objects[idx].pcgo_pa = pa;
   2175       1.43   thorpej }
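
                              /*
                               * Together, pcg_get() and pcg_put() treat pcg_objects[] as a small
                               * LIFO stack indexed by pcg_avail: a put stores at the current
                               * index and increments, a get decrements and loads, so a get always
                               * returns the most recently put object.  A sketch of filling and
                               * then draining one group ("objs" is a hypothetical array of
                               * constructed objects):
                               *
                               *	while (pcg->pcg_avail < PCG_NOBJECTS)
                               *		pcg_put(pcg, objs[pcg->pcg_avail], POOL_PADDR_INVALID);
                               *	object = pcg_get(pcg, NULL);
                               */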
   2176       1.43   thorpej 
   2177  1.128.2.7        ad /*
   2178  1.128.2.7        ad  * pool_cache_destruct_object:
   2179  1.128.2.7        ad  *
   2180  1.128.2.7        ad  *	Force destruction of an object and its release back into
   2181  1.128.2.7        ad  *	the pool.
   2182  1.128.2.7        ad  */
   2183  1.128.2.7        ad void
   2184  1.128.2.7        ad pool_cache_destruct_object(pool_cache_t pc, void *object)
   2185  1.128.2.7        ad {
   2186  1.128.2.7        ad 
   2187  1.128.2.7        ad 	if (pc->pc_dtor != NULL)
   2188  1.128.2.7        ad 		(*pc->pc_dtor)(pc->pc_arg, object);
   2189  1.128.2.7        ad 	pool_put(&pc->pc_pool, object);
   2190  1.128.2.7        ad }
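
                              /*
                               * Usage sketch ("foo_cache" and "obj" are hypothetical): this is
                               * the entry point to use instead of pool_cache_put() when an object
                               * must bypass the cache layer, e.g. because its constructed state
                               * is no longer valid:
                               *
                               *	pool_cache_destruct_object(foo_cache, obj);
                               */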
   2191  1.128.2.7        ad 
   2192  1.128.2.7        ad /*
   2193  1.128.2.7        ad  * pool_cache_invalidate_groups:
   2194  1.128.2.7        ad  *
   2195  1.128.2.7        ad  *	Invalidate a chain of groups and destruct all objects.
   2196  1.128.2.7        ad  */
   2197      1.102       chs static void
   2198  1.128.2.7        ad pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
   2199      1.102       chs {
   2200  1.128.2.7        ad 	void *object;
   2201  1.128.2.7        ad 	pcg_t *next;
   2202  1.128.2.7        ad 	int i;
   2203  1.128.2.7        ad 
   2204  1.128.2.7        ad 	for (; pcg != NULL; pcg = next) {
   2205  1.128.2.7        ad 		next = pcg->pcg_next;
   2206  1.128.2.7        ad 
   2207  1.128.2.7        ad 		for (i = 0; i < pcg->pcg_avail; i++) {
   2208  1.128.2.7        ad 			object = pcg->pcg_objects[i].pcgo_va;
   2209  1.128.2.7        ad 			if (pc->pc_dtor != NULL)
   2210  1.128.2.7        ad 				(*pc->pc_dtor)(pc->pc_arg, object);
   2211  1.128.2.7        ad 			pool_put(&pc->pc_pool, object);
   2212  1.128.2.7        ad 		}
   2213      1.102       chs 
   2214      1.102       chs 		pool_put(&pcgpool, pcg);
   2215      1.102       chs 	}
   2216      1.102       chs }
   2217      1.102       chs 
   2218       1.43   thorpej /*
   2219  1.128.2.7        ad  * pool_cache_invalidate:
   2220       1.43   thorpej  *
   2221  1.128.2.7        ad  *	Invalidate a pool cache (destruct and release all of the
   2222  1.128.2.7        ad  *	cached objects).  Does not reclaim objects from the pool.
   2223       1.43   thorpej  */
   2224  1.128.2.7        ad void
   2225  1.128.2.7        ad pool_cache_invalidate(pool_cache_t pc)
   2226       1.43   thorpej {
   2227  1.128.2.7        ad 	pcg_t *full, *empty;
   2228       1.43   thorpej 
   2229  1.128.2.2        ad 	mutex_enter(&pc->pc_lock);
   2230  1.128.2.7        ad 	full = pc->pc_fullgroups;
   2231  1.128.2.7        ad 	empty = pc->pc_emptygroups;
   2232  1.128.2.7        ad 	pc->pc_fullgroups = NULL;
   2233  1.128.2.7        ad 	pc->pc_emptygroups = NULL;
   2234  1.128.2.7        ad 	pc->pc_nfull = 0;
   2235  1.128.2.7        ad 	pc->pc_nempty = 0;
   2236  1.128.2.7        ad 	mutex_exit(&pc->pc_lock);
   2237       1.43   thorpej 
   2238  1.128.2.7        ad 	pool_cache_invalidate_groups(pc, full);
   2239  1.128.2.7        ad 	pool_cache_invalidate_groups(pc, empty);
   2240  1.128.2.7        ad }
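
                              /*
                               * Usage sketch ("foo_cache" is hypothetical): invalidation only
                               * releases cached objects back to the pool; to free idle backing
                               * pages as well, follow it with a reclaim:
                               *
                               *	pool_cache_invalidate(foo_cache);
                               *	(void)pool_cache_reclaim(foo_cache);
                               */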
   2241       1.43   thorpej 
   2242  1.128.2.7        ad void
   2243  1.128.2.7        ad pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
   2244  1.128.2.7        ad {
   2245      1.125        ad 
   2246  1.128.2.7        ad 	pool_set_drain_hook(&pc->pc_pool, fn, arg);
   2247  1.128.2.7        ad }
   2248       1.43   thorpej 
   2249  1.128.2.7        ad void
   2250  1.128.2.7        ad pool_cache_setlowat(pool_cache_t pc, int n)
   2251  1.128.2.7        ad {
   2252       1.43   thorpej 
   2253  1.128.2.7        ad 	pool_setlowat(&pc->pc_pool, n);
   2254  1.128.2.7        ad }
   2255       1.43   thorpej 
   2256  1.128.2.7        ad void
   2257  1.128.2.7        ad pool_cache_sethiwat(pool_cache_t pc, int n)
   2258  1.128.2.7        ad {
   2259  1.128.2.7        ad 
   2260  1.128.2.7        ad 	pool_sethiwat(&pc->pc_pool, n);
   2261       1.43   thorpej }
   2262       1.43   thorpej 
   2263       1.43   thorpej void
   2264  1.128.2.7        ad pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
   2265       1.43   thorpej {
   2266       1.43   thorpej 
   2267  1.128.2.7        ad 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
   2268  1.128.2.7        ad }
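
                              /*
                               * Tuning sketch (the values and "foo_cache" are hypothetical): the
                               * four wrappers above simply forward the standard pool(9) watermark
                               * and limit controls to the cache's backing pool:
                               *
                               *	pool_cache_setlowat(foo_cache, 16);
                               *	pool_cache_sethiwat(foo_cache, 1024);
                               *	pool_cache_sethardlimit(foo_cache, 2048, "foo: limit reached", 60);
                               */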
   2269  1.128.2.7        ad 
   2270  1.128.2.7        ad static inline pool_cache_cpu_t *
   2271  1.128.2.7        ad pool_cache_cpu_enter(pool_cache_t pc, int *s)
   2272  1.128.2.7        ad {
   2273  1.128.2.7        ad 	pool_cache_cpu_t *cc;
   2274  1.128.2.7        ad 	struct cpu_info *ci;
   2275      1.125        ad 
   2276  1.128.2.7        ad 	/*
   2277  1.128.2.7        ad 	 * Prevent other users of the cache from accessing our
   2278  1.128.2.7        ad 	 * CPU-local data.  To avoid touching shared state, we
    2279  1.128.2.7        ad 	 * pull the necessary information from CPU-local data.
   2280  1.128.2.7        ad 	 */
   2281  1.128.2.7        ad 	ci = curcpu();
   2282  1.128.2.7        ad 	cc = pc->pc_cpus[ci->ci_data.cpu_index];
   2283  1.128.2.7        ad 	if (cc->cc_ipl == IPL_NONE) {
   2284  1.128.2.7        ad 		crit_enter();
   2285  1.128.2.7        ad 	} else {
   2286  1.128.2.7        ad 		*s = splraiseipl(cc->cc_iplcookie);
   2287      1.109  christos 	}
   2288      1.109  christos 
   2289  1.128.2.7        ad 	/* Moved to another CPU before disabling preemption? */
   2290  1.128.2.7        ad 	if (__predict_false(ci != curcpu())) {
   2291  1.128.2.7        ad 		ci = curcpu();
   2292  1.128.2.7        ad 		cc = pc->pc_cpus[ci->ci_data.cpu_index];
   2293  1.128.2.7        ad 	}
   2294       1.43   thorpej 
   2295  1.128.2.7        ad #ifdef DIAGNOSTIC
   2296  1.128.2.7        ad 	KASSERT(cc->cc_busy == NULL);
   2297  1.128.2.7        ad 	KASSERT(cc->cc_cpu == ci);
   2298  1.128.2.7        ad 	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
   2299  1.128.2.7        ad 	cc->cc_busy = curlwp;
   2300  1.128.2.7        ad #endif
   2301  1.128.2.7        ad 
   2302  1.128.2.7        ad 	return cc;
   2303  1.128.2.7        ad }
   2304  1.128.2.7        ad 
   2305  1.128.2.7        ad static inline void
   2306  1.128.2.7        ad pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
   2307  1.128.2.7        ad {
   2308  1.128.2.7        ad 
   2309  1.128.2.7        ad #ifdef DIAGNOSTIC
   2310  1.128.2.7        ad 	KASSERT(cc->cc_busy == curlwp);
   2311  1.128.2.7        ad 	cc->cc_busy = NULL;
   2312  1.128.2.7        ad #endif
   2313  1.128.2.7        ad 
   2314  1.128.2.7        ad 	/* No longer need exclusive access to the per-CPU data. */
   2315  1.128.2.7        ad 	if (cc->cc_ipl == IPL_NONE) {
   2316  1.128.2.7        ad 		crit_exit();
   2317  1.128.2.7        ad 	} else {
   2318  1.128.2.7        ad 		splx(*s);
   2319  1.128.2.7        ad 	}
   2320  1.128.2.7        ad }
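
                              /*
                               * The enter/exit pair brackets every access to the per-CPU state,
                               * in the pattern the get and put paths below follow (sketch only):
                               *
                               *	int s;
                               *	pool_cache_cpu_t *cc;
                               *
                               *	cc = pool_cache_cpu_enter(pc, &s);
                               *	... manipulate cc->cc_current and cc->cc_previous ...
                               *	pool_cache_cpu_exit(cc, &s);
                               */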
   2321  1.128.2.7        ad 
   2322  1.128.2.7        ad #if __GNUC_PREREQ__(3, 0)
    2323  1.128.2.7        ad __attribute__ ((noinline))
   2324  1.128.2.7        ad #endif
   2325  1.128.2.7        ad pool_cache_cpu_t *
   2326  1.128.2.7        ad pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
   2327  1.128.2.7        ad 		    paddr_t *pap, int flags)
   2328  1.128.2.7        ad {
   2329  1.128.2.7        ad 	pcg_t *pcg, *cur;
   2330  1.128.2.7        ad 	uint64_t ncsw;
   2331  1.128.2.7        ad 	pool_cache_t pc;
   2332  1.128.2.7        ad 	void *object;
   2333  1.128.2.7        ad 
   2334  1.128.2.7        ad 	pc = cc->cc_cache;
   2335  1.128.2.7        ad 	cc->cc_misses++;
   2336  1.128.2.7        ad 
   2337  1.128.2.7        ad 	/*
    2338  1.128.2.7        ad 	 * Nothing was available locally.  Try to grab a group
   2339  1.128.2.7        ad 	 * from the cache.
   2340  1.128.2.7        ad 	 */
   2341  1.128.2.7        ad 	if (!mutex_tryenter(&pc->pc_lock)) {
   2342  1.128.2.7        ad 		ncsw = curlwp->l_ncsw;
   2343  1.128.2.7        ad 		mutex_enter(&pc->pc_lock);
   2344  1.128.2.7        ad 		pc->pc_contended++;
   2345  1.128.2.7        ad 
   2346  1.128.2.7        ad 		/*
   2347  1.128.2.7        ad 		 * If we context switched while locking, then
   2348  1.128.2.7        ad 		 * our view of the per-CPU data is invalid:
   2349  1.128.2.7        ad 		 * retry.
   2350  1.128.2.7        ad 		 */
   2351  1.128.2.7        ad 		if (curlwp->l_ncsw != ncsw) {
   2352  1.128.2.7        ad 			mutex_exit(&pc->pc_lock);
   2353  1.128.2.7        ad 			pool_cache_cpu_exit(cc, s);
   2354  1.128.2.7        ad 			return pool_cache_cpu_enter(pc, s);
   2355       1.43   thorpej 		}
   2356      1.102       chs 	}
   2357       1.43   thorpej 
   2358  1.128.2.7        ad 	if ((pcg = pc->pc_fullgroups) != NULL) {
   2359       1.43   thorpej 		/*
   2360  1.128.2.7        ad 		 * If there's a full group, release our empty
   2361  1.128.2.7        ad 		 * group back to the cache.  Install the full
   2362  1.128.2.7        ad 		 * group as cc_current and return.
   2363       1.43   thorpej 		 */
   2364  1.128.2.7        ad 		if ((cur = cc->cc_current) != NULL) {
   2365  1.128.2.7        ad 			KASSERT(cur->pcg_avail == 0);
   2366  1.128.2.7        ad 			cur->pcg_next = pc->pc_emptygroups;
   2367  1.128.2.7        ad 			pc->pc_emptygroups = cur;
   2368  1.128.2.7        ad 			pc->pc_nempty++;
   2369  1.128.2.7        ad 		}
   2370  1.128.2.7        ad 		KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
   2371  1.128.2.7        ad 		cc->cc_current = pcg;
   2372  1.128.2.7        ad 		pc->pc_fullgroups = pcg->pcg_next;
   2373  1.128.2.7        ad 		pc->pc_hits++;
   2374  1.128.2.7        ad 		pc->pc_nfull--;
   2375  1.128.2.2        ad 		mutex_exit(&pc->pc_lock);
   2376  1.128.2.7        ad 		return cc;
   2377  1.128.2.7        ad 	}
   2378      1.102       chs 
   2379  1.128.2.7        ad 	/*
   2380  1.128.2.7        ad 	 * Nothing available locally or in cache.  Take the slow
   2381  1.128.2.7        ad 	 * path: fetch a new object from the pool and construct
   2382  1.128.2.7        ad 	 * it.
   2383  1.128.2.7        ad 	 */
   2384  1.128.2.7        ad 	pc->pc_misses++;
   2385  1.128.2.7        ad 	mutex_exit(&pc->pc_lock);
   2386  1.128.2.7        ad 	pool_cache_cpu_exit(cc, s);
   2387  1.128.2.7        ad 
   2388  1.128.2.7        ad 	object = pool_get(&pc->pc_pool, flags);
   2389  1.128.2.7        ad 	*objectp = object;
   2390  1.128.2.7        ad 	if (object == NULL)
   2391  1.128.2.7        ad 		return NULL;
   2392  1.128.2.7        ad 
   2393  1.128.2.7        ad 	if (pc->pc_ctor != NULL) {
   2394  1.128.2.7        ad 		if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   2395  1.128.2.7        ad 			pool_put(&pc->pc_pool, object);
   2396  1.128.2.7        ad 			*objectp = NULL;
   2397  1.128.2.7        ad 			return NULL;
   2398       1.43   thorpej 		}
   2399       1.43   thorpej 	}
   2400       1.43   thorpej 
   2401  1.128.2.7        ad 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
   2402  1.128.2.7        ad 	    (pc->pc_pool.pr_align - 1)) == 0);
   2403       1.43   thorpej 
   2404  1.128.2.7        ad 	if (pap != NULL) {
   2405  1.128.2.7        ad #ifdef POOL_VTOPHYS
   2406  1.128.2.7        ad 		*pap = POOL_VTOPHYS(object);
   2407  1.128.2.7        ad #else
   2408  1.128.2.7        ad 		*pap = POOL_PADDR_INVALID;
   2409  1.128.2.7        ad #endif
   2410      1.102       chs 	}
   2411       1.51   thorpej 
   2412  1.128.2.7        ad 	FREECHECK_OUT(&pc->pc_freecheck, object);
   2413  1.128.2.7        ad 	return NULL;
   2414       1.43   thorpej }
   2415       1.43   thorpej 
   2416  1.128.2.6        ad /*
   2417  1.128.2.7        ad  * pool_cache_get{,_paddr}:
   2418  1.128.2.6        ad  *
   2419  1.128.2.7        ad  *	Get an object from a pool cache (optionally returning
   2420  1.128.2.7        ad  *	the physical address of the object).
   2421  1.128.2.6        ad  */
   2422  1.128.2.7        ad void *
   2423  1.128.2.7        ad pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
   2424      1.102       chs {
   2425  1.128.2.7        ad 	pool_cache_cpu_t *cc;
   2426  1.128.2.7        ad 	pcg_t *pcg;
   2427      1.102       chs 	void *object;
   2428  1.128.2.7        ad 	int s;
   2429      1.102       chs 
   2430  1.128.2.7        ad #ifdef LOCKDEBUG
   2431  1.128.2.7        ad 	if (flags & PR_WAITOK)
   2432  1.128.2.7        ad 		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
   2433  1.128.2.7        ad #endif
   2434  1.128.2.6        ad 
   2435  1.128.2.7        ad 	cc = pool_cache_cpu_enter(pc, &s);
   2436  1.128.2.7        ad 	do {
    2437  1.128.2.7        ad 		/* Try to allocate an object from the current group. */
    2438  1.128.2.7        ad 		pcg = cc->cc_current;
   2439  1.128.2.7        ad 		if (pcg != NULL && pcg->pcg_avail > 0) {
   2440  1.128.2.7        ad 			object = pcg_get(pcg, pap);
   2441  1.128.2.7        ad 			cc->cc_hits++;
   2442  1.128.2.7        ad 			pool_cache_cpu_exit(cc, &s);
   2443  1.128.2.7        ad 			FREECHECK_OUT(&pc->pc_freecheck, object);
   2444  1.128.2.7        ad 			return object;
   2445  1.128.2.7        ad 		}
   2446  1.128.2.6        ad 
   2447  1.128.2.7        ad 		/*
   2448  1.128.2.7        ad 		 * That failed.  If the previous group isn't empty, swap
   2449  1.128.2.7        ad 		 * it with the current group and allocate from there.
   2450  1.128.2.7        ad 		 */
   2451  1.128.2.7        ad 		pcg = cc->cc_previous;
   2452  1.128.2.7        ad 		if (pcg != NULL && pcg->pcg_avail > 0) {
   2453  1.128.2.7        ad 			cc->cc_previous = cc->cc_current;
   2454  1.128.2.7        ad 			cc->cc_current = pcg;
   2455  1.128.2.7        ad 			continue;
   2456      1.102       chs 		}
   2457  1.128.2.6        ad 
   2458  1.128.2.7        ad 		/*
   2459  1.128.2.7        ad 		 * Can't allocate from either group: try the slow path.
   2460  1.128.2.7        ad 		 * If get_slow() allocated an object for us, or if
   2461  1.128.2.7        ad 		 * no more objects are available, it will return NULL.
   2462  1.128.2.7        ad 		 * Otherwise, we need to retry.
   2463  1.128.2.7        ad 		 */
   2464  1.128.2.7        ad 		cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
   2465  1.128.2.7        ad 	} while (cc != NULL);
   2466  1.128.2.7        ad 
   2467  1.128.2.7        ad 	return object;
   2468      1.105  christos }
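
                              /*
                               * Consumer sketch (assumes the pool_cache_init(), pool_cache_get()
                               * and pool_cache_put() interfaces declared in <sys/pool.h>; all
                               * "foo" names are hypothetical):
                               *
                               *	static pool_cache_t foo_cache;
                               *
                               *	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
                               *	    "foopl", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
                               *
                               *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
                               *	...
                               *	pool_cache_put(foo_cache, f);
                               */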
   2469      1.105  christos 
   2470  1.128.2.7        ad #if __GNUC_PREREQ__(3, 0)
    2471  1.128.2.7        ad __attribute__ ((noinline))
   2472  1.128.2.7        ad #endif
   2473  1.128.2.7        ad pool_cache_cpu_t *
   2474  1.128.2.7        ad pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
   2475      1.105  christos {
   2476  1.128.2.7        ad 	pcg_t *pcg, *cur;
   2477  1.128.2.7        ad 	uint64_t ncsw;
   2478  1.128.2.7        ad 	pool_cache_t pc;
   2479      1.105  christos 
   2480  1.128.2.7        ad 	pc = cc->cc_cache;
   2481  1.128.2.7        ad 	cc->cc_misses++;
   2482      1.105  christos 
   2483  1.128.2.7        ad 	/*
   2484  1.128.2.7        ad 	 * No free slots locally.  Try to grab an empty, unused
   2485  1.128.2.7        ad 	 * group from the cache.
   2486  1.128.2.7        ad 	 */
   2487  1.128.2.7        ad 	if (!mutex_tryenter(&pc->pc_lock)) {
   2488  1.128.2.7        ad 		ncsw = curlwp->l_ncsw;
   2489  1.128.2.7        ad 		mutex_enter(&pc->pc_lock);
   2490  1.128.2.7        ad 		pc->pc_contended++;
   2491      1.102       chs 
   2492  1.128.2.7        ad 		/*
   2493  1.128.2.7        ad 		 * If we context switched while locking, then
   2494  1.128.2.7        ad 		 * our view of the per-CPU data is invalid:
   2495  1.128.2.7        ad 		 * retry.
   2496  1.128.2.7        ad 		 */
   2497  1.128.2.7        ad 		if (curlwp->l_ncsw != ncsw) {
   2498  1.128.2.7        ad 			mutex_exit(&pc->pc_lock);
   2499  1.128.2.7        ad 			pool_cache_cpu_exit(cc, s);
   2500  1.128.2.7        ad 			return pool_cache_cpu_enter(pc, s);
   2501  1.128.2.7        ad 		}
   2502  1.128.2.7        ad 	}
   2503      1.101   thorpej 
   2504  1.128.2.7        ad 	if ((pcg = pc->pc_emptygroups) != NULL) {
   2505  1.128.2.7        ad 		/*
    2506  1.128.2.7        ad 		 * If there's an empty group, release our full
   2507  1.128.2.7        ad 		 * group back to the cache.  Install the empty
   2508  1.128.2.7        ad 		 * group as cc_current and return.
   2509  1.128.2.7        ad 		 */
   2510  1.128.2.7        ad 		if ((cur = cc->cc_current) != NULL) {
   2511  1.128.2.7        ad 			KASSERT(cur->pcg_avail == PCG_NOBJECTS);
   2512  1.128.2.7        ad 			cur->pcg_next = pc->pc_fullgroups;
   2513  1.128.2.7        ad 			pc->pc_fullgroups = cur;
   2514  1.128.2.7        ad 			pc->pc_nfull++;
   2515  1.128.2.7        ad 		}
   2516  1.128.2.7        ad 		KASSERT(pcg->pcg_avail == 0);
   2517  1.128.2.7        ad 		cc->cc_current = pcg;
   2518  1.128.2.7        ad 		pc->pc_emptygroups = pcg->pcg_next;
   2519  1.128.2.7        ad 		pc->pc_hits++;
   2520  1.128.2.7        ad 		pc->pc_nempty--;
   2521  1.128.2.7        ad 		mutex_exit(&pc->pc_lock);
   2522  1.128.2.7        ad 		return cc;
   2523  1.128.2.7        ad 	}
   2524      1.101   thorpej 
   2525  1.128.2.7        ad 	/*
   2526  1.128.2.7        ad 	 * Nothing available locally or in cache.  Take the
   2527  1.128.2.7        ad 	 * slow path and try to allocate a new group that we
   2528  1.128.2.7        ad 	 * can release to.
   2529  1.128.2.7        ad 	 */
   2530  1.128.2.7        ad 	pc->pc_misses++;
   2531  1.128.2.7        ad 	mutex_exit(&pc->pc_lock);
   2532  1.128.2.7        ad 	pool_cache_cpu_exit(cc, s);
   2533       1.43   thorpej 
   2534  1.128.2.7        ad 	/*
   2535  1.128.2.7        ad 	 * If we can't allocate a new group, just throw the
   2536  1.128.2.7        ad 	 * object away.
   2537  1.128.2.7        ad 	 */
   2538  1.128.2.7        ad #ifdef XXXAD	/* Disable the cache layer for now. */
   2539  1.128.2.7        ad 	pcg = pool_get(&pcgpool, PR_NOWAIT);
   2540  1.128.2.7        ad #else
   2541  1.128.2.7        ad 	pcg = NULL;
   2542  1.128.2.7        ad #endif
   2543  1.128.2.7        ad 	if (pcg == NULL) {
   2544  1.128.2.7        ad 		pool_cache_destruct_object(pc, object);
   2545  1.128.2.7        ad 		return NULL;
   2546  1.128.2.7        ad 	}
   2547  1.128.2.7        ad #ifdef DIAGNOSTIC
   2548  1.128.2.7        ad 	memset(pcg, 0, sizeof(*pcg));
   2549  1.128.2.7        ad #else
   2550  1.128.2.7        ad 	pcg->pcg_avail = 0;
   2551  1.128.2.7        ad #endif
   2552       1.43   thorpej 
   2553  1.128.2.7        ad 	/*
   2554  1.128.2.7        ad 	 * Add the empty group to the cache and try again.
   2555  1.128.2.7        ad 	 */
   2556  1.128.2.7        ad 	mutex_enter(&pc->pc_lock);
   2557  1.128.2.7        ad 	pcg->pcg_next = pc->pc_emptygroups;
   2558  1.128.2.7        ad 	pc->pc_emptygroups = pcg;
   2559  1.128.2.7        ad 	pc->pc_nempty++;
   2560  1.128.2.2        ad 	mutex_exit(&pc->pc_lock);
   2561       1.43   thorpej 
   2562  1.128.2.7        ad 	return pool_cache_cpu_enter(pc, s);
   2563  1.128.2.7        ad }
   2564       1.43   thorpej 
   2565       1.43   thorpej /*
   2566  1.128.2.7        ad  * pool_cache_put{,_paddr}:
   2567       1.43   thorpej  *
   2568  1.128.2.7        ad  *	Put an object back to the pool cache (optionally caching the
   2569  1.128.2.7        ad  *	physical address of the object).
   2570       1.43   thorpej  */
   2571  1.128.2.7        ad void
   2572  1.128.2.7        ad pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
   2573       1.43   thorpej {
   2574  1.128.2.7        ad 	pool_cache_cpu_t *cc;
   2575  1.128.2.7        ad 	pcg_t *pcg;
   2576  1.128.2.7        ad 	int s;
   2577      1.101   thorpej 
   2578  1.128.2.7        ad 	FREECHECK_IN(&pc->pc_freecheck, object);
   2579      1.101   thorpej 
   2580  1.128.2.7        ad 	cc = pool_cache_cpu_enter(pc, &s);
   2581  1.128.2.7        ad 	do {
   2582  1.128.2.7        ad 		/* If the current group isn't full, release it there. */
    2583  1.128.2.7        ad 		pcg = cc->cc_current;
   2584  1.128.2.7        ad 		if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
   2585  1.128.2.7        ad 			pcg_put(pcg, object, pa);
   2586  1.128.2.7        ad 			cc->cc_hits++;
   2587  1.128.2.7        ad 			pool_cache_cpu_exit(cc, &s);
   2588  1.128.2.7        ad 			return;
   2589  1.128.2.7        ad 		}
   2590       1.43   thorpej 
   2591  1.128.2.7        ad 		/*
   2592  1.128.2.7        ad 		 * That failed.  If the previous group is empty, swap
   2593  1.128.2.7        ad 		 * it with the current group and try again.
   2594  1.128.2.7        ad 		 */
   2595  1.128.2.7        ad 		pcg = cc->cc_previous;
   2596  1.128.2.7        ad 		if (pcg != NULL && pcg->pcg_avail == 0) {
   2597  1.128.2.7        ad 			cc->cc_previous = cc->cc_current;
   2598  1.128.2.7        ad 			cc->cc_current = pcg;
   2599  1.128.2.7        ad 			continue;
   2600  1.128.2.7        ad 		}
   2601  1.128.2.7        ad 
   2602  1.128.2.7        ad 		/*
   2603  1.128.2.7        ad 		 * Can't free to either group: try the slow path.
   2604  1.128.2.7        ad 		 * If put_slow() releases the object for us, it
   2605  1.128.2.7        ad 		 * will return NULL.  Otherwise we need to retry.
   2606  1.128.2.7        ad 		 */
   2607  1.128.2.7        ad 		cc = pool_cache_put_slow(cc, &s, object, pa);
   2608  1.128.2.7        ad 	} while (cc != NULL);
   2609        1.3        pk }
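
                              /*
                               * Usage sketch ("foo_cache", "obj" and "pa" are hypothetical):
                               * callers that track physical addresses, e.g. for DMA, hand one
                               * back along with the object; plain callers use the
                               * pool_cache_put() wrapper, which is expected to pass
                               * POOL_PADDR_INVALID:
                               *
                               *	pool_cache_put_paddr(foo_cache, obj, pa);
                               *	pool_cache_put(foo_cache, obj);
                               */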
   2610       1.66   thorpej 
   2611       1.66   thorpej /*
   2612       1.66   thorpej  * Pool backend allocators.
   2613       1.66   thorpej  *
   2614       1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   2615       1.66   thorpej  * and any additional draining that might be needed.
   2616       1.66   thorpej  *
   2617       1.66   thorpej  * We provide two standard allocators:
   2618       1.66   thorpej  *
   2619       1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   2620       1.66   thorpej  *
   2621       1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   2622       1.66   thorpej  *	in interrupt context.
   2623       1.66   thorpej  */
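
                              /*
                               * Selection sketch ("foo_pool" and struct foo are hypothetical,
                               * and the pool_init() prototype with a trailing IPL argument is
                               * assumed from <sys/pool.h>): a pool never touched from interrupt
                               * context would be created against pool_allocator_nointr, while
                               * passing NULL selects the default:
                               *
                               *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
                               *	    &pool_allocator_nointr, IPL_NONE);
                               */
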
   2624       1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   2625       1.66   thorpej void	pool_page_free(struct pool *, void *);
   2626       1.66   thorpej 
   2627      1.112     bjh21 #ifdef POOL_SUBPAGE
   2628      1.112     bjh21 struct pool_allocator pool_allocator_kmem_fullpage = {
   2629      1.112     bjh21 	pool_page_alloc, pool_page_free, 0,
   2630      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2631      1.112     bjh21 };
   2632      1.112     bjh21 #else
   2633       1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   2634       1.66   thorpej 	pool_page_alloc, pool_page_free, 0,
   2635      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2636       1.66   thorpej };
   2637      1.112     bjh21 #endif
   2638       1.66   thorpej 
   2639       1.66   thorpej void	*pool_page_alloc_nointr(struct pool *, int);
   2640       1.66   thorpej void	pool_page_free_nointr(struct pool *, void *);
   2641       1.66   thorpej 
   2642      1.112     bjh21 #ifdef POOL_SUBPAGE
   2643      1.112     bjh21 struct pool_allocator pool_allocator_nointr_fullpage = {
   2644      1.112     bjh21 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2645      1.117      yamt 	.pa_backingmapptr = &kernel_map,
   2646      1.112     bjh21 };
   2647      1.112     bjh21 #else
   2648       1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   2649       1.66   thorpej 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2650      1.117      yamt 	.pa_backingmapptr = &kernel_map,
   2651       1.66   thorpej };
   2652      1.112     bjh21 #endif
   2653       1.66   thorpej 
   2654       1.66   thorpej #ifdef POOL_SUBPAGE
   2655       1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   2656       1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   2657       1.66   thorpej 
   2658      1.112     bjh21 struct pool_allocator pool_allocator_kmem = {
   2659      1.112     bjh21 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2660      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2661      1.112     bjh21 };
   2662      1.112     bjh21 
   2663      1.112     bjh21 void	*pool_subpage_alloc_nointr(struct pool *, int);
   2664      1.112     bjh21 void	pool_subpage_free_nointr(struct pool *, void *);
   2665      1.112     bjh21 
   2666      1.112     bjh21 struct pool_allocator pool_allocator_nointr = {
    2667      1.112     bjh21 	pool_subpage_alloc_nointr, pool_subpage_free_nointr, POOL_SUBPAGE,
   2668      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2669       1.66   thorpej };
   2670       1.66   thorpej #endif /* POOL_SUBPAGE */
   2671       1.66   thorpej 
   2672      1.117      yamt static void *
   2673      1.117      yamt pool_allocator_alloc(struct pool *pp, int flags)
   2674       1.66   thorpej {
   2675      1.117      yamt 	struct pool_allocator *pa = pp->pr_alloc;
   2676       1.66   thorpej 	void *res;
   2677       1.66   thorpej 
   2678      1.117      yamt 	res = (*pa->pa_alloc)(pp, flags);
   2679      1.117      yamt 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2680       1.66   thorpej 		/*
   2681      1.117      yamt 		 * We only run the drain hook here if PR_NOWAIT.
   2682      1.117      yamt 		 * In other cases, the hook will be run in
   2683      1.117      yamt 		 * pool_reclaim().
   2684       1.66   thorpej 		 */
   2685      1.117      yamt 		if (pp->pr_drain_hook != NULL) {
   2686      1.117      yamt 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2687      1.117      yamt 			res = (*pa->pa_alloc)(pp, flags);
   2688       1.66   thorpej 		}
   2689      1.117      yamt 	}
   2690      1.117      yamt 	return res;
   2691       1.66   thorpej }
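
                              /*
                               * Hook sketch ("foo_drain", "foo_pool" and
                               * foo_release_cached_objects() are hypothetical): the drain hook
                               * that may be run above is registered by the pool's owner via
                               * pool_set_drain_hook(), and is asked to give back memory cached
                               * outside the pool:
                               *
                               *	static void
                               *	foo_drain(void *arg, int flags)
                               *	{
                               *
                               *		foo_release_cached_objects();
                               *	}
                               *
                               *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
                               */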
   2692       1.66   thorpej 
   2693      1.117      yamt static void
   2694       1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2695       1.66   thorpej {
   2696       1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2697       1.66   thorpej 
   2698       1.66   thorpej 	(*pa->pa_free)(pp, v);
   2699       1.66   thorpej }
   2700       1.66   thorpej 
   2701       1.66   thorpej void *
   2702      1.124      yamt pool_page_alloc(struct pool *pp, int flags)
   2703       1.66   thorpej {
   2704      1.127   thorpej 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2705       1.66   thorpej 
   2706      1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
   2707       1.66   thorpej }
   2708       1.66   thorpej 
   2709       1.66   thorpej void
   2710      1.124      yamt pool_page_free(struct pool *pp, void *v)
   2711       1.66   thorpej {
   2712       1.66   thorpej 
   2713       1.98      yamt 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2714       1.98      yamt }
   2715       1.98      yamt 
   2716       1.98      yamt static void *
   2717      1.124      yamt pool_page_alloc_meta(struct pool *pp, int flags)
   2718       1.98      yamt {
   2719      1.127   thorpej 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2720       1.98      yamt 
   2721      1.100      yamt 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
   2722       1.98      yamt }
   2723       1.98      yamt 
   2724       1.98      yamt static void
   2725      1.124      yamt pool_page_free_meta(struct pool *pp, void *v)
   2726       1.98      yamt {
   2727       1.98      yamt 
   2728      1.100      yamt 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
   2729       1.66   thorpej }
   2730       1.66   thorpej 
   2731       1.66   thorpej #ifdef POOL_SUBPAGE
   2732       1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2733       1.66   thorpej void *
   2734       1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2735       1.66   thorpej {
   2736  1.128.2.2        ad 	return pool_get(&psppool, flags);
   2737       1.66   thorpej }
   2738       1.66   thorpej 
   2739       1.66   thorpej void
   2740       1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2741       1.66   thorpej {
   2742       1.66   thorpej 	pool_put(&psppool, v);
   2743       1.66   thorpej }
   2744       1.66   thorpej 
   2745       1.66   thorpej /* We don't provide a real nointr allocator.  Maybe later. */
   2746       1.66   thorpej void *
   2747      1.112     bjh21 pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2748       1.66   thorpej {
   2749       1.66   thorpej 
   2750       1.66   thorpej 	return (pool_subpage_alloc(pp, flags));
   2751       1.66   thorpej }
   2752       1.66   thorpej 
   2753       1.66   thorpej void
   2754      1.112     bjh21 pool_subpage_free_nointr(struct pool *pp, void *v)
   2755       1.66   thorpej {
   2756       1.66   thorpej 
   2757       1.66   thorpej 	pool_subpage_free(pp, v);
   2758       1.66   thorpej }
   2759      1.112     bjh21 #endif /* POOL_SUBPAGE */
   2760       1.66   thorpej void *
   2761      1.124      yamt pool_page_alloc_nointr(struct pool *pp, int flags)
   2762       1.66   thorpej {
   2763      1.127   thorpej 	bool waitok = (flags & PR_WAITOK) ? true : false;
   2764       1.66   thorpej 
   2765      1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
   2766       1.66   thorpej }
   2767       1.66   thorpej 
   2768       1.66   thorpej void
   2769      1.124      yamt pool_page_free_nointr(struct pool *pp, void *v)
   2770       1.66   thorpej {
   2771       1.66   thorpej 
   2772       1.98      yamt 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2773       1.66   thorpej }
   2774