      1  1.122.2.2        ad /*	$NetBSD: subr_pool.c,v 1.122.2.2 2006/10/20 20:03:56 ad Exp $	*/
      2        1.1        pk 
      3        1.1        pk /*-
      4  1.122.2.1        ad  * Copyright (c) 1997, 1999, 2000, 2002 The NetBSD Foundation, Inc.
      5        1.1        pk  * All rights reserved.
      6        1.1        pk  *
      7        1.1        pk  * This code is derived from software contributed to The NetBSD Foundation
      8       1.20   thorpej  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9       1.20   thorpej  * Simulation Facility, NASA Ames Research Center.
     10        1.1        pk  *
     11        1.1        pk  * Redistribution and use in source and binary forms, with or without
     12        1.1        pk  * modification, are permitted provided that the following conditions
     13        1.1        pk  * are met:
     14        1.1        pk  * 1. Redistributions of source code must retain the above copyright
     15        1.1        pk  *    notice, this list of conditions and the following disclaimer.
     16        1.1        pk  * 2. Redistributions in binary form must reproduce the above copyright
     17        1.1        pk  *    notice, this list of conditions and the following disclaimer in the
     18        1.1        pk  *    documentation and/or other materials provided with the distribution.
     19        1.1        pk  * 3. All advertising materials mentioning features or use of this software
     20        1.1        pk  *    must display the following acknowledgement:
     21       1.13  christos  *	This product includes software developed by the NetBSD
     22       1.13  christos  *	Foundation, Inc. and its contributors.
     23        1.1        pk  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24        1.1        pk  *    contributors may be used to endorse or promote products derived
     25        1.1        pk  *    from this software without specific prior written permission.
     26        1.1        pk  *
     27        1.1        pk  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28        1.1        pk  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29        1.1        pk  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30        1.1        pk  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31        1.1        pk  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32        1.1        pk  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33        1.1        pk  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34        1.1        pk  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35        1.1        pk  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36        1.1        pk  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37        1.1        pk  * POSSIBILITY OF SUCH DAMAGE.
     38        1.1        pk  */
     39       1.64     lukem 
     40       1.64     lukem #include <sys/cdefs.h>
     41  1.122.2.2        ad __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.122.2.2 2006/10/20 20:03:56 ad Exp $");
     42       1.24    scottr 
     43       1.25   thorpej #include "opt_pool.h"
     44       1.24    scottr #include "opt_poollog.h"
     45       1.28   thorpej #include "opt_lockdebug.h"
     46        1.1        pk 
     47        1.1        pk #include <sys/param.h>
     48        1.1        pk #include <sys/systm.h>
     49        1.1        pk #include <sys/proc.h>
     50        1.1        pk #include <sys/errno.h>
     51        1.1        pk #include <sys/kernel.h>
     52        1.1        pk #include <sys/malloc.h>
     53        1.1        pk #include <sys/lock.h>
     54        1.1        pk #include <sys/pool.h>
     55       1.20   thorpej #include <sys/syslog.h>
     56        1.3        pk 
     57        1.3        pk #include <uvm/uvm.h>
     58        1.3        pk 
     59        1.1        pk /*
     60        1.1        pk  * Pool resource management utility.
     61        1.3        pk  *
     62       1.88       chs  * Memory is allocated in pages which are split into pieces according to
     63       1.88       chs  * the pool item size. Each page is kept on one of three lists in the
     64       1.88       chs  * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
     65       1.88       chs  * for empty, full and partially-full pages respectively. The individual
     66       1.88       chs  * pool items are on a linked list headed by `ph_itemlist' in each page
     67       1.88       chs  * header. The memory for building the page list is either taken from
     68       1.88       chs  * the allocated pages themselves (for small pool items) or taken from
     69       1.88       chs  * an internal pool of page headers (`phpool').
     70        1.1        pk  */
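
/*
 * Illustrative sketch (editorial addition, not part of this revision):
 * typical use of the interface described above.  The "foo" names are
 * hypothetical.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &pool_allocator_nointr);
 *	...
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Pools backed by an interrupt-safe allocator must instead be accessed
 * with the spl raised appropriately; see the pool_get() comment below.
 */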
     71        1.1        pk 
     72        1.3        pk /* List of all pools */
     73      1.102       chs LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
     74        1.3        pk 
     75        1.3        pk /* Private pool for page header structures */
     76       1.97      yamt #define	PHPOOL_MAX	8
     77       1.97      yamt static struct pool phpool[PHPOOL_MAX];
     78       1.97      yamt #define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
     79        1.3        pk 
     80       1.62     bjh21 #ifdef POOL_SUBPAGE
     81       1.62     bjh21 /* Pool of subpages for use by normal pools. */
     82       1.62     bjh21 static struct pool psppool;
     83       1.62     bjh21 #endif
     84       1.62     bjh21 
     85      1.117      yamt static SLIST_HEAD(, pool_allocator) pa_deferinitq =
     86      1.117      yamt     SLIST_HEAD_INITIALIZER(pa_deferinitq);
     87      1.117      yamt 
     88       1.98      yamt static void *pool_page_alloc_meta(struct pool *, int);
     89       1.98      yamt static void pool_page_free_meta(struct pool *, void *);
     90       1.98      yamt 
     91       1.98      yamt /* allocator for pool metadata */
     92       1.98      yamt static struct pool_allocator pool_allocator_meta = {
     93      1.117      yamt 	pool_page_alloc_meta, pool_page_free_meta,
     94      1.117      yamt 	.pa_backingmapptr = &kmem_map,
     95       1.98      yamt };
     96       1.98      yamt 
     97        1.3        pk /* # of seconds to retain page after last use */
     98        1.3        pk int pool_inactive_time = 10;
     99        1.3        pk 
    100        1.3        pk /* Next candidate for drainage (see pool_drain()) */
    101       1.23   thorpej static struct pool	*drainpp;
    102       1.23   thorpej 
    103       1.23   thorpej /* This spin lock protects both pool_head and drainpp. */
    104       1.23   thorpej struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
    105        1.3        pk 
    106       1.99      yamt typedef uint8_t pool_item_freelist_t;
    107       1.99      yamt 
    108        1.3        pk struct pool_item_header {
    109        1.3        pk 	/* Page headers */
    110       1.88       chs 	LIST_ENTRY(pool_item_header)
    111        1.3        pk 				ph_pagelist;	/* pool page list */
    112       1.88       chs 	SPLAY_ENTRY(pool_item_header)
    113       1.88       chs 				ph_node;	/* Off-page page headers */
    114        1.3        pk 	caddr_t			ph_page;	/* this page's address */
    115        1.3        pk 	struct timeval		ph_time;	/* last referenced */
    116       1.97      yamt 	union {
    117       1.97      yamt 		/* !PR_NOTOUCH */
    118       1.97      yamt 		struct {
    119      1.102       chs 			LIST_HEAD(, pool_item)
    120       1.97      yamt 				phu_itemlist;	/* chunk list for this page */
    121       1.97      yamt 		} phu_normal;
    122       1.97      yamt 		/* PR_NOTOUCH */
    123       1.97      yamt 		struct {
    124       1.97      yamt 			uint16_t
    125       1.97      yamt 				phu_off;	/* start offset in page */
    126       1.99      yamt 			pool_item_freelist_t
    127       1.97      yamt 				phu_firstfree;	/* first free item */
    128       1.99      yamt 			/*
    129       1.99      yamt 			 * XXX it might be better to use
    130       1.99      yamt 			 * a simple bitmap and ffs(3)
    131       1.99      yamt 			 */
    132       1.97      yamt 		} phu_notouch;
    133       1.97      yamt 	} ph_u;
    134       1.97      yamt 	uint16_t		ph_nmissing;	/* # of chunks in use */
    135        1.3        pk };
    136       1.97      yamt #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
    137       1.97      yamt #define	ph_off		ph_u.phu_notouch.phu_off
    138       1.97      yamt #define	ph_firstfree	ph_u.phu_notouch.phu_firstfree
    139        1.3        pk 
    140        1.1        pk struct pool_item {
    141        1.3        pk #ifdef DIAGNOSTIC
    142       1.82   thorpej 	u_int pi_magic;
    143       1.33       chs #endif
    144       1.82   thorpej #define	PI_MAGIC 0xdeadbeefU
    145        1.3        pk 	/* Other entries use only this list entry */
    146      1.102       chs 	LIST_ENTRY(pool_item)	pi_list;
    147        1.3        pk };
    148        1.3        pk 
    149       1.53   thorpej #define	POOL_NEEDS_CATCHUP(pp)						\
    150       1.53   thorpej 	((pp)->pr_nitems < (pp)->pr_minitems)
    151       1.53   thorpej 
    152       1.43   thorpej /*
    153       1.43   thorpej  * Pool cache management.
    154       1.43   thorpej  *
    155       1.43   thorpej  * Pool caches provide a way for constructed objects to be cached by the
    156       1.43   thorpej  * pool subsystem.  This can lead to performance improvements by avoiding
     157       1.43   thorpej  * needless object construction/destruction; they are deferred until
     158       1.43   thorpej  * absolutely necessary.
    159       1.43   thorpej  *
    160       1.43   thorpej  * Caches are grouped into cache groups.  Each cache group references
    161       1.43   thorpej  * up to 16 constructed objects.  When a cache allocates an object
    162       1.43   thorpej  * from the pool, it calls the object's constructor and places it into
    163       1.43   thorpej  * a cache group.  When a cache group frees an object back to the pool,
    164       1.43   thorpej  * it first calls the object's destructor.  This allows the object to
    165       1.43   thorpej  * persist in constructed form while freed to the cache.
    166       1.43   thorpej  *
    167       1.43   thorpej  * Multiple caches may exist for each pool.  This allows a single
    168       1.43   thorpej  * object type to have multiple constructed forms.  The pool references
    169       1.43   thorpej  * each cache, so that when a pool is drained by the pagedaemon, it can
    170       1.43   thorpej  * drain each individual cache as well.  Each time a cache is drained,
    171       1.43   thorpej  * the most idle cache group is freed to the pool in its entirety.
    172       1.43   thorpej  *
     173       1.43   thorpej  * Pool caches are laid on top of pools.  By layering them, we can avoid
    174       1.43   thorpej  * the complexity of cache management for pools which would not benefit
    175       1.43   thorpej  * from it.
    176       1.43   thorpej  */
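
/*
 * Illustrative sketch (editorial addition): attaching a cache to a pool
 * with the pool_cache interface of this era.  The "foo" names and the
 * constructor/destructor are hypothetical; the ctor has the shape
 * int (*)(void *arg, void *obj, int flags) and the dtor
 * void (*)(void *arg, void *obj).
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	...
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 *
 * pool_cache_put() keeps the object constructed in the cache; foo_dtor
 * runs only when the cache is drained back to the pool.
 */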
    177       1.43   thorpej 
    178       1.43   thorpej /* The cache group pool. */
    179       1.43   thorpej static struct pool pcgpool;
    180        1.3        pk 
    181      1.102       chs static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
    182      1.102       chs 				   struct pool_cache_grouplist *);
    183      1.102       chs static void	pcg_grouplist_free(struct pool_cache_grouplist *);
    184        1.3        pk 
    185       1.42   thorpej static int	pool_catchup(struct pool *);
    186       1.55   thorpej static void	pool_prime_page(struct pool *, caddr_t,
    187       1.55   thorpej 		    struct pool_item_header *);
    188       1.88       chs static void	pool_update_curpage(struct pool *);
    189       1.66   thorpej 
    190      1.113      yamt static int	pool_grow(struct pool *, int);
    191      1.117      yamt static void	*pool_allocator_alloc(struct pool *, int);
    192      1.117      yamt static void	pool_allocator_free(struct pool *, void *);
    193        1.3        pk 
    194       1.97      yamt static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
    195       1.88       chs 	void (*)(const char *, ...));
    196       1.42   thorpej static void pool_print1(struct pool *, const char *,
    197       1.42   thorpej 	void (*)(const char *, ...));
    198        1.3        pk 
    199       1.88       chs static int pool_chk_page(struct pool *, const char *,
    200       1.88       chs 			 struct pool_item_header *);
    201       1.88       chs 
    202        1.3        pk /*
    203       1.52   thorpej  * Pool log entry. An array of these is allocated in pool_init().
    204        1.3        pk  */
    205        1.3        pk struct pool_log {
    206        1.3        pk 	const char	*pl_file;
    207        1.3        pk 	long		pl_line;
    208        1.3        pk 	int		pl_action;
    209       1.25   thorpej #define	PRLOG_GET	1
    210       1.25   thorpej #define	PRLOG_PUT	2
    211        1.3        pk 	void		*pl_addr;
    212        1.1        pk };
    213        1.1        pk 
    214       1.86      matt #ifdef POOL_DIAGNOSTIC
    215        1.3        pk /* Number of entries in pool log buffers */
    216       1.17   thorpej #ifndef POOL_LOGSIZE
    217       1.17   thorpej #define	POOL_LOGSIZE	10
    218       1.17   thorpej #endif
    219       1.17   thorpej 
    220       1.17   thorpej int pool_logsize = POOL_LOGSIZE;
    221        1.1        pk 
    222      1.110     perry static inline void
    223       1.42   thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    224        1.3        pk {
    225        1.3        pk 	int n = pp->pr_curlogentry;
    226        1.3        pk 	struct pool_log *pl;
    227        1.3        pk 
    228       1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    229        1.3        pk 		return;
    230        1.3        pk 
    231        1.3        pk 	/*
    232        1.3        pk 	 * Fill in the current entry. Wrap around and overwrite
    233        1.3        pk 	 * the oldest entry if necessary.
    234        1.3        pk 	 */
    235        1.3        pk 	pl = &pp->pr_log[n];
    236        1.3        pk 	pl->pl_file = file;
    237        1.3        pk 	pl->pl_line = line;
    238        1.3        pk 	pl->pl_action = action;
    239        1.3        pk 	pl->pl_addr = v;
    240        1.3        pk 	if (++n >= pp->pr_logsize)
    241        1.3        pk 		n = 0;
    242        1.3        pk 	pp->pr_curlogentry = n;
    243        1.3        pk }
    244        1.3        pk 
    245        1.3        pk static void
    246       1.42   thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
    247       1.42   thorpej     void (*pr)(const char *, ...))
    248        1.3        pk {
    249        1.3        pk 	int i = pp->pr_logsize;
    250        1.3        pk 	int n = pp->pr_curlogentry;
    251        1.3        pk 
    252       1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    253        1.3        pk 		return;
    254        1.3        pk 
    255        1.3        pk 	/*
    256        1.3        pk 	 * Print all entries in this pool's log.
    257        1.3        pk 	 */
    258        1.3        pk 	while (i-- > 0) {
    259        1.3        pk 		struct pool_log *pl = &pp->pr_log[n];
    260        1.3        pk 		if (pl->pl_action != 0) {
    261       1.25   thorpej 			if (pi == NULL || pi == pl->pl_addr) {
    262       1.25   thorpej 				(*pr)("\tlog entry %d:\n", i);
    263       1.25   thorpej 				(*pr)("\t\taction = %s, addr = %p\n",
    264       1.25   thorpej 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    265       1.25   thorpej 				    pl->pl_addr);
    266       1.25   thorpej 				(*pr)("\t\tfile: %s at line %lu\n",
    267       1.25   thorpej 				    pl->pl_file, pl->pl_line);
    268       1.25   thorpej 			}
    269        1.3        pk 		}
    270        1.3        pk 		if (++n >= pp->pr_logsize)
    271        1.3        pk 			n = 0;
    272        1.3        pk 	}
    273        1.3        pk }
    274       1.25   thorpej 
    275      1.110     perry static inline void
    276       1.42   thorpej pr_enter(struct pool *pp, const char *file, long line)
    277       1.25   thorpej {
    278       1.25   thorpej 
    279       1.34   thorpej 	if (__predict_false(pp->pr_entered_file != NULL)) {
    280       1.25   thorpej 		printf("pool %s: reentrancy at file %s line %ld\n",
    281       1.25   thorpej 		    pp->pr_wchan, file, line);
    282       1.25   thorpej 		printf("         previous entry at file %s line %ld\n",
    283       1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    284       1.25   thorpej 		panic("pr_enter");
    285       1.25   thorpej 	}
    286       1.25   thorpej 
    287       1.25   thorpej 	pp->pr_entered_file = file;
    288       1.25   thorpej 	pp->pr_entered_line = line;
    289       1.25   thorpej }
    290       1.25   thorpej 
    291      1.110     perry static inline void
    292       1.42   thorpej pr_leave(struct pool *pp)
    293       1.25   thorpej {
    294       1.25   thorpej 
    295       1.34   thorpej 	if (__predict_false(pp->pr_entered_file == NULL)) {
    296       1.25   thorpej 		printf("pool %s not entered?\n", pp->pr_wchan);
    297       1.25   thorpej 		panic("pr_leave");
    298       1.25   thorpej 	}
    299       1.25   thorpej 
    300       1.25   thorpej 	pp->pr_entered_file = NULL;
    301       1.25   thorpej 	pp->pr_entered_line = 0;
    302       1.25   thorpej }
    303       1.25   thorpej 
    304      1.110     perry static inline void
    305       1.42   thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    306       1.25   thorpej {
    307       1.25   thorpej 
    308       1.25   thorpej 	if (pp->pr_entered_file != NULL)
    309       1.25   thorpej 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    310       1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    311       1.25   thorpej }
    312        1.3        pk #else
    313       1.25   thorpej #define	pr_log(pp, v, action, file, line)
    314       1.25   thorpej #define	pr_printlog(pp, pi, pr)
    315       1.25   thorpej #define	pr_enter(pp, file, line)
    316       1.25   thorpej #define	pr_leave(pp)
    317       1.25   thorpej #define	pr_enter_check(pp, pr)
    318       1.59   thorpej #endif /* POOL_DIAGNOSTIC */
    319        1.3        pk 
    320      1.110     perry static inline int
    321       1.97      yamt pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    322       1.97      yamt     const void *v)
    323       1.97      yamt {
    324       1.97      yamt 	const char *cp = v;
    325       1.97      yamt 	int idx;
    326       1.97      yamt 
    327       1.97      yamt 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
    328       1.97      yamt 	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
    329       1.97      yamt 	KASSERT(idx < pp->pr_itemsperpage);
    330       1.97      yamt 	return idx;
    331       1.97      yamt }
    332       1.97      yamt 
    333       1.99      yamt #define	PR_FREELIST_ALIGN(p) \
    334       1.99      yamt 	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
    335       1.99      yamt #define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
    336       1.99      yamt #define	PR_INDEX_USED	((pool_item_freelist_t)-1)
    337       1.99      yamt #define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
    338       1.97      yamt 
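/*
 * Layout sketch (editorial addition): a PR_NOTOUCH pool never keeps its
 * page header inside the page, so the header is allocated from a phpool
 * that is sized to hold the item freelist directly behind it:
 *
 *	+---------------------------+  <- ph
 *	| struct pool_item_header   |
 *	+---------------------------+  <- PR_FREELIST(ph), aligned by
 *	| freelist[0]               |     PR_FREELIST_ALIGN()
 *	| ...                       |
 *	| freelist[itemsperpage-1]  |
 *	+---------------------------+
 *
 * Each entry holds the index of the next free item in the page,
 * PR_INDEX_EOL at the end of the chain, or PR_INDEX_USED while the
 * corresponding item is allocated.
 */
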
    339      1.110     perry static inline void
    340       1.97      yamt pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    341       1.97      yamt     void *obj)
    342       1.97      yamt {
    343       1.97      yamt 	int idx = pr_item_notouch_index(pp, ph, obj);
    344       1.99      yamt 	pool_item_freelist_t *freelist = PR_FREELIST(ph);
    345       1.97      yamt 
    346       1.97      yamt 	KASSERT(freelist[idx] == PR_INDEX_USED);
    347       1.97      yamt 	freelist[idx] = ph->ph_firstfree;
    348       1.97      yamt 	ph->ph_firstfree = idx;
    349       1.97      yamt }
    350       1.97      yamt 
    351      1.110     perry static inline void *
    352       1.97      yamt pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
    353       1.97      yamt {
    354       1.97      yamt 	int idx = ph->ph_firstfree;
    355       1.99      yamt 	pool_item_freelist_t *freelist = PR_FREELIST(ph);
    356       1.97      yamt 
    357       1.97      yamt 	KASSERT(freelist[idx] != PR_INDEX_USED);
    358       1.97      yamt 	ph->ph_firstfree = freelist[idx];
    359       1.97      yamt 	freelist[idx] = PR_INDEX_USED;
    360       1.97      yamt 
    361       1.97      yamt 	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
    362       1.97      yamt }
    363       1.97      yamt 
    364      1.110     perry static inline int
    365       1.88       chs phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
    366       1.88       chs {
    367      1.121      yamt 
    368      1.121      yamt 	/*
     369      1.121      yamt 	 * we consider a pool_item_header with a smaller ph_page to be bigger.
    370      1.121      yamt 	 * (this unnatural ordering is for the benefit of pr_find_pagehead.)
    371      1.121      yamt 	 */
    372      1.121      yamt 
    373       1.88       chs 	if (a->ph_page < b->ph_page)
    374      1.121      yamt 		return (1);
    375      1.121      yamt 	else if (a->ph_page > b->ph_page)
    376       1.88       chs 		return (-1);
    377       1.88       chs 	else
    378       1.88       chs 		return (0);
    379       1.88       chs }
    380       1.88       chs 
    381       1.88       chs SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
    382       1.88       chs SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
    383       1.88       chs 
    384        1.3        pk /*
    385      1.121      yamt  * Return the pool page header based on item address.
    386        1.3        pk  */
    387      1.110     perry static inline struct pool_item_header *
    388      1.121      yamt pr_find_pagehead(struct pool *pp, void *v)
    389        1.3        pk {
    390       1.88       chs 	struct pool_item_header *ph, tmp;
    391        1.3        pk 
    392      1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
    393      1.121      yamt 		tmp.ph_page = (caddr_t)(uintptr_t)v;
    394      1.121      yamt 		ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    395      1.121      yamt 		if (ph == NULL) {
    396      1.121      yamt 			ph = SPLAY_ROOT(&pp->pr_phtree);
    397      1.121      yamt 			if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
    398      1.121      yamt 				ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
    399      1.121      yamt 			}
    400      1.121      yamt 			KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
    401      1.121      yamt 		}
    402      1.121      yamt 	} else {
    403      1.121      yamt 		caddr_t page =
    404      1.121      yamt 		    (caddr_t)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
    405      1.121      yamt 
    406      1.121      yamt 		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
    407      1.121      yamt 			ph = (void *)(page + pp->pr_phoffset);
    408      1.121      yamt 		} else {
    409      1.121      yamt 			tmp.ph_page = page;
    410      1.121      yamt 			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
    411      1.121      yamt 		}
    412      1.121      yamt 	}
    413        1.3        pk 
    414      1.121      yamt 	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
    415      1.121      yamt 	    (ph->ph_page <= (char *)v &&
    416      1.121      yamt 	    (char *)v < ph->ph_page + pp->pr_alloc->pa_pagesz));
    417       1.88       chs 	return ph;
    418        1.3        pk }
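
/*
 * Illustration (editorial addition, hypothetical addresses): assume
 * 4 kB pages with headers for ph_page 0x1000 and 0x3000.  Without
 * PR_NOALIGN, the page holding item v = 0x3500 is found by masking:
 * 0x3500 & pa_pagemask == 0x3000.  With PR_NOALIGN the low bits of v
 * are meaningless, so SPLAY_FIND is tried first; it misses, and since
 * headers with larger ph_page sort earlier (see phtree_compare), one
 * SPLAY_NEXT step from the splayed root, taken when the root still
 * sorts before v, lands on 0x3000 -- the greatest ph_page below v,
 * i.e. the page containing the item.
 */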
    419        1.3        pk 
    420      1.101   thorpej static void
    421      1.101   thorpej pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
    422      1.101   thorpej {
    423      1.101   thorpej 	struct pool_item_header *ph;
    424      1.101   thorpej 	int s;
    425      1.101   thorpej 
    426      1.101   thorpej 	while ((ph = LIST_FIRST(pq)) != NULL) {
    427      1.101   thorpej 		LIST_REMOVE(ph, ph_pagelist);
    428      1.101   thorpej 		pool_allocator_free(pp, ph->ph_page);
    429      1.101   thorpej 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    430      1.101   thorpej 			s = splvm();
    431      1.101   thorpej 			pool_put(pp->pr_phpool, ph);
    432      1.101   thorpej 			splx(s);
    433      1.101   thorpej 		}
    434      1.101   thorpej 	}
    435      1.101   thorpej }
    436      1.101   thorpej 
    437        1.3        pk /*
    438        1.3        pk  * Remove a page from the pool.
    439        1.3        pk  */
    440      1.110     perry static inline void
    441       1.61       chs pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    442       1.61       chs      struct pool_pagelist *pq)
    443        1.3        pk {
    444        1.3        pk 
    445      1.101   thorpej 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
    446       1.91      yamt 
    447        1.3        pk 	/*
    448        1.7   thorpej 	 * If the page was idle, decrement the idle page count.
    449        1.3        pk 	 */
    450        1.6   thorpej 	if (ph->ph_nmissing == 0) {
    451        1.6   thorpej #ifdef DIAGNOSTIC
    452        1.6   thorpej 		if (pp->pr_nidle == 0)
    453        1.6   thorpej 			panic("pr_rmpage: nidle inconsistent");
    454       1.20   thorpej 		if (pp->pr_nitems < pp->pr_itemsperpage)
    455       1.20   thorpej 			panic("pr_rmpage: nitems inconsistent");
    456        1.6   thorpej #endif
    457        1.6   thorpej 		pp->pr_nidle--;
    458        1.6   thorpej 	}
    459        1.7   thorpej 
    460       1.20   thorpej 	pp->pr_nitems -= pp->pr_itemsperpage;
    461       1.20   thorpej 
    462        1.7   thorpej 	/*
    463      1.101   thorpej 	 * Unlink the page from the pool and queue it for release.
    464        1.7   thorpej 	 */
    465       1.88       chs 	LIST_REMOVE(ph, ph_pagelist);
    466       1.91      yamt 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
    467       1.91      yamt 		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
    468      1.101   thorpej 	LIST_INSERT_HEAD(pq, ph, ph_pagelist);
    469      1.101   thorpej 
    470        1.7   thorpej 	pp->pr_npages--;
    471        1.7   thorpej 	pp->pr_npagefree++;
    472        1.6   thorpej 
    473       1.88       chs 	pool_update_curpage(pp);
    474        1.3        pk }
    475        1.3        pk 
    476      1.117      yamt static boolean_t
    477      1.117      yamt pa_starved_p(struct pool_allocator *pa)
    478      1.117      yamt {
    479      1.117      yamt 
    480      1.117      yamt 	if (pa->pa_backingmap != NULL) {
    481      1.117      yamt 		return vm_map_starved_p(pa->pa_backingmap);
    482      1.117      yamt 	}
    483      1.117      yamt 	return FALSE;
    484      1.117      yamt }
    485      1.117      yamt 
    486      1.117      yamt static int
    487      1.117      yamt pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
    488      1.117      yamt {
    489      1.117      yamt 	struct pool *pp = obj;
    490      1.117      yamt 	struct pool_allocator *pa = pp->pr_alloc;
    491      1.117      yamt 
    492      1.117      yamt 	KASSERT(&pp->pr_reclaimerentry == ce);
    493      1.117      yamt 	pool_reclaim(pp);
    494      1.117      yamt 	if (!pa_starved_p(pa)) {
    495      1.117      yamt 		return CALLBACK_CHAIN_ABORT;
    496      1.117      yamt 	}
    497      1.117      yamt 	return CALLBACK_CHAIN_CONTINUE;
    498      1.117      yamt }
    499      1.117      yamt 
    500      1.117      yamt static void
    501      1.117      yamt pool_reclaim_register(struct pool *pp)
    502      1.117      yamt {
    503      1.117      yamt 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    504      1.117      yamt 	int s;
    505      1.117      yamt 
    506      1.117      yamt 	if (map == NULL) {
    507      1.117      yamt 		return;
    508      1.117      yamt 	}
    509      1.117      yamt 
    510      1.117      yamt 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    511      1.117      yamt 	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    512      1.117      yamt 	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
    513      1.117      yamt 	splx(s);
    514      1.117      yamt }
    515      1.117      yamt 
    516      1.117      yamt static void
    517      1.117      yamt pool_reclaim_unregister(struct pool *pp)
    518      1.117      yamt {
    519      1.117      yamt 	struct vm_map *map = pp->pr_alloc->pa_backingmap;
    520      1.117      yamt 	int s;
    521      1.117      yamt 
    522      1.117      yamt 	if (map == NULL) {
    523      1.117      yamt 		return;
    524      1.117      yamt 	}
    525      1.117      yamt 
    526      1.117      yamt 	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
    527      1.117      yamt 	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
    528      1.117      yamt 	    &pp->pr_reclaimerentry);
    529      1.117      yamt 	splx(s);
    530      1.117      yamt }
    531      1.117      yamt 
    532      1.117      yamt static void
    533      1.117      yamt pa_reclaim_register(struct pool_allocator *pa)
    534      1.117      yamt {
    535      1.117      yamt 	struct vm_map *map = *pa->pa_backingmapptr;
    536      1.117      yamt 	struct pool *pp;
    537      1.117      yamt 
    538      1.117      yamt 	KASSERT(pa->pa_backingmap == NULL);
    539      1.117      yamt 	if (map == NULL) {
    540      1.117      yamt 		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
    541      1.117      yamt 		return;
    542      1.117      yamt 	}
    543      1.117      yamt 	pa->pa_backingmap = map;
    544      1.117      yamt 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
    545      1.117      yamt 		pool_reclaim_register(pp);
    546      1.117      yamt 	}
    547      1.117      yamt }
    548      1.117      yamt 
    549        1.3        pk /*
    550       1.94    simonb  * Initialize all the pools listed in the "pools" link set.
    551       1.94    simonb  */
    552       1.94    simonb void
    553      1.117      yamt pool_subsystem_init(void)
    554       1.94    simonb {
    555      1.117      yamt 	struct pool_allocator *pa;
    556       1.94    simonb 	__link_set_decl(pools, struct link_pool_init);
    557       1.94    simonb 	struct link_pool_init * const *pi;
    558       1.94    simonb 
    559       1.94    simonb 	__link_set_foreach(pi, pools)
    560       1.94    simonb 		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
    561       1.94    simonb 		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
    562       1.94    simonb 		    (*pi)->palloc);
    563      1.117      yamt 
    564      1.117      yamt 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
    565      1.117      yamt 		KASSERT(pa->pa_backingmapptr != NULL);
    566      1.117      yamt 		KASSERT(*pa->pa_backingmapptr != NULL);
    567      1.117      yamt 		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
    568      1.117      yamt 		pa_reclaim_register(pa);
    569      1.117      yamt 	}
    570       1.94    simonb }
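
/*
 * Example (editorial addition, hypothetical pool): entries join the
 * "pools" link set via the POOL_INIT() macro in <sys/pool.h>, e.g.
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 * which emits a struct link_pool_init record that the loop above hands
 * to pool_init() at boot, before malloc() is usable.
 */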
    571       1.94    simonb 
    572       1.94    simonb /*
    573        1.3        pk  * Initialize the given pool resource structure.
    574        1.3        pk  *
    575        1.3        pk  * We export this routine to allow other kernel parts to declare
    576        1.3        pk  * static pools that must be initialized before malloc() is available.
    577        1.3        pk  */
    578        1.3        pk void
    579       1.42   thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    580       1.66   thorpej     const char *wchan, struct pool_allocator *palloc)
    581        1.3        pk {
    582      1.116    simonb #ifdef DEBUG
    583      1.116    simonb 	struct pool *pp1;
    584      1.116    simonb #endif
    585       1.92     enami 	size_t trysize, phsize;
    586      1.116    simonb 	int off, slack, s;
    587        1.3        pk 
    588       1.99      yamt 	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
    589       1.99      yamt 	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
    590       1.99      yamt 
    591      1.116    simonb #ifdef DEBUG
    592      1.116    simonb 	/*
    593      1.116    simonb 	 * Check that the pool hasn't already been initialised and
    594      1.116    simonb 	 * added to the list of all pools.
    595      1.116    simonb 	 */
    596      1.116    simonb 	LIST_FOREACH(pp1, &pool_head, pr_poollist) {
    597      1.116    simonb 		if (pp == pp1)
    598      1.116    simonb 			panic("pool_init: pool %s already initialised",
    599      1.116    simonb 			    wchan);
    600      1.116    simonb 	}
    601      1.116    simonb #endif
    602      1.116    simonb 
    603       1.25   thorpej #ifdef POOL_DIAGNOSTIC
    604       1.25   thorpej 	/*
    605       1.25   thorpej 	 * Always log if POOL_DIAGNOSTIC is defined.
    606       1.25   thorpej 	 */
    607       1.25   thorpej 	if (pool_logsize != 0)
    608       1.25   thorpej 		flags |= PR_LOGGING;
    609       1.25   thorpej #endif
    610       1.25   thorpej 
    611       1.66   thorpej 	if (palloc == NULL)
    612       1.66   thorpej 		palloc = &pool_allocator_kmem;
    613      1.112     bjh21 #ifdef POOL_SUBPAGE
    614      1.112     bjh21 	if (size > palloc->pa_pagesz) {
    615      1.112     bjh21 		if (palloc == &pool_allocator_kmem)
    616      1.112     bjh21 			palloc = &pool_allocator_kmem_fullpage;
    617      1.112     bjh21 		else if (palloc == &pool_allocator_nointr)
    618      1.112     bjh21 			palloc = &pool_allocator_nointr_fullpage;
    619      1.112     bjh21 	}
    620       1.66   thorpej #endif /* POOL_SUBPAGE */
    621       1.66   thorpej 	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
    622      1.112     bjh21 		if (palloc->pa_pagesz == 0)
    623       1.66   thorpej 			palloc->pa_pagesz = PAGE_SIZE;
    624       1.66   thorpej 
    625       1.66   thorpej 		TAILQ_INIT(&palloc->pa_list);
    626       1.66   thorpej 
    627       1.66   thorpej 		simple_lock_init(&palloc->pa_slock);
    628       1.66   thorpej 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
    629       1.66   thorpej 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
    630      1.117      yamt 
    631      1.117      yamt 		if (palloc->pa_backingmapptr != NULL) {
    632      1.117      yamt 			pa_reclaim_register(palloc);
    633      1.117      yamt 		}
    634       1.66   thorpej 		palloc->pa_flags |= PA_INITIALIZED;
    635        1.4   thorpej 	}
    636        1.3        pk 
    637        1.3        pk 	if (align == 0)
    638        1.3        pk 		align = ALIGN(1);
    639       1.14   thorpej 
    640      1.120      yamt 	if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
    641       1.14   thorpej 		size = sizeof(struct pool_item);
    642        1.3        pk 
    643       1.78   thorpej 	size = roundup(size, align);
    644       1.66   thorpej #ifdef DIAGNOSTIC
    645       1.66   thorpej 	if (size > palloc->pa_pagesz)
    646      1.121      yamt 		panic("pool_init: pool item size (%zu) too large", size);
    647       1.66   thorpej #endif
    648       1.35        pk 
    649        1.3        pk 	/*
    650        1.3        pk 	 * Initialize the pool structure.
    651        1.3        pk 	 */
    652       1.88       chs 	LIST_INIT(&pp->pr_emptypages);
    653       1.88       chs 	LIST_INIT(&pp->pr_fullpages);
    654       1.88       chs 	LIST_INIT(&pp->pr_partpages);
    655      1.102       chs 	LIST_INIT(&pp->pr_cachelist);
    656        1.3        pk 	pp->pr_curpage = NULL;
    657        1.3        pk 	pp->pr_npages = 0;
    658        1.3        pk 	pp->pr_minitems = 0;
    659        1.3        pk 	pp->pr_minpages = 0;
    660        1.3        pk 	pp->pr_maxpages = UINT_MAX;
    661       1.20   thorpej 	pp->pr_roflags = flags;
    662       1.20   thorpej 	pp->pr_flags = 0;
    663       1.35        pk 	pp->pr_size = size;
    664        1.3        pk 	pp->pr_align = align;
    665        1.3        pk 	pp->pr_wchan = wchan;
    666       1.66   thorpej 	pp->pr_alloc = palloc;
    667       1.20   thorpej 	pp->pr_nitems = 0;
    668       1.20   thorpej 	pp->pr_nout = 0;
    669       1.20   thorpej 	pp->pr_hardlimit = UINT_MAX;
    670       1.20   thorpej 	pp->pr_hardlimit_warning = NULL;
    671       1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    672       1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    673       1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    674       1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    675       1.68   thorpej 	pp->pr_drain_hook = NULL;
    676       1.68   thorpej 	pp->pr_drain_hook_arg = NULL;
    677        1.3        pk 
    678        1.3        pk 	/*
    679        1.3        pk 	 * Decide whether to put the page header off page to avoid
     680       1.92     enami 	 * wasting too large a part of the page or too big an item.
     681       1.92     enami 	 * Off-page page headers go into a splay tree, so we can match
     682       1.92     enami 	 * a returned item with its header based on the page address.
     683       1.92     enami 	 * We use 1/16 of the page size and about 8 times the item
     684       1.92     enami 	 * size as the threshold (XXX: tune)
    685       1.92     enami 	 *
    686       1.92     enami 	 * However, we'll put the header into the page if we can put
    687       1.92     enami 	 * it without wasting any items.
    688       1.92     enami 	 *
    689       1.92     enami 	 * Silently enforce `0 <= ioff < align'.
    690        1.3        pk 	 */
    691       1.92     enami 	pp->pr_itemoffset = ioff %= align;
    692       1.92     enami 	/* See the comment below about reserved bytes. */
    693       1.92     enami 	trysize = palloc->pa_pagesz - ((align - ioff) % align);
    694       1.92     enami 	phsize = ALIGN(sizeof(struct pool_item_header));
    695      1.121      yamt 	if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
    696       1.97      yamt 	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
    697       1.97      yamt 	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
    698        1.3        pk 		/* Use the end of the page for the page header */
    699       1.20   thorpej 		pp->pr_roflags |= PR_PHINPAGE;
    700       1.92     enami 		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
    701        1.2        pk 	} else {
    702        1.3        pk 		/* The page header will be taken from our page header pool */
    703        1.3        pk 		pp->pr_phoffset = 0;
    704       1.66   thorpej 		off = palloc->pa_pagesz;
    705       1.88       chs 		SPLAY_INIT(&pp->pr_phtree);
    706        1.2        pk 	}
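
	/*
	 * Worked example (editorial addition, assuming pa_pagesz = 4096
	 * and phsize = 64): pr_size = 128 takes the in-page header
	 * because 128 < MIN(4096 / 16, 64 << 3) = 256; pr_size = 1536
	 * does too, since 4096 / 1536 == (4096 - 64) / 1536 == 2, so
	 * the header costs no item; pr_size = 512 satisfies neither
	 * test (4096 / 512 = 8 but 4032 / 512 = 7), so its page
	 * headers come from phpool.
	 */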
    707        1.1        pk 
    708        1.3        pk 	/*
    709        1.3        pk 	 * Alignment is to take place at `ioff' within the item. This means
    710        1.3        pk 	 * we must reserve up to `align - 1' bytes on the page to allow
    711        1.3        pk 	 * appropriate positioning of each item.
    712        1.3        pk 	 */
    713        1.3        pk 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    714       1.43   thorpej 	KASSERT(pp->pr_itemsperpage != 0);
    715       1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH)) {
    716       1.97      yamt 		int idx;
    717       1.97      yamt 
    718       1.97      yamt 		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
    719       1.97      yamt 		    idx++) {
    720       1.97      yamt 			/* nothing */
    721       1.97      yamt 		}
    722       1.97      yamt 		if (idx >= PHPOOL_MAX) {
    723       1.97      yamt 			/*
     724       1.97      yamt 			 * if you see this panic, consider tweaking
    725       1.97      yamt 			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
    726       1.97      yamt 			 */
    727       1.97      yamt 			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
    728       1.97      yamt 			    pp->pr_wchan, pp->pr_itemsperpage);
    729       1.97      yamt 		}
    730       1.97      yamt 		pp->pr_phpool = &phpool[idx];
    731       1.97      yamt 	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    732       1.97      yamt 		pp->pr_phpool = &phpool[0];
    733       1.97      yamt 	}
    734       1.97      yamt #if defined(DIAGNOSTIC)
    735       1.97      yamt 	else {
    736       1.97      yamt 		pp->pr_phpool = NULL;
    737       1.97      yamt 	}
    738       1.97      yamt #endif
    739        1.3        pk 
    740        1.3        pk 	/*
    741        1.3        pk 	 * Use the slack between the chunks and the page header
    742        1.3        pk 	 * for "cache coloring".
    743        1.3        pk 	 */
    744        1.3        pk 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    745        1.3        pk 	pp->pr_maxcolor = (slack / align) * align;
    746        1.3        pk 	pp->pr_curcolor = 0;
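
	/*
	 * Worked example (editorial addition, hypothetical sizes): with
	 * off = 4032, pr_size = 88 and align = 8, itemsperpage is 45,
	 * so slack = 4032 - 45 * 88 = 72 and pr_maxcolor = 72;
	 * successive pages place their first item at offsets
	 * 0, 8, ..., 72 and then wrap, spreading items over more cache
	 * lines.
	 */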
    747        1.3        pk 
    748        1.3        pk 	pp->pr_nget = 0;
    749        1.3        pk 	pp->pr_nfail = 0;
    750        1.3        pk 	pp->pr_nput = 0;
    751        1.3        pk 	pp->pr_npagealloc = 0;
    752        1.3        pk 	pp->pr_npagefree = 0;
    753        1.1        pk 	pp->pr_hiwat = 0;
    754        1.8   thorpej 	pp->pr_nidle = 0;
    755        1.3        pk 
    756       1.59   thorpej #ifdef POOL_DIAGNOSTIC
    757       1.25   thorpej 	if (flags & PR_LOGGING) {
    758       1.25   thorpej 		if (kmem_map == NULL ||
    759       1.25   thorpej 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    760       1.25   thorpej 		     M_TEMP, M_NOWAIT)) == NULL)
    761       1.20   thorpej 			pp->pr_roflags &= ~PR_LOGGING;
    762        1.3        pk 		pp->pr_curlogentry = 0;
    763        1.3        pk 		pp->pr_logsize = pool_logsize;
    764        1.3        pk 	}
    765       1.59   thorpej #endif
    766       1.25   thorpej 
    767       1.25   thorpej 	pp->pr_entered_file = NULL;
    768       1.25   thorpej 	pp->pr_entered_line = 0;
    769        1.3        pk 
    770       1.21   thorpej 	simple_lock_init(&pp->pr_slock);
    771        1.1        pk 
    772        1.3        pk 	/*
    773       1.43   thorpej 	 * Initialize private page header pool and cache magazine pool if we
    774       1.43   thorpej 	 * haven't done so yet.
    775       1.23   thorpej 	 * XXX LOCKING.
    776        1.3        pk 	 */
    777       1.97      yamt 	if (phpool[0].pr_size == 0) {
    778       1.97      yamt 		int idx;
    779       1.97      yamt 		for (idx = 0; idx < PHPOOL_MAX; idx++) {
    780       1.97      yamt 			static char phpool_names[PHPOOL_MAX][6+1+6+1];
    781       1.97      yamt 			int nelem;
    782       1.97      yamt 			size_t sz;
    783       1.97      yamt 
    784       1.97      yamt 			nelem = PHPOOL_FREELIST_NELEM(idx);
    785       1.97      yamt 			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
    786       1.97      yamt 			    "phpool-%d", nelem);
    787       1.97      yamt 			sz = sizeof(struct pool_item_header);
    788       1.97      yamt 			if (nelem) {
    789       1.97      yamt 				sz = PR_FREELIST_ALIGN(sz)
    790       1.99      yamt 				    + nelem * sizeof(pool_item_freelist_t);
    791       1.97      yamt 			}
    792       1.97      yamt 			pool_init(&phpool[idx], sz, 0, 0, 0,
    793       1.98      yamt 			    phpool_names[idx], &pool_allocator_meta);
    794       1.97      yamt 		}
    795       1.62     bjh21 #ifdef POOL_SUBPAGE
    796       1.62     bjh21 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    797       1.98      yamt 		    PR_RECURSIVE, "psppool", &pool_allocator_meta);
    798       1.62     bjh21 #endif
    799       1.43   thorpej 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
    800       1.98      yamt 		    0, "pcgpool", &pool_allocator_meta);
    801        1.1        pk 	}
    802        1.1        pk 
    803       1.23   thorpej 	/* Insert into the list of all pools. */
    804       1.23   thorpej 	simple_lock(&pool_head_slock);
    805      1.102       chs 	LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
    806       1.23   thorpej 	simple_unlock(&pool_head_slock);
    807       1.66   thorpej 
    808       1.66   thorpej 	/* Insert this into the list of pools using this allocator. */
    809       1.93       dbj 	s = splvm();
    810       1.66   thorpej 	simple_lock(&palloc->pa_slock);
    811       1.66   thorpej 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
    812       1.66   thorpej 	simple_unlock(&palloc->pa_slock);
    813       1.93       dbj 	splx(s);
    814      1.117      yamt 	pool_reclaim_register(pp);
    815        1.1        pk }
    816        1.1        pk 
    817        1.1        pk /*
     818        1.1        pk  * Decommission a pool resource.
    819        1.1        pk  */
    820        1.1        pk void
    821       1.42   thorpej pool_destroy(struct pool *pp)
    822        1.1        pk {
    823      1.101   thorpej 	struct pool_pagelist pq;
    824        1.3        pk 	struct pool_item_header *ph;
    825       1.93       dbj 	int s;
    826       1.43   thorpej 
    827      1.101   thorpej 	/* Remove from global pool list */
    828      1.101   thorpej 	simple_lock(&pool_head_slock);
    829      1.102       chs 	LIST_REMOVE(pp, pr_poollist);
    830      1.101   thorpej 	if (drainpp == pp)
    831      1.101   thorpej 		drainpp = NULL;
    832      1.101   thorpej 	simple_unlock(&pool_head_slock);
    833      1.101   thorpej 
    834      1.101   thorpej 	/* Remove this pool from its allocator's list of pools. */
    835      1.117      yamt 	pool_reclaim_unregister(pp);
    836       1.93       dbj 	s = splvm();
    837       1.66   thorpej 	simple_lock(&pp->pr_alloc->pa_slock);
    838       1.66   thorpej 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
    839       1.66   thorpej 	simple_unlock(&pp->pr_alloc->pa_slock);
    840       1.93       dbj 	splx(s);
    841       1.66   thorpej 
    842      1.101   thorpej 	s = splvm();
    843      1.101   thorpej 	simple_lock(&pp->pr_slock);
    844      1.101   thorpej 
    845      1.102       chs 	KASSERT(LIST_EMPTY(&pp->pr_cachelist));
    846        1.3        pk 
    847        1.3        pk #ifdef DIAGNOSTIC
    848       1.20   thorpej 	if (pp->pr_nout != 0) {
    849       1.25   thorpej 		pr_printlog(pp, NULL, printf);
    850       1.80    provos 		panic("pool_destroy: pool busy: still out: %u",
    851       1.20   thorpej 		    pp->pr_nout);
    852        1.3        pk 	}
    853        1.3        pk #endif
    854        1.1        pk 
    855      1.101   thorpej 	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
    856      1.101   thorpej 	KASSERT(LIST_EMPTY(&pp->pr_partpages));
    857      1.101   thorpej 
    858        1.3        pk 	/* Remove all pages */
    859      1.101   thorpej 	LIST_INIT(&pq);
    860       1.88       chs 	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
    861      1.101   thorpej 		pr_rmpage(pp, ph, &pq);
    862      1.101   thorpej 
    863      1.101   thorpej 	simple_unlock(&pp->pr_slock);
    864      1.101   thorpej 	splx(s);
    865        1.3        pk 
    866      1.101   thorpej 	pr_pagelist_free(pp, &pq);
    867        1.3        pk 
    868       1.59   thorpej #ifdef POOL_DIAGNOSTIC
    869       1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    870        1.3        pk 		free(pp->pr_log, M_TEMP);
    871       1.59   thorpej #endif
    872        1.1        pk }
    873        1.1        pk 
    874       1.68   thorpej void
    875       1.68   thorpej pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
    876       1.68   thorpej {
    877       1.68   thorpej 
    878       1.68   thorpej 	/* XXX no locking -- must be used just after pool_init() */
    879       1.68   thorpej #ifdef DIAGNOSTIC
    880       1.68   thorpej 	if (pp->pr_drain_hook != NULL)
    881       1.68   thorpej 		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
    882       1.68   thorpej #endif
    883       1.68   thorpej 	pp->pr_drain_hook = fn;
    884       1.68   thorpej 	pp->pr_drain_hook_arg = arg;
    885       1.68   thorpej }
    886       1.68   thorpej 
    887       1.88       chs static struct pool_item_header *
    888       1.55   thorpej pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
    889       1.55   thorpej {
    890       1.55   thorpej 	struct pool_item_header *ph;
    891       1.55   thorpej 	int s;
    892       1.55   thorpej 
    893       1.55   thorpej 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
    894       1.55   thorpej 
    895       1.55   thorpej 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    896       1.55   thorpej 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
    897       1.55   thorpej 	else {
    898       1.85        pk 		s = splvm();
    899       1.97      yamt 		ph = pool_get(pp->pr_phpool, flags);
    900       1.55   thorpej 		splx(s);
    901       1.55   thorpej 	}
    902       1.55   thorpej 
    903       1.55   thorpej 	return (ph);
    904       1.55   thorpej }
    905        1.1        pk 
    906        1.1        pk /*
     907        1.3        pk  * Grab an item from the pool; must be called at the appropriate spl level.
    908        1.1        pk  */
    909        1.3        pk void *
    910       1.59   thorpej #ifdef POOL_DIAGNOSTIC
    911       1.42   thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
    912       1.56  sommerfe #else
    913       1.56  sommerfe pool_get(struct pool *pp, int flags)
    914       1.56  sommerfe #endif
    915        1.1        pk {
    916        1.1        pk 	struct pool_item *pi;
    917        1.3        pk 	struct pool_item_header *ph;
    918       1.55   thorpej 	void *v;
    919        1.1        pk 
    920        1.2        pk #ifdef DIAGNOSTIC
    921       1.95    atatat 	if (__predict_false(pp->pr_itemsperpage == 0))
    922       1.95    atatat 		panic("pool_get: pool %p: pr_itemsperpage is zero, "
    923       1.95    atatat 		    "pool not initialized?", pp);
    924       1.84   thorpej 	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
    925       1.37  sommerfe 			    (flags & PR_WAITOK) != 0))
    926       1.77      matt 		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
    927       1.58   thorpej 
    928      1.102       chs #endif /* DIAGNOSTIC */
    929       1.58   thorpej #ifdef LOCKDEBUG
    930       1.58   thorpej 	if (flags & PR_WAITOK)
    931      1.119      yamt 		ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
    932       1.56  sommerfe #endif
    933        1.1        pk 
    934       1.21   thorpej 	simple_lock(&pp->pr_slock);
    935       1.25   thorpej 	pr_enter(pp, file, line);
    936       1.20   thorpej 
    937       1.20   thorpej  startover:
    938       1.20   thorpej 	/*
    939       1.20   thorpej 	 * Check to see if we've reached the hard limit.  If we have,
    940       1.20   thorpej 	 * and we can wait, then wait until an item has been returned to
    941       1.20   thorpej 	 * the pool.
    942       1.20   thorpej 	 */
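	/*
	 * (Editorial note: the limit checked here is configured with
	 * pool_sethardlimit(), e.g., hypothetically,
	 * pool_sethardlimit(&foo_pool, 100, "foo pool limit", 60);
	 * once it is hit, PR_WAITOK callers sleep, unless PR_LIMITFAIL
	 * is set, and all others get NULL back.)
	 */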
    943       1.20   thorpej #ifdef DIAGNOSTIC
    944       1.34   thorpej 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
    945       1.25   thorpej 		pr_leave(pp);
    946       1.21   thorpej 		simple_unlock(&pp->pr_slock);
    947       1.20   thorpej 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
    948       1.20   thorpej 	}
    949       1.20   thorpej #endif
    950       1.34   thorpej 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    951       1.68   thorpej 		if (pp->pr_drain_hook != NULL) {
    952       1.68   thorpej 			/*
    953       1.68   thorpej 			 * Since the drain hook is going to free things
    954       1.68   thorpej 			 * back to the pool, unlock, call the hook, re-lock,
    955       1.68   thorpej 			 * and check the hardlimit condition again.
    956       1.68   thorpej 			 */
    957       1.68   thorpej 			pr_leave(pp);
    958       1.68   thorpej 			simple_unlock(&pp->pr_slock);
    959       1.68   thorpej 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
    960       1.68   thorpej 			simple_lock(&pp->pr_slock);
    961       1.68   thorpej 			pr_enter(pp, file, line);
    962       1.68   thorpej 			if (pp->pr_nout < pp->pr_hardlimit)
    963       1.68   thorpej 				goto startover;
    964       1.68   thorpej 		}
    965       1.68   thorpej 
    966       1.29  sommerfe 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    967       1.20   thorpej 			/*
    968       1.20   thorpej 			 * XXX: A warning isn't logged in this case.  Should
    969       1.20   thorpej 			 * it be?
    970       1.20   thorpej 			 */
    971       1.20   thorpej 			pp->pr_flags |= PR_WANTED;
    972       1.25   thorpej 			pr_leave(pp);
    973       1.40  sommerfe 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    974       1.25   thorpej 			pr_enter(pp, file, line);
    975       1.20   thorpej 			goto startover;
    976       1.20   thorpej 		}
    977       1.31   thorpej 
    978       1.31   thorpej 		/*
    979       1.31   thorpej 		 * Log a message that the hard limit has been hit.
    980       1.31   thorpej 		 */
    981       1.31   thorpej 		if (pp->pr_hardlimit_warning != NULL &&
    982       1.31   thorpej 		    ratecheck(&pp->pr_hardlimit_warning_last,
    983       1.31   thorpej 			      &pp->pr_hardlimit_ratecap))
    984       1.31   thorpej 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    985       1.21   thorpej 
    986       1.21   thorpej 		pp->pr_nfail++;
    987       1.21   thorpej 
    988       1.25   thorpej 		pr_leave(pp);
    989       1.21   thorpej 		simple_unlock(&pp->pr_slock);
    990       1.20   thorpej 		return (NULL);
    991       1.20   thorpej 	}
    992       1.20   thorpej 
    993        1.3        pk 	/*
    994        1.3        pk 	 * The convention we use is that if `curpage' is not NULL, then
    995        1.3        pk 	 * it points at a non-empty bucket. In particular, `curpage'
    996        1.3        pk 	 * never points at a page header which has PR_PHINPAGE set and
    997        1.3        pk 	 * has no items in its bucket.
    998        1.3        pk 	 */
    999       1.20   thorpej 	if ((ph = pp->pr_curpage) == NULL) {
   1000      1.113      yamt 		int error;
   1001      1.113      yamt 
   1002       1.20   thorpej #ifdef DIAGNOSTIC
   1003       1.20   thorpej 		if (pp->pr_nitems != 0) {
   1004       1.21   thorpej 			simple_unlock(&pp->pr_slock);
   1005       1.20   thorpej 			printf("pool_get: %s: curpage NULL, nitems %u\n",
   1006       1.20   thorpej 			    pp->pr_wchan, pp->pr_nitems);
   1007       1.80    provos 			panic("pool_get: nitems inconsistent");
   1008       1.20   thorpej 		}
   1009       1.20   thorpej #endif
   1010       1.20   thorpej 
   1011       1.21   thorpej 		/*
   1012       1.21   thorpej 		 * Call the back-end page allocator for more memory.
   1013       1.21   thorpej 		 * Release the pool lock, as the back-end page allocator
   1014       1.21   thorpej 		 * may block.
   1015       1.21   thorpej 		 */
   1016       1.25   thorpej 		pr_leave(pp);
   1017      1.113      yamt 		error = pool_grow(pp, flags);
   1018      1.113      yamt 		pr_enter(pp, file, line);
   1019      1.113      yamt 		if (error != 0) {
   1020       1.21   thorpej 			/*
   1021       1.55   thorpej 			 * We were unable to allocate a page or item
   1022       1.55   thorpej 			 * header, but we released the lock during
   1023       1.55   thorpej 			 * allocation, so perhaps items were freed
   1024       1.55   thorpej 			 * back to the pool.  Check for this case.
   1025       1.21   thorpej 			 */
   1026       1.21   thorpej 			if (pp->pr_curpage != NULL)
   1027       1.21   thorpej 				goto startover;
   1028       1.15        pk 
   1029      1.117      yamt 			pp->pr_nfail++;
   1030       1.25   thorpej 			pr_leave(pp);
   1031      1.117      yamt 			simple_unlock(&pp->pr_slock);
   1032      1.117      yamt 			return (NULL);
   1033        1.1        pk 		}
   1034        1.3        pk 
   1035       1.20   thorpej 		/* Start the allocation process over. */
   1036       1.20   thorpej 		goto startover;
   1037        1.3        pk 	}
   1038       1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1039       1.97      yamt #ifdef DIAGNOSTIC
   1040       1.97      yamt 		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
   1041       1.97      yamt 			pr_leave(pp);
   1042       1.97      yamt 			simple_unlock(&pp->pr_slock);
   1043       1.97      yamt 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1044       1.97      yamt 		}
   1045       1.97      yamt #endif
   1046       1.97      yamt 		v = pr_item_notouch_get(pp, ph);
   1047       1.97      yamt #ifdef POOL_DIAGNOSTIC
   1048       1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1049       1.97      yamt #endif
   1050       1.97      yamt 	} else {
   1051      1.102       chs 		v = pi = LIST_FIRST(&ph->ph_itemlist);
   1052       1.97      yamt 		if (__predict_false(v == NULL)) {
   1053       1.97      yamt 			pr_leave(pp);
   1054       1.97      yamt 			simple_unlock(&pp->pr_slock);
   1055       1.97      yamt 			panic("pool_get: %s: page empty", pp->pr_wchan);
   1056       1.97      yamt 		}
   1057       1.20   thorpej #ifdef DIAGNOSTIC
   1058       1.97      yamt 		if (__predict_false(pp->pr_nitems == 0)) {
   1059       1.97      yamt 			pr_leave(pp);
   1060       1.97      yamt 			simple_unlock(&pp->pr_slock);
   1061       1.97      yamt 			printf("pool_get: %s: items on itemlist, nitems %u\n",
   1062       1.97      yamt 			    pp->pr_wchan, pp->pr_nitems);
   1063       1.97      yamt 			panic("pool_get: nitems inconsistent");
   1064       1.97      yamt 		}
   1065       1.65     enami #endif
   1066       1.56  sommerfe 
   1067       1.65     enami #ifdef POOL_DIAGNOSTIC
   1068       1.97      yamt 		pr_log(pp, v, PRLOG_GET, file, line);
   1069       1.65     enami #endif
   1070        1.3        pk 
   1071       1.65     enami #ifdef DIAGNOSTIC
   1072       1.97      yamt 		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
   1073       1.97      yamt 			pr_printlog(pp, pi, printf);
   1074       1.97      yamt 			panic("pool_get(%s): free list modified: "
   1075       1.97      yamt 			    "magic=%x; page %p; item addr %p\n",
   1076       1.97      yamt 			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
   1077       1.97      yamt 		}
   1078        1.3        pk #endif
   1079        1.3        pk 
   1080       1.97      yamt 		/*
   1081       1.97      yamt 		 * Remove from item list.
   1082       1.97      yamt 		 */
   1083      1.102       chs 		LIST_REMOVE(pi, pi_list);
   1084       1.97      yamt 	}
   1085       1.20   thorpej 	pp->pr_nitems--;
   1086       1.20   thorpej 	pp->pr_nout++;
   1087        1.6   thorpej 	if (ph->ph_nmissing == 0) {
   1088        1.6   thorpej #ifdef DIAGNOSTIC
   1089       1.34   thorpej 		if (__predict_false(pp->pr_nidle == 0))
   1090        1.6   thorpej 			panic("pool_get: nidle inconsistent");
   1091        1.6   thorpej #endif
   1092        1.6   thorpej 		pp->pr_nidle--;
   1093       1.88       chs 
   1094       1.88       chs 		/*
   1095       1.88       chs 		 * This page was previously empty.  Move it to the list of
   1096       1.88       chs 		 * partially-full pages.  This page is already curpage.
   1097       1.88       chs 		 */
   1098       1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1099       1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1100        1.6   thorpej 	}
   1101        1.3        pk 	ph->ph_nmissing++;
   1102       1.97      yamt 	if (ph->ph_nmissing == pp->pr_itemsperpage) {
   1103       1.21   thorpej #ifdef DIAGNOSTIC
   1104       1.97      yamt 		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
   1105      1.102       chs 		    !LIST_EMPTY(&ph->ph_itemlist))) {
   1106       1.25   thorpej 			pr_leave(pp);
   1107       1.21   thorpej 			simple_unlock(&pp->pr_slock);
   1108       1.21   thorpej 			panic("pool_get: %s: nmissing inconsistent",
   1109       1.21   thorpej 			    pp->pr_wchan);
   1110       1.21   thorpej 		}
   1111       1.21   thorpej #endif
   1112        1.3        pk 		/*
   1113       1.88       chs 		 * This page is now full.  Move it to the full list
   1114       1.88       chs 		 * and select a new current page.
   1115        1.3        pk 		 */
   1116       1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1117       1.88       chs 		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
   1118       1.88       chs 		pool_update_curpage(pp);
   1119        1.1        pk 	}
   1120        1.3        pk 
   1121        1.3        pk 	pp->pr_nget++;
   1122      1.111  christos 	pr_leave(pp);
   1123       1.20   thorpej 
   1124       1.20   thorpej 	/*
   1125       1.20   thorpej 	 * If we have a low water mark and we are now below that low
   1126       1.20   thorpej 	 * water mark, add more items to the pool.
   1127       1.20   thorpej 	 */
   1128       1.53   thorpej 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1129       1.20   thorpej 		/*
   1130       1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1131       1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1132       1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1133       1.20   thorpej 		 */
   1134       1.20   thorpej 	}
   1135       1.20   thorpej 
   1136       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1137  1.122.2.1        ad 	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
   1138        1.1        pk 	return (v);
   1139        1.1        pk }
   1140        1.1        pk 
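/*
 * Example (an illustrative sketch; `foo_pool' and `struct foo' are
 * hypothetical): the usual pool_get()/pool_put() pairing.  For a pool
 * that is also used from interrupt context, the caller supplies the
 * spl protection:
 *
 *	int s;
 *	struct foo *f;
 *
 *	s = splvm();
 *	f = pool_get(&foo_pool, PR_NOWAIT);
 *	splx(s);
 *	if (f == NULL)
 *		return (ENOMEM);
 *	...
 *	s = splvm();
 *	pool_put(&foo_pool, f);
 *	splx(s);
 */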
   1141        1.1        pk /*
   1142       1.43   thorpej  * Internal version of pool_put().  Pool is already locked/entered.
   1143        1.1        pk  */
   1144       1.43   thorpej static void
   1145      1.101   thorpej pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
   1146        1.1        pk {
   1147        1.1        pk 	struct pool_item *pi = v;
   1148        1.3        pk 	struct pool_item_header *ph;
   1149        1.3        pk 
   1150       1.61       chs 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
   1151       1.61       chs 
   1152       1.30   thorpej #ifdef DIAGNOSTIC
   1153       1.34   thorpej 	if (__predict_false(pp->pr_nout == 0)) {
   1154       1.30   thorpej 		printf("pool %s: putting with none out\n",
   1155       1.30   thorpej 		    pp->pr_wchan);
   1156       1.30   thorpej 		panic("pool_put");
   1157       1.30   thorpej 	}
   1158       1.30   thorpej #endif
   1159        1.3        pk 
   1160      1.121      yamt 	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
   1161       1.25   thorpej 		pr_printlog(pp, NULL, printf);
   1162        1.3        pk 		panic("pool_put: %s: page header missing", pp->pr_wchan);
   1163        1.3        pk 	}
   1164       1.28   thorpej 
   1165       1.28   thorpej #ifdef LOCKDEBUG
   1166       1.28   thorpej 	/*
   1167       1.28   thorpej 	 * Check if we're freeing a locked simple lock.
   1168       1.28   thorpej 	 */
   1169       1.28   thorpej 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
   1170       1.28   thorpej #endif
   1171        1.3        pk 
   1172        1.3        pk 	/*
   1173        1.3        pk 	 * Return to item list.
   1174        1.3        pk 	 */
   1175       1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1176       1.97      yamt 		pr_item_notouch_put(pp, ph, v);
   1177       1.97      yamt 	} else {
   1178        1.2        pk #ifdef DIAGNOSTIC
   1179       1.97      yamt 		pi->pi_magic = PI_MAGIC;
   1180        1.3        pk #endif
   1181       1.32       chs #ifdef DEBUG
   1182       1.97      yamt 		{
   1183       1.97      yamt 			int i, *ip = v;
   1184       1.32       chs 
   1185       1.97      yamt 			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
   1186       1.97      yamt 				*ip++ = PI_MAGIC;
   1187       1.97      yamt 			}
   1188       1.32       chs 		}
   1189       1.32       chs #endif
   1190       1.32       chs 
   1191      1.102       chs 		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1192       1.97      yamt 	}
   1193       1.79   thorpej 	KDASSERT(ph->ph_nmissing != 0);
   1194        1.3        pk 	ph->ph_nmissing--;
   1195        1.3        pk 	pp->pr_nput++;
   1196       1.20   thorpej 	pp->pr_nitems++;
   1197       1.20   thorpej 	pp->pr_nout--;
   1198        1.3        pk 
   1199        1.3        pk 	/* Cancel "pool empty" condition if it exists */
   1200        1.3        pk 	if (pp->pr_curpage == NULL)
   1201        1.3        pk 		pp->pr_curpage = ph;
   1202        1.3        pk 
   1203        1.3        pk 	if (pp->pr_flags & PR_WANTED) {
   1204        1.3        pk 		pp->pr_flags &= ~PR_WANTED;
   1205       1.15        pk 		if (ph->ph_nmissing == 0)
   1206       1.15        pk 			pp->pr_nidle++;
   1207        1.3        pk 		wakeup((caddr_t)pp);
   1208        1.3        pk 		return;
   1209        1.3        pk 	}
   1210        1.3        pk 
   1211        1.3        pk 	/*
   1212       1.88       chs 	 * If this page is now empty, do one of two things:
   1213       1.21   thorpej 	 *
    1214       1.88       chs 	 *	(1) If we have more pages than the page high water mark,
    1215       1.96   thorpej 	 *	    or the page allocator is starved, free the page back
    1216       1.90   thorpej 	 *	    to the system.  ONLY CONSIDER FREEING BACK A PAGE IF
    1217       1.90   thorpej 	 *	    WE HAVE MORE THAN OUR MINIMUM PAGE CLAIM.
   1218       1.21   thorpej 	 *
   1219       1.88       chs 	 *	(2) Otherwise, move the page to the empty page list.
   1220       1.88       chs 	 *
   1221       1.88       chs 	 * Either way, select a new current page (so we use a partially-full
   1222       1.88       chs 	 * page if one is available).
   1223        1.3        pk 	 */
   1224        1.3        pk 	if (ph->ph_nmissing == 0) {
   1225        1.6   thorpej 		pp->pr_nidle++;
   1226       1.90   thorpej 		if (pp->pr_npages > pp->pr_minpages &&
   1227       1.90   thorpej 		    (pp->pr_npages > pp->pr_maxpages ||
   1228      1.117      yamt 		     pa_starved_p(pp->pr_alloc))) {
   1229      1.101   thorpej 			pr_rmpage(pp, ph, pq);
   1230        1.3        pk 		} else {
   1231       1.88       chs 			LIST_REMOVE(ph, ph_pagelist);
   1232       1.88       chs 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1233        1.3        pk 
   1234       1.21   thorpej 			/*
   1235       1.21   thorpej 			 * Update the timestamp on the page.  A page must
   1236       1.21   thorpej 			 * be idle for some period of time before it can
   1237       1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
   1238       1.21   thorpej 			 * ping-pong'ing for memory.
   1239       1.21   thorpej 			 */
   1240      1.118    kardel 			getmicrotime(&ph->ph_time);
   1241        1.1        pk 		}
   1242       1.88       chs 		pool_update_curpage(pp);
   1243        1.1        pk 	}
   1244       1.88       chs 
   1245       1.21   thorpej 	/*
   1246       1.88       chs 	 * If the page was previously completely full, move it to the
   1247       1.88       chs 	 * partially-full list and make it the current page.  The next
   1248       1.88       chs 	 * allocation will get the item from this page, instead of
   1249       1.88       chs 	 * further fragmenting the pool.
   1250       1.21   thorpej 	 */
   1251       1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1252       1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1253       1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1254       1.21   thorpej 		pp->pr_curpage = ph;
   1255       1.21   thorpej 	}
   1256       1.43   thorpej }
   1257       1.43   thorpej 
   1258       1.43   thorpej /*
   1259       1.43   thorpej  * Return resource to the pool; must be called at appropriate spl level
   1260       1.43   thorpej  */
   1261       1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1262       1.43   thorpej void
   1263       1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
   1264       1.43   thorpej {
   1265      1.101   thorpej 	struct pool_pagelist pq;
   1266      1.101   thorpej 
   1267      1.101   thorpej 	LIST_INIT(&pq);
   1268       1.43   thorpej 
   1269       1.43   thorpej 	simple_lock(&pp->pr_slock);
   1270       1.43   thorpej 	pr_enter(pp, file, line);
   1271       1.43   thorpej 
   1272       1.56  sommerfe 	pr_log(pp, v, PRLOG_PUT, file, line);
   1273       1.56  sommerfe 
   1274      1.101   thorpej 	pool_do_put(pp, v, &pq);
   1275       1.21   thorpej 
   1276       1.25   thorpej 	pr_leave(pp);
   1277       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1278      1.101   thorpej 
   1279      1.102       chs 	pr_pagelist_free(pp, &pq);
   1280        1.1        pk }
   1281       1.57  sommerfe #undef pool_put
   1282       1.59   thorpej #endif /* POOL_DIAGNOSTIC */
   1283        1.1        pk 
   1284       1.56  sommerfe void
   1285       1.56  sommerfe pool_put(struct pool *pp, void *v)
   1286       1.56  sommerfe {
   1287      1.101   thorpej 	struct pool_pagelist pq;
   1288      1.101   thorpej 
   1289      1.101   thorpej 	LIST_INIT(&pq);
   1290       1.56  sommerfe 
   1291       1.56  sommerfe 	simple_lock(&pp->pr_slock);
   1292      1.101   thorpej 	pool_do_put(pp, v, &pq);
   1293      1.101   thorpej 	simple_unlock(&pp->pr_slock);
   1294       1.56  sommerfe 
   1295      1.102       chs 	pr_pagelist_free(pp, &pq);
   1296       1.56  sommerfe }
   1297       1.57  sommerfe 
   1298       1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1299       1.57  sommerfe #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1300       1.56  sommerfe #endif
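/*
 * The #undef/#define pair bracketing the out-of-line pool_put() above
 * exists because, under POOL_DIAGNOSTIC, sys/pool.h maps pool_put()
 * to _pool_put(); the macro must be suspended while the real function
 * is defined and is then restored for the rest of this file.
 */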
   1301       1.74   thorpej 
   1302       1.74   thorpej /*
   1303      1.113      yamt  * pool_grow: grow a pool by a page.
   1304      1.113      yamt  *
    1305      1.113      yamt  * => must be called with the pool locked.
    1306      1.113      yamt  * => unlocks and relocks the pool (the back-end allocator may block).
    1307      1.113      yamt  * => returns with the pool locked.
   1308      1.113      yamt  */
   1309      1.113      yamt 
   1310      1.113      yamt static int
   1311      1.113      yamt pool_grow(struct pool *pp, int flags)
   1312      1.113      yamt {
   1313      1.113      yamt 	struct pool_item_header *ph = NULL;
   1314      1.113      yamt 	char *cp;
   1315      1.113      yamt 
   1316      1.113      yamt 	simple_unlock(&pp->pr_slock);
   1317      1.113      yamt 	cp = pool_allocator_alloc(pp, flags);
   1318      1.113      yamt 	if (__predict_true(cp != NULL)) {
   1319      1.113      yamt 		ph = pool_alloc_item_header(pp, cp, flags);
   1320      1.113      yamt 	}
   1321      1.113      yamt 	if (__predict_false(cp == NULL || ph == NULL)) {
   1322      1.113      yamt 		if (cp != NULL) {
   1323      1.113      yamt 			pool_allocator_free(pp, cp);
   1324      1.113      yamt 		}
   1325      1.113      yamt 		simple_lock(&pp->pr_slock);
   1326      1.113      yamt 		return ENOMEM;
   1327      1.113      yamt 	}
   1328      1.113      yamt 
   1329      1.113      yamt 	simple_lock(&pp->pr_slock);
   1330      1.113      yamt 	pool_prime_page(pp, cp, ph);
   1331      1.113      yamt 	pp->pr_npagealloc++;
   1332      1.113      yamt 	return 0;
   1333      1.113      yamt }
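/*
 * Illustrative caller pattern (a sketch of what pool_get() above
 * does): because pool_grow() drops and retakes pr_slock, cached pool
 * state must be re-examined once it returns:
 *
 *	error = pool_grow(pp, flags);
 *	if (error != 0 && pp->pr_curpage != NULL)
 *		goto startover;		(items were freed meanwhile)
 */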
   1334      1.113      yamt 
   1335      1.113      yamt /*
   1336       1.74   thorpej  * Add N items to the pool.
   1337       1.74   thorpej  */
   1338       1.74   thorpej int
   1339       1.74   thorpej pool_prime(struct pool *pp, int n)
   1340       1.74   thorpej {
   1341       1.75    simonb 	int newpages;
   1342      1.113      yamt 	int error = 0;
   1343       1.74   thorpej 
   1344       1.74   thorpej 	simple_lock(&pp->pr_slock);
   1345       1.74   thorpej 
   1346       1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1347       1.74   thorpej 
   1348       1.74   thorpej 	while (newpages-- > 0) {
   1349      1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1350      1.113      yamt 		if (error) {
   1351       1.74   thorpej 			break;
   1352       1.74   thorpej 		}
   1353       1.74   thorpej 		pp->pr_minpages++;
   1354       1.74   thorpej 	}
   1355       1.74   thorpej 
   1356       1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1357       1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1358       1.74   thorpej 
   1359       1.74   thorpej 	simple_unlock(&pp->pr_slock);
   1360      1.113      yamt 	return error;
   1361       1.74   thorpej }
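/*
 * Example (a hedged sketch; `foo_pool' is hypothetical): a subsystem
 * that knows it needs roughly 64 items early on can pre-fill its pool
 * at attach time:
 *
 *	if (pool_prime(&foo_pool, 64) != 0)
 *		printf("foo: could not prime pool\n");
 *
 * The request is rounded up to whole pages (pr_itemsperpage items per
 * page), and the pages are allocated with PR_NOWAIT.
 */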
   1362       1.55   thorpej 
   1363       1.55   thorpej /*
   1364        1.3        pk  * Add a page worth of items to the pool.
   1365       1.21   thorpej  *
   1366       1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1367        1.3        pk  */
   1368       1.55   thorpej static void
   1369       1.55   thorpej pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
   1370        1.3        pk {
   1371        1.3        pk 	struct pool_item *pi;
   1372        1.3        pk 	caddr_t cp = storage;
   1373  1.122.2.1        ad 	const unsigned int align = pp->pr_align;
   1374  1.122.2.1        ad 	const unsigned int ioff = pp->pr_itemoffset;
   1375       1.55   thorpej 	int n;
   1376       1.36        pk 
   1377       1.91      yamt 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
   1378       1.91      yamt 
   1379       1.66   thorpej #ifdef DIAGNOSTIC
   1380      1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
   1381      1.121      yamt 	    ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1382       1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1383       1.66   thorpej #endif
   1384        1.3        pk 
   1385        1.3        pk 	/*
   1386        1.3        pk 	 * Insert page header.
   1387        1.3        pk 	 */
   1388       1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1389      1.102       chs 	LIST_INIT(&ph->ph_itemlist);
   1390        1.3        pk 	ph->ph_page = storage;
   1391        1.3        pk 	ph->ph_nmissing = 0;
   1392      1.118    kardel 	getmicrotime(&ph->ph_time);
   1393       1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1394       1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1395        1.3        pk 
   1396        1.6   thorpej 	pp->pr_nidle++;
   1397        1.6   thorpej 
   1398        1.3        pk 	/*
   1399        1.3        pk 	 * Color this page.
   1400        1.3        pk 	 */
   1401        1.3        pk 	cp = (caddr_t)(cp + pp->pr_curcolor);
   1402        1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1403        1.3        pk 		pp->pr_curcolor = 0;
   1404        1.3        pk 
   1405        1.3        pk 	/*
    1406        1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1407        1.3        pk 	 */
   1408        1.3        pk 	if (ioff != 0)
   1409        1.3        pk 		cp = (caddr_t)(cp + (align - ioff));
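	/*
	 * Worked example (numbers are illustrative): with align = 32
	 * and ioff = 8, each item must start so that (item + 8) is
	 * 32-byte aligned.  cp sits on a 32-byte boundary here (the
	 * color is a multiple of the alignment), so advancing it by
	 * align - ioff = 24 achieves exactly that, as the KASSERT
	 * below verifies.
	 */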
   1410        1.3        pk 
   1411  1.122.2.1        ad 	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1412  1.122.2.1        ad 
   1413        1.3        pk 	/*
   1414        1.3        pk 	 * Insert remaining chunks on the bucket list.
   1415        1.3        pk 	 */
   1416        1.3        pk 	n = pp->pr_itemsperpage;
   1417       1.20   thorpej 	pp->pr_nitems += n;
   1418        1.3        pk 
   1419       1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1420       1.99      yamt 		pool_item_freelist_t *freelist = PR_FREELIST(ph);
   1421       1.97      yamt 		int i;
   1422       1.97      yamt 
   1423       1.99      yamt 		ph->ph_off = cp - storage;
   1424       1.97      yamt 		ph->ph_firstfree = 0;
   1425       1.97      yamt 		for (i = 0; i < n - 1; i++)
   1426       1.97      yamt 			freelist[i] = i + 1;
   1427       1.97      yamt 		freelist[n - 1] = PR_INDEX_EOL;
   1428       1.97      yamt 	} else {
   1429       1.97      yamt 		while (n--) {
   1430       1.97      yamt 			pi = (struct pool_item *)cp;
   1431       1.78   thorpej 
   1432       1.97      yamt 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1433        1.3        pk 
   1434       1.97      yamt 			/* Insert on page list */
   1435      1.102       chs 			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
   1436        1.3        pk #ifdef DIAGNOSTIC
   1437       1.97      yamt 			pi->pi_magic = PI_MAGIC;
   1438        1.3        pk #endif
   1439       1.97      yamt 			cp = (caddr_t)(cp + pp->pr_size);
   1440  1.122.2.1        ad 
   1441  1.122.2.1        ad 			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
   1442       1.97      yamt 		}
   1443        1.3        pk 	}
   1444        1.3        pk 
   1445        1.3        pk 	/*
   1446        1.3        pk 	 * If the pool was depleted, point at the new page.
   1447        1.3        pk 	 */
   1448        1.3        pk 	if (pp->pr_curpage == NULL)
   1449        1.3        pk 		pp->pr_curpage = ph;
   1450        1.3        pk 
   1451        1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1452        1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1453        1.3        pk }
   1454        1.3        pk 
   1455       1.20   thorpej /*
    1456       1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark; it
    1457       1.88       chs  * grows the pool until pr_nitems is back up to the low water mark.
   1458       1.20   thorpej  *
    1459       1.21   thorpej  * Note 1: we never wait for memory here; we let the caller decide what to do.
    1460       1.20   thorpej  *
    1461       1.73   thorpej  * Note 2: we must be called with the pool already locked, and we return
    1462       1.20   thorpej  * with it locked.
   1463       1.20   thorpej  */
   1464       1.20   thorpej static int
   1465       1.42   thorpej pool_catchup(struct pool *pp)
   1466       1.20   thorpej {
   1467       1.20   thorpej 	int error = 0;
   1468       1.20   thorpej 
   1469       1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1470      1.113      yamt 		error = pool_grow(pp, PR_NOWAIT);
   1471      1.113      yamt 		if (error) {
   1472       1.20   thorpej 			break;
   1473       1.20   thorpej 		}
   1474       1.20   thorpej 	}
   1475      1.113      yamt 	return error;
   1476       1.20   thorpej }
   1477       1.20   thorpej 
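/*
 * pool_update_curpage:
 *
 *	Select a new current page, preferring a partially-full page
 *	over an empty one so that allocations fragment the pool as
 *	little as possible.
 */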
   1478       1.88       chs static void
   1479       1.88       chs pool_update_curpage(struct pool *pp)
   1480       1.88       chs {
   1481       1.88       chs 
   1482       1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1483       1.88       chs 	if (pp->pr_curpage == NULL) {
   1484       1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1485       1.88       chs 	}
   1486       1.88       chs }
   1487       1.88       chs 
   1488        1.3        pk void
   1489       1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1490        1.3        pk {
   1491       1.15        pk 
   1492       1.21   thorpej 	simple_lock(&pp->pr_slock);
   1493       1.21   thorpej 
   1494        1.3        pk 	pp->pr_minitems = n;
   1495       1.15        pk 	pp->pr_minpages = (n == 0)
   1496       1.15        pk 		? 0
   1497       1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1498       1.20   thorpej 
   1499       1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1500       1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1501       1.20   thorpej 		/*
   1502       1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1503       1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1504       1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1505       1.20   thorpej 		 */
   1506       1.20   thorpej 	}
   1507       1.21   thorpej 
   1508       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1509        1.3        pk }
   1510        1.3        pk 
   1511        1.3        pk void
   1512       1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1513        1.3        pk {
   1514       1.15        pk 
   1515       1.21   thorpej 	simple_lock(&pp->pr_slock);
   1516       1.21   thorpej 
   1517       1.15        pk 	pp->pr_maxpages = (n == 0)
   1518       1.15        pk 		? 0
   1519       1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1520       1.21   thorpej 
   1521       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1522        1.3        pk }
   1523        1.3        pk 
   1524       1.20   thorpej void
   1525       1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1526       1.20   thorpej {
   1527       1.20   thorpej 
   1528       1.21   thorpej 	simple_lock(&pp->pr_slock);
   1529       1.20   thorpej 
   1530       1.20   thorpej 	pp->pr_hardlimit = n;
   1531       1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1532       1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1533       1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1534       1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1535       1.20   thorpej 
   1536       1.20   thorpej 	/*
   1537       1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1538       1.21   thorpej 	 * release the lock.
   1539       1.20   thorpej 	 */
   1540       1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1541       1.20   thorpej 		? 0
   1542       1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1543       1.21   thorpej 
   1544       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1545       1.20   thorpej }
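/*
 * Example (a hedged sketch; `foo_pool' is hypothetical): keep at
 * least 16 items ready, cap the pool at 128 items, and rate-limit
 * the hard-limit warning to one per minute:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethardlimit(&foo_pool, 128, "WARNING: foo_pool limit", 60);
 */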
   1546        1.3        pk 
   1547        1.3        pk /*
   1548        1.3        pk  * Release all complete pages that have not been used recently.
   1549        1.3        pk  */
   1550       1.66   thorpej int
   1551       1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1552       1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1553       1.56  sommerfe #else
   1554       1.56  sommerfe pool_reclaim(struct pool *pp)
   1555       1.56  sommerfe #endif
   1556        1.3        pk {
   1557        1.3        pk 	struct pool_item_header *ph, *phnext;
   1558       1.43   thorpej 	struct pool_cache *pc;
   1559       1.61       chs 	struct pool_pagelist pq;
   1560      1.102       chs 	struct pool_cache_grouplist pcgl;
   1561      1.102       chs 	struct timeval curtime, diff;
   1562        1.3        pk 
   1563       1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1564       1.68   thorpej 		/*
   1565       1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1566       1.68   thorpej 		 */
   1567       1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1568       1.68   thorpej 	}
   1569       1.68   thorpej 
   1570       1.21   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1571       1.66   thorpej 		return (0);
   1572       1.25   thorpej 	pr_enter(pp, file, line);
   1573       1.68   thorpej 
   1574       1.88       chs 	LIST_INIT(&pq);
   1575      1.102       chs 	LIST_INIT(&pcgl);
   1576        1.3        pk 
   1577       1.43   thorpej 	/*
   1578       1.43   thorpej 	 * Reclaim items from the pool's caches.
   1579       1.43   thorpej 	 */
   1580      1.102       chs 	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1581      1.102       chs 		pool_cache_reclaim(pc, &pq, &pcgl);
   1582       1.43   thorpej 
   1583      1.118    kardel 	getmicrotime(&curtime);
   1584       1.21   thorpej 
   1585       1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1586       1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1587        1.3        pk 
   1588        1.3        pk 		/* Check our minimum page claim */
   1589        1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1590        1.3        pk 			break;
   1591        1.3        pk 
   1592       1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1593       1.88       chs 		timersub(&curtime, &ph->ph_time, &diff);
   1594      1.117      yamt 		if (diff.tv_sec < pool_inactive_time
   1595      1.117      yamt 		    && !pa_starved_p(pp->pr_alloc))
   1596       1.88       chs 			continue;
   1597       1.21   thorpej 
   1598       1.88       chs 		/*
   1599       1.88       chs 		 * If freeing this page would put us below
   1600       1.88       chs 		 * the low water mark, stop now.
   1601       1.88       chs 		 */
   1602       1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1603       1.88       chs 		    pp->pr_minitems)
   1604       1.88       chs 			break;
   1605       1.21   thorpej 
   1606       1.88       chs 		pr_rmpage(pp, ph, &pq);
   1607        1.3        pk 	}
   1608        1.3        pk 
   1609       1.25   thorpej 	pr_leave(pp);
   1610       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1611      1.102       chs 	if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
   1612      1.102       chs 		return 0;
   1613       1.66   thorpej 
   1614      1.101   thorpej 	pr_pagelist_free(pp, &pq);
   1615      1.102       chs 	pcg_grouplist_free(&pcgl);
   1616       1.66   thorpej 	return (1);
   1617        1.3        pk }
   1618        1.3        pk 
   1619        1.3        pk /*
   1620        1.3        pk  * Drain pools, one at a time.
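 * Successive calls advance `drainpp' round-robin through pool_head,
 * so repeated invocation eventually visits every pool.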
   1621       1.21   thorpej  *
   1622       1.21   thorpej  * Note, we must never be called from an interrupt context.
   1623        1.3        pk  */
   1624        1.3        pk void
   1625       1.42   thorpej pool_drain(void *arg)
   1626        1.3        pk {
   1627        1.3        pk 	struct pool *pp;
   1628       1.23   thorpej 	int s;
   1629        1.3        pk 
   1630       1.61       chs 	pp = NULL;
   1631       1.49   thorpej 	s = splvm();
   1632       1.23   thorpej 	simple_lock(&pool_head_slock);
   1633       1.61       chs 	if (drainpp == NULL) {
   1634      1.102       chs 		drainpp = LIST_FIRST(&pool_head);
   1635       1.61       chs 	}
   1636       1.61       chs 	if (drainpp) {
   1637       1.61       chs 		pp = drainpp;
   1638      1.102       chs 		drainpp = LIST_NEXT(pp, pr_poollist);
   1639       1.61       chs 	}
   1640       1.61       chs 	simple_unlock(&pool_head_slock);
   1641      1.115  christos 	if (pp)
   1642      1.115  christos 		pool_reclaim(pp);
   1643       1.61       chs 	splx(s);
   1644        1.3        pk }
   1645        1.3        pk 
   1646        1.3        pk /*
   1647        1.3        pk  * Diagnostic helpers.
   1648        1.3        pk  */
   1649        1.3        pk void
   1650       1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1651       1.21   thorpej {
   1652       1.21   thorpej 	int s;
   1653       1.21   thorpej 
   1654       1.49   thorpej 	s = splvm();
   1655       1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1656       1.25   thorpej 		printf("pool %s is locked; try again later\n",
   1657       1.25   thorpej 		    pp->pr_wchan);
   1658       1.25   thorpej 		splx(s);
   1659       1.25   thorpej 		return;
   1660       1.25   thorpej 	}
   1661       1.25   thorpej 	pool_print1(pp, modif, printf);
   1662       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1663       1.21   thorpej 	splx(s);
   1664       1.21   thorpej }
   1665       1.21   thorpej 
   1666       1.25   thorpej void
   1667      1.108      yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
   1668      1.108      yamt {
   1669      1.108      yamt 	struct pool *pp;
   1670      1.108      yamt 
   1671      1.108      yamt 	if (simple_lock_try(&pool_head_slock) == 0) {
   1672      1.108      yamt 		(*pr)("WARNING: pool_head_slock is locked\n");
   1673      1.108      yamt 	} else {
   1674      1.108      yamt 		simple_unlock(&pool_head_slock);
   1675      1.108      yamt 	}
   1676      1.108      yamt 
   1677      1.108      yamt 	LIST_FOREACH(pp, &pool_head, pr_poollist) {
   1678      1.108      yamt 		pool_printit(pp, modif, pr);
   1679      1.108      yamt 	}
   1680      1.108      yamt }
   1681      1.108      yamt 
   1682      1.108      yamt void
   1683       1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1684       1.25   thorpej {
   1685       1.25   thorpej 
   1686       1.25   thorpej 	if (pp == NULL) {
   1687       1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1688       1.25   thorpej 		return;
   1689       1.25   thorpej 	}
   1690       1.25   thorpej 
   1691       1.25   thorpej 	/*
   1692       1.25   thorpej 	 * Called from DDB; interrupts should be blocked, and all
   1693       1.25   thorpej 	 * other processors should be paused.  We can skip locking
   1694       1.25   thorpej 	 * the pool in this case.
   1695       1.25   thorpej 	 *
   1696       1.25   thorpej 	 * We do a simple_lock_try() just to print the lock
   1697       1.25   thorpej 	 * status, however.
   1698       1.25   thorpej 	 */
   1699       1.25   thorpej 
   1700       1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1701       1.25   thorpej 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1702       1.25   thorpej 	else
   1703      1.107      yamt 		simple_unlock(&pp->pr_slock);
   1704       1.25   thorpej 
   1705       1.25   thorpej 	pool_print1(pp, modif, pr);
   1706       1.25   thorpej }
   1707       1.25   thorpej 
   1708       1.21   thorpej static void
   1709       1.97      yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1710       1.97      yamt     void (*pr)(const char *, ...))
   1711       1.88       chs {
   1712       1.88       chs 	struct pool_item_header *ph;
   1713       1.88       chs #ifdef DIAGNOSTIC
   1714       1.88       chs 	struct pool_item *pi;
   1715       1.88       chs #endif
   1716       1.88       chs 
   1717       1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1718       1.88       chs 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1719       1.88       chs 		    ph->ph_page, ph->ph_nmissing,
   1720       1.88       chs 		    (u_long)ph->ph_time.tv_sec,
   1721       1.88       chs 		    (u_long)ph->ph_time.tv_usec);
   1722       1.88       chs #ifdef DIAGNOSTIC
   1723       1.97      yamt 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1724      1.102       chs 			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1725       1.97      yamt 				if (pi->pi_magic != PI_MAGIC) {
   1726       1.97      yamt 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1727       1.97      yamt 					    pi, pi->pi_magic);
   1728       1.97      yamt 				}
   1729       1.88       chs 			}
   1730       1.88       chs 		}
   1731       1.88       chs #endif
   1732       1.88       chs 	}
   1733       1.88       chs }
   1734       1.88       chs 
   1735       1.88       chs static void
   1736       1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1737        1.3        pk {
   1738       1.25   thorpej 	struct pool_item_header *ph;
   1739       1.44   thorpej 	struct pool_cache *pc;
   1740       1.44   thorpej 	struct pool_cache_group *pcg;
   1741       1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1742       1.25   thorpej 	char c;
   1743       1.25   thorpej 
   1744       1.25   thorpej 	while ((c = *modif++) != '\0') {
   1745       1.25   thorpej 		if (c == 'l')
   1746       1.25   thorpej 			print_log = 1;
   1747       1.25   thorpej 		if (c == 'p')
   1748       1.25   thorpej 			print_pagelist = 1;
   1749       1.44   thorpej 		if (c == 'c')
   1750       1.44   thorpej 			print_cache = 1;
   1751       1.25   thorpej 	}
   1752       1.25   thorpej 
   1753       1.25   thorpej 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1754       1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1755       1.25   thorpej 	    pp->pr_roflags);
   1756       1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1757       1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1758       1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1759       1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1760       1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1761       1.25   thorpej 
   1762       1.25   thorpej 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1763       1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1764       1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1765       1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1766       1.25   thorpej 
   1767       1.25   thorpej 	if (print_pagelist == 0)
   1768       1.25   thorpej 		goto skip_pagelist;
   1769       1.25   thorpej 
   1770       1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1771       1.88       chs 		(*pr)("\n\tempty page list:\n");
   1772       1.97      yamt 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1773       1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1774       1.88       chs 		(*pr)("\n\tfull page list:\n");
   1775       1.97      yamt 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1776       1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1777       1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1778       1.97      yamt 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1779       1.88       chs 
   1780       1.25   thorpej 	if (pp->pr_curpage == NULL)
   1781       1.25   thorpej 		(*pr)("\tno current page\n");
   1782       1.25   thorpej 	else
   1783       1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1784       1.25   thorpej 
   1785       1.25   thorpej  skip_pagelist:
   1786       1.25   thorpej 	if (print_log == 0)
   1787       1.25   thorpej 		goto skip_log;
   1788       1.25   thorpej 
   1789       1.25   thorpej 	(*pr)("\n");
   1790       1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1791       1.25   thorpej 		(*pr)("\tno log\n");
   1792      1.122  christos 	else {
   1793       1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1794      1.122  christos 	}
   1795        1.3        pk 
   1796       1.25   thorpej  skip_log:
   1797       1.44   thorpej 	if (print_cache == 0)
   1798       1.44   thorpej 		goto skip_cache;
   1799       1.44   thorpej 
   1800      1.102       chs #define PR_GROUPLIST(pcg)						\
   1801      1.102       chs 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
   1802      1.102       chs 	for (i = 0; i < PCG_NOBJECTS; i++) {				\
   1803      1.102       chs 		if (pcg->pcg_objects[i].pcgo_pa !=			\
   1804      1.102       chs 		    POOL_PADDR_INVALID) {				\
   1805      1.102       chs 			(*pr)("\t\t\t%p, 0x%llx\n",			\
   1806      1.102       chs 			    pcg->pcg_objects[i].pcgo_va,		\
   1807      1.102       chs 			    (unsigned long long)			\
   1808      1.102       chs 			    pcg->pcg_objects[i].pcgo_pa);		\
   1809      1.102       chs 		} else {						\
   1810      1.102       chs 			(*pr)("\t\t\t%p\n",				\
   1811      1.102       chs 			    pcg->pcg_objects[i].pcgo_va);		\
   1812      1.102       chs 		}							\
   1813      1.102       chs 	}
   1814      1.102       chs 
   1815      1.102       chs 	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1816      1.103       chs 		(*pr)("\tcache %p\n", pc);
   1817       1.48   thorpej 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1818       1.48   thorpej 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1819      1.102       chs 		(*pr)("\t    full groups:\n");
   1820      1.103       chs 		LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
   1821      1.102       chs 			PR_GROUPLIST(pcg);
   1822      1.103       chs 		}
   1823      1.102       chs 		(*pr)("\t    partial groups:\n");
   1824      1.103       chs 		LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
   1825      1.102       chs 			PR_GROUPLIST(pcg);
   1826      1.103       chs 		}
   1827      1.102       chs 		(*pr)("\t    empty groups:\n");
   1828      1.103       chs 		LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
   1829      1.102       chs 			PR_GROUPLIST(pcg);
   1830      1.103       chs 		}
   1831       1.44   thorpej 	}
   1832      1.102       chs #undef PR_GROUPLIST
   1833       1.44   thorpej 
   1834       1.44   thorpej  skip_cache:
   1835       1.88       chs 	pr_enter_check(pp, pr);
   1836       1.88       chs }
   1837       1.88       chs 
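/*
 * pool_chk_page:
 *
 *	Verify the consistency of a single page: the page header must
 *	refer to its own page, and (for pools without PR_NOTOUCH) every
 *	item on the free list must carry a valid magic number and lie
 *	within that page.  Returns nonzero on inconsistency.
 */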
   1838       1.88       chs static int
   1839       1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1840       1.88       chs {
   1841       1.88       chs 	struct pool_item *pi;
   1842       1.88       chs 	caddr_t page;
   1843       1.88       chs 	int n;
   1844       1.88       chs 
   1845      1.121      yamt 	if ((pp->pr_roflags & PR_NOALIGN) == 0) {
   1846      1.121      yamt 		page = (caddr_t)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
   1847      1.121      yamt 		if (page != ph->ph_page &&
   1848      1.121      yamt 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1849      1.121      yamt 			if (label != NULL)
   1850      1.121      yamt 				printf("%s: ", label);
   1851      1.121      yamt 			printf("pool(%p:%s): page inconsistency: page %p;"
   1852      1.121      yamt 			       " at page head addr %p (p %p)\n", pp,
   1853      1.121      yamt 				pp->pr_wchan, ph->ph_page,
   1854      1.121      yamt 				ph, page);
   1855      1.121      yamt 			return 1;
   1856      1.121      yamt 		}
   1857       1.88       chs 	}
   1858        1.3        pk 
   1859       1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1860       1.97      yamt 		return 0;
   1861       1.97      yamt 
   1862      1.102       chs 	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
   1863       1.88       chs 	     pi != NULL;
   1864      1.102       chs 	     pi = LIST_NEXT(pi,pi_list), n++) {
   1865       1.88       chs 
   1866       1.88       chs #ifdef DIAGNOSTIC
   1867       1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1868       1.88       chs 			if (label != NULL)
   1869       1.88       chs 				printf("%s: ", label);
   1870       1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1871      1.121      yamt 			       " page %p; item ordinal %d; addr %p\n",
   1872       1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1873      1.121      yamt 				n, pi);
   1874       1.88       chs 			panic("pool");
   1875       1.88       chs 		}
   1876       1.88       chs #endif
   1877      1.121      yamt 		if ((pp->pr_roflags & PR_NOALIGN) != 0) {
   1878      1.121      yamt 			continue;
   1879      1.121      yamt 		}
   1880      1.121      yamt 		page = (caddr_t)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
   1881       1.88       chs 		if (page == ph->ph_page)
   1882       1.88       chs 			continue;
   1883       1.88       chs 
   1884       1.88       chs 		if (label != NULL)
   1885       1.88       chs 			printf("%s: ", label);
   1886       1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1887       1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1888       1.88       chs 			pp->pr_wchan, ph->ph_page,
   1889       1.88       chs 			n, pi, page);
   1890       1.88       chs 		return 1;
   1891       1.88       chs 	}
   1892       1.88       chs 	return 0;
   1893        1.3        pk }
   1894        1.3        pk 
   1895       1.88       chs 
   1896        1.3        pk int
   1897       1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1898        1.3        pk {
   1899        1.3        pk 	struct pool_item_header *ph;
   1900        1.3        pk 	int r = 0;
   1901        1.3        pk 
   1902       1.21   thorpej 	simple_lock(&pp->pr_slock);
   1903       1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1904       1.88       chs 		r = pool_chk_page(pp, label, ph);
   1905       1.88       chs 		if (r) {
   1906       1.88       chs 			goto out;
   1907       1.88       chs 		}
   1908       1.88       chs 	}
   1909       1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1910       1.88       chs 		r = pool_chk_page(pp, label, ph);
   1911       1.88       chs 		if (r) {
   1912        1.3        pk 			goto out;
   1913        1.3        pk 		}
   1914       1.88       chs 	}
   1915       1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1916       1.88       chs 		r = pool_chk_page(pp, label, ph);
   1917       1.88       chs 		if (r) {
   1918        1.3        pk 			goto out;
   1919        1.3        pk 		}
   1920        1.3        pk 	}
   1921       1.88       chs 
   1922        1.3        pk out:
   1923       1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1924        1.3        pk 	return (r);
   1925       1.43   thorpej }
   1926       1.43   thorpej 
   1927       1.43   thorpej /*
   1928       1.43   thorpej  * pool_cache_init:
   1929       1.43   thorpej  *
   1930       1.43   thorpej  *	Initialize a pool cache.
   1931       1.43   thorpej  *
   1932       1.43   thorpej  *	NOTE: If the pool must be protected from interrupts, we expect
   1933       1.43   thorpej  *	to be called at the appropriate interrupt priority level.
   1934       1.43   thorpej  */
   1935       1.43   thorpej void
   1936       1.43   thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1937       1.43   thorpej     int (*ctor)(void *, void *, int),
   1938       1.43   thorpej     void (*dtor)(void *, void *),
   1939       1.43   thorpej     void *arg)
   1940       1.43   thorpej {
   1941       1.43   thorpej 
   1942      1.102       chs 	LIST_INIT(&pc->pc_emptygroups);
   1943      1.102       chs 	LIST_INIT(&pc->pc_fullgroups);
   1944      1.102       chs 	LIST_INIT(&pc->pc_partgroups);
   1945       1.43   thorpej 	simple_lock_init(&pc->pc_slock);
   1946       1.43   thorpej 
   1947       1.43   thorpej 	pc->pc_pool = pp;
   1948       1.43   thorpej 
   1949       1.43   thorpej 	pc->pc_ctor = ctor;
   1950       1.43   thorpej 	pc->pc_dtor = dtor;
   1951       1.43   thorpej 	pc->pc_arg  = arg;
   1952       1.43   thorpej 
   1953       1.48   thorpej 	pc->pc_hits   = 0;
   1954       1.48   thorpej 	pc->pc_misses = 0;
   1955       1.48   thorpej 
   1956       1.48   thorpej 	pc->pc_ngroups = 0;
   1957       1.48   thorpej 
   1958       1.48   thorpej 	pc->pc_nitems = 0;
   1959       1.48   thorpej 
   1960       1.43   thorpej 	simple_lock(&pp->pr_slock);
   1961      1.102       chs 	LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
   1962       1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1963       1.43   thorpej }
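/*
 * Example (illustrative; the `foo' names are hypothetical): pairing a
 * cache with its backing pool so that constructed state survives
 * get/put cycles:
 *
 *	struct pool foo_pool;
 *	struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 * foo_ctor()/foo_dtor() run only when objects actually move between
 * the cache and the pool, not on every pool_cache_get()/
 * pool_cache_put().
 */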
   1964       1.43   thorpej 
   1965       1.43   thorpej /*
   1966       1.43   thorpej  * pool_cache_destroy:
   1967       1.43   thorpej  *
   1968       1.43   thorpej  *	Destroy a pool cache.
   1969       1.43   thorpej  */
   1970       1.43   thorpej void
   1971       1.43   thorpej pool_cache_destroy(struct pool_cache *pc)
   1972       1.43   thorpej {
   1973       1.43   thorpej 	struct pool *pp = pc->pc_pool;
   1974       1.43   thorpej 
   1975       1.43   thorpej 	/* First, invalidate the entire cache. */
   1976       1.43   thorpej 	pool_cache_invalidate(pc);
   1977       1.43   thorpej 
   1978       1.43   thorpej 	/* ...and remove it from the pool's cache list. */
   1979       1.43   thorpej 	simple_lock(&pp->pr_slock);
   1980      1.102       chs 	LIST_REMOVE(pc, pc_poollist);
   1981       1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1982       1.43   thorpej }
   1983       1.43   thorpej 
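/*
 * pcg_get:
 *
 *	Remove one object from a cache group, optionally returning its
 *	physical address via `pap'.  The group must not be empty, and
 *	the cache lock must be held by the caller.
 */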
   1984      1.110     perry static inline void *
   1985       1.87   thorpej pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
   1986       1.43   thorpej {
   1987       1.43   thorpej 	void *object;
   1988       1.43   thorpej 	u_int idx;
   1989       1.43   thorpej 
   1990       1.43   thorpej 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1991       1.45   thorpej 	KASSERT(pcg->pcg_avail != 0);
   1992       1.43   thorpej 	idx = --pcg->pcg_avail;
   1993       1.43   thorpej 
   1994       1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
   1995       1.87   thorpej 	object = pcg->pcg_objects[idx].pcgo_va;
   1996       1.87   thorpej 	if (pap != NULL)
   1997       1.87   thorpej 		*pap = pcg->pcg_objects[idx].pcgo_pa;
   1998       1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = NULL;
   1999       1.43   thorpej 
   2000       1.43   thorpej 	return (object);
   2001       1.43   thorpej }
   2002       1.43   thorpej 
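/*
 * pcg_put:
 *
 *	Stash one object (and its physical address) into a cache group.
 *	The group must not be full, and the cache lock must be held by
 *	the caller.
 */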
   2003      1.110     perry static inline void
   2004       1.87   thorpej pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
   2005       1.43   thorpej {
   2006       1.43   thorpej 	u_int idx;
   2007       1.43   thorpej 
   2008       1.43   thorpej 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   2009       1.43   thorpej 	idx = pcg->pcg_avail++;
   2010       1.43   thorpej 
   2011       1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
   2012       1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = object;
   2013       1.87   thorpej 	pcg->pcg_objects[idx].pcgo_pa = pa;
   2014       1.43   thorpej }
   2015       1.43   thorpej 
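/*
 * pcg_grouplist_free:
 *
 *	Return every cache group on the given list to pcgpool, raising
 *	to splvm() around the pool_put() calls.
 */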
   2016      1.102       chs static void
   2017      1.102       chs pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
   2018      1.102       chs {
   2019      1.102       chs 	struct pool_cache_group *pcg;
   2020      1.102       chs 	int s;
   2021      1.102       chs 
   2022      1.102       chs 	s = splvm();
   2023      1.102       chs 	while ((pcg = LIST_FIRST(pcgl)) != NULL) {
   2024      1.102       chs 		LIST_REMOVE(pcg, pcg_list);
   2025      1.102       chs 		pool_put(&pcgpool, pcg);
   2026      1.102       chs 	}
   2027      1.102       chs 	splx(s);
   2028      1.102       chs }
   2029      1.102       chs 
   2030       1.43   thorpej /*
   2031       1.87   thorpej  * pool_cache_get{,_paddr}:
   2032       1.43   thorpej  *
   2033       1.87   thorpej  *	Get an object from a pool cache (optionally returning
   2034       1.87   thorpej  *	the physical address of the object).
   2035       1.43   thorpej  */
   2036       1.43   thorpej void *
   2037       1.87   thorpej pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
   2038       1.43   thorpej {
   2039       1.43   thorpej 	struct pool_cache_group *pcg;
   2040       1.43   thorpej 	void *object;
   2041       1.58   thorpej 
   2042       1.58   thorpej #ifdef LOCKDEBUG
   2043       1.58   thorpej 	if (flags & PR_WAITOK)
   2044      1.119      yamt 		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
   2045       1.58   thorpej #endif
   2046       1.43   thorpej 
   2047       1.43   thorpej 	simple_lock(&pc->pc_slock);
   2048       1.43   thorpej 
   2049      1.102       chs 	pcg = LIST_FIRST(&pc->pc_partgroups);
   2050      1.102       chs 	if (pcg == NULL) {
   2051      1.102       chs 		pcg = LIST_FIRST(&pc->pc_fullgroups);
   2052      1.102       chs 		if (pcg != NULL) {
   2053      1.102       chs 			LIST_REMOVE(pcg, pcg_list);
   2054      1.102       chs 			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
   2055       1.43   thorpej 		}
   2056      1.102       chs 	}
   2057      1.102       chs 	if (pcg == NULL) {
   2058       1.43   thorpej 
   2059       1.43   thorpej 		/*
   2060       1.43   thorpej 		 * No groups with any available objects.  Allocate
   2061       1.43   thorpej 		 * a new object, construct it, and return it to
   2062       1.43   thorpej 		 * the caller.  We will allocate a group, if necessary,
   2063       1.43   thorpej 		 * when the object is freed back to the cache.
   2064       1.43   thorpej 		 */
   2065       1.48   thorpej 		pc->pc_misses++;
   2066       1.43   thorpej 		simple_unlock(&pc->pc_slock);
   2067       1.43   thorpej 		object = pool_get(pc->pc_pool, flags);
   2068       1.43   thorpej 		if (object != NULL && pc->pc_ctor != NULL) {
   2069       1.43   thorpej 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   2070       1.43   thorpej 				pool_put(pc->pc_pool, object);
   2071       1.43   thorpej 				return (NULL);
   2072       1.43   thorpej 			}
   2073       1.43   thorpej 		}
   2074  1.122.2.1        ad 		KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
   2075  1.122.2.1        ad 		    (pc->pc_pool->pr_align - 1)) == 0);
   2076       1.87   thorpej 		if (object != NULL && pap != NULL) {
   2077       1.87   thorpej #ifdef POOL_VTOPHYS
   2078       1.87   thorpej 			*pap = POOL_VTOPHYS(object);
   2079       1.87   thorpej #else
   2080       1.87   thorpej 			*pap = POOL_PADDR_INVALID;
   2081       1.87   thorpej #endif
   2082       1.87   thorpej 		}
   2083       1.43   thorpej 		return (object);
   2084       1.43   thorpej 	}
   2085       1.43   thorpej 
   2086       1.48   thorpej 	pc->pc_hits++;
   2087       1.48   thorpej 	pc->pc_nitems--;
   2088       1.87   thorpej 	object = pcg_get(pcg, pap);
   2089       1.43   thorpej 
   2090      1.102       chs 	if (pcg->pcg_avail == 0) {
   2091      1.102       chs 		LIST_REMOVE(pcg, pcg_list);
   2092      1.102       chs 		LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
   2093      1.102       chs 	}
   2094       1.43   thorpej 	simple_unlock(&pc->pc_slock);
   2095       1.43   thorpej 
   2096  1.122.2.1        ad 	KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
   2097  1.122.2.1        ad 	    (pc->pc_pool->pr_align - 1)) == 0);
   2098       1.43   thorpej 	return (object);
   2099       1.43   thorpej }
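/*
 * Example (a hedged sketch; the `foo' names are hypothetical):
 * callers that do not need the physical address use the
 * pool_cache_get() wrapper from sys/pool.h, which passes a NULL
 * `pap':
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...use f; it arrives already constructed...
 *	pool_cache_put(&foo_cache, f);
 */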
   2100       1.43   thorpej 
   2101       1.43   thorpej /*
   2102       1.87   thorpej  * pool_cache_put{,_paddr}:
   2103       1.43   thorpej  *
   2104       1.87   thorpej  *	Put an object back to the pool cache (optionally caching the
   2105       1.87   thorpej  *	physical address of the object).
   2106       1.43   thorpej  */
   2107       1.43   thorpej void
   2108       1.87   thorpej pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
   2109       1.43   thorpej {
   2110       1.43   thorpej 	struct pool_cache_group *pcg;
   2111       1.60   thorpej 	int s;
   2112       1.43   thorpej 
   2113      1.109  christos 	if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
   2114      1.109  christos 		goto destruct;
   2115      1.109  christos 	}
   2116      1.109  christos 
   2117       1.43   thorpej 	simple_lock(&pc->pc_slock);
   2118       1.43   thorpej 
   2119      1.102       chs 	pcg = LIST_FIRST(&pc->pc_partgroups);
   2120      1.102       chs 	if (pcg == NULL) {
   2121      1.102       chs 		pcg = LIST_FIRST(&pc->pc_emptygroups);
   2122      1.102       chs 		if (pcg != NULL) {
   2123      1.102       chs 			LIST_REMOVE(pcg, pcg_list);
   2124      1.102       chs 			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
   2125       1.43   thorpej 		}
   2126      1.102       chs 	}
   2127      1.102       chs 	if (pcg == NULL) {
   2128       1.43   thorpej 
   2129       1.43   thorpej 		/*
    2130       1.43   thorpej 		 * No group with room for the object.  Attempt to
    2131       1.47   thorpej 		 * allocate a new group.
   2132       1.43   thorpej 		 */
   2133       1.47   thorpej 		simple_unlock(&pc->pc_slock);
   2134       1.60   thorpej 		s = splvm();
   2135       1.43   thorpej 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   2136       1.60   thorpej 		splx(s);
   2137      1.102       chs 		if (pcg == NULL) {
   2138      1.109  christos destruct:
   2139      1.102       chs 
   2140      1.102       chs 			/*
   2141      1.102       chs 			 * Unable to allocate a cache group; destruct the object
   2142      1.102       chs 			 * and free it back to the pool.
   2143      1.102       chs 			 */
   2144      1.102       chs 			pool_cache_destruct_object(pc, object);
   2145      1.102       chs 			return;
   2146       1.43   thorpej 		}
   2147      1.102       chs 		memset(pcg, 0, sizeof(*pcg));
   2148      1.102       chs 		simple_lock(&pc->pc_slock);
   2149      1.102       chs 		pc->pc_ngroups++;
   2150      1.102       chs 		LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
   2151       1.43   thorpej 	}
   2152       1.43   thorpej 
   2153       1.48   thorpej 	pc->pc_nitems++;
   2154       1.87   thorpej 	pcg_put(pcg, object, pa);
   2155       1.43   thorpej 
   2156      1.102       chs 	if (pcg->pcg_avail == PCG_NOBJECTS) {
   2157      1.102       chs 		LIST_REMOVE(pcg, pcg_list);
   2158      1.102       chs 		LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
   2159      1.102       chs 	}
   2160       1.43   thorpej 	simple_unlock(&pc->pc_slock);
   2161       1.51   thorpej }
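
/*
 * Example sketch (editor's illustration, not part of the original
 * source): a typical constructed-object cache built on the old-style
 * pool_cache API used in this file.  All "foo" names are assumptions.
 * The constructor runs only on a cache miss; the destructor runs only
 * when a cached object is finally released back to the pool.
 */
#if 0
#include <sys/pool.h>

struct foo {
	int	f_state;
};

static struct pool foo_pool;
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	f->f_state = 0;
	return 0;	/* non-zero would abort the allocation */
}

static void
foo_dtor(void *arg, void *obj)
{

	/* Tear down anything foo_ctor() set up. */
}

void
foo_init(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}

void
foo_example(void)
{
	struct foo *f;

	f = pool_cache_get(&foo_cache, PR_WAITOK);
	if (f != NULL)
		pool_cache_put(&foo_cache, f);	/* object stays constructed */
}
#endif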
   2162       1.51   thorpej 
   2163       1.51   thorpej /*
   2164       1.51   thorpej  * pool_cache_destruct_object:
   2165       1.51   thorpej  *
   2166       1.51   thorpej  *	Force destruction of an object and its release back into
   2167       1.51   thorpej  *	the pool.
   2168       1.51   thorpej  */
   2169       1.51   thorpej void
   2170       1.51   thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
   2171       1.51   thorpej {
   2172       1.51   thorpej 
   2173       1.51   thorpej 	if (pc->pc_dtor != NULL)
   2174       1.51   thorpej 		(*pc->pc_dtor)(pc->pc_arg, object);
   2175       1.51   thorpej 	pool_put(pc->pc_pool, object);
   2176       1.43   thorpej }
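
/*
 * Example sketch (editor's illustration, not part of the original
 * source): forcing destruction when an object must not be recycled
 * through the cache.  struct foo and foo_cache are the assumed names
 * from the sketch above.
 */
#if 0
void
foo_retire(struct foo *f)
{

	/* Destruct f now and return it to foo_pool, bypassing the cache. */
	pool_cache_destruct_object(&foo_cache, f);
}
#endif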
   2177       1.43   thorpej 
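/*
 * pool_do_cache_invalidate_grouplist:
 *
 *	Destruct and release every object cached in the groups on the
 *	given list, and move the now-empty groups onto pcgdl so that
 *	the caller can free them once all locks have been dropped.
 */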
   2178      1.102       chs static void
   2179      1.106  christos pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
   2180      1.105  christos     struct pool_cache *pc, struct pool_pagelist *pq,
   2181      1.106  christos     struct pool_cache_grouplist *pcgdl)
   2182      1.102       chs {
   2183      1.106  christos 	struct pool_cache_group *pcg, *npcg;
   2184      1.102       chs 	void *object;
   2185      1.102       chs 
   2186      1.106  christos 	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
   2187      1.102       chs 		npcg = LIST_NEXT(pcg, pcg_list);
   2188      1.102       chs 		while (pcg->pcg_avail != 0) {
   2189      1.102       chs 			pc->pc_nitems--;
   2190      1.102       chs 			object = pcg_get(pcg, NULL);
   2191      1.102       chs 			if (pc->pc_dtor != NULL)
   2192      1.102       chs 				(*pc->pc_dtor)(pc->pc_arg, object);
   2193      1.102       chs 			pool_do_put(pc->pc_pool, object, pq);
   2194      1.102       chs 		}
   2195      1.103       chs 		pc->pc_ngroups--;
   2196      1.102       chs 		LIST_REMOVE(pcg, pcg_list);
   2197      1.106  christos 		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
   2198      1.102       chs 	}
   2199      1.105  christos }
   2200      1.105  christos 
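/*
 * pool_do_cache_invalidate:
 *
 *	Invalidate a pool cache with both the cache and pool locks held;
 *	freeing of pages and cache groups is deferred to the caller via
 *	pq and pcgl.
 */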
   2201      1.105  christos static void
   2202      1.105  christos pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
   2203      1.105  christos     struct pool_cache_grouplist *pcgl)
   2204      1.105  christos {
   2205      1.105  christos 
   2206      1.105  christos 	LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
   2207      1.105  christos 	LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
   2208      1.105  christos 
   2209      1.106  christos 	pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
   2210      1.106  christos 	pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
   2211      1.103       chs 
   2212      1.103       chs 	KASSERT(LIST_EMPTY(&pc->pc_partgroups));
   2213      1.103       chs 	KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
   2214      1.103       chs 	KASSERT(pc->pc_nitems == 0);
   2215      1.102       chs }
   2216      1.102       chs 
   2217       1.43   thorpej /*
   2218      1.101   thorpej  * pool_cache_invalidate:
   2219       1.43   thorpej  *
   2220      1.101   thorpej  *	Invalidate a pool cache (destruct and release all of the
   2221      1.101   thorpej  *	cached objects).
   2222       1.43   thorpej  */
   2223      1.101   thorpej void
   2224      1.101   thorpej pool_cache_invalidate(struct pool_cache *pc)
   2225       1.43   thorpej {
   2226      1.101   thorpej 	struct pool_pagelist pq;
   2227      1.102       chs 	struct pool_cache_grouplist pcgl;
   2228      1.101   thorpej 
   2229      1.101   thorpej 	LIST_INIT(&pq);
   2230      1.102       chs 	LIST_INIT(&pcgl);
   2231      1.101   thorpej 
   2232      1.101   thorpej 	simple_lock(&pc->pc_slock);
   2233      1.101   thorpej 	simple_lock(&pc->pc_pool->pr_slock);
   2234       1.43   thorpej 
   2235      1.102       chs 	pool_do_cache_invalidate(pc, &pq, &pcgl);
   2236       1.43   thorpej 
   2237      1.101   thorpej 	simple_unlock(&pc->pc_pool->pr_slock);
   2238      1.101   thorpej 	simple_unlock(&pc->pc_slock);
   2239       1.43   thorpej 
   2240      1.102       chs 	pr_pagelist_free(pc->pc_pool, &pq);
   2241      1.102       chs 	pcg_grouplist_free(&pcgl);
   2242       1.43   thorpej }
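
/*
 * Example sketch (editor's illustration, not part of the original
 * source): flushing every cached object, e.g. when previously
 * constructed state becomes stale, or before calling
 * pool_cache_destroy().  foo_cache and foo_pool are the assumed names
 * from the sketches above.
 */
#if 0
void
foo_flush(void)
{

	/* Destruct all cached objects and return them to foo_pool. */
	pool_cache_invalidate(&foo_cache);
}
#endif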
   2243       1.43   thorpej 
   2244       1.43   thorpej /*
   2245       1.43   thorpej  * pool_cache_reclaim:
   2246       1.43   thorpej  *
   2247       1.43   thorpej  *	Reclaim a pool cache for pool_reclaim().
   2248       1.43   thorpej  */
   2249       1.43   thorpej static void
   2250      1.102       chs pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
   2251      1.102       chs     struct pool_cache_grouplist *pcgl)
   2252       1.43   thorpej {
   2253      1.101   thorpej 
   2254      1.101   thorpej 	/*
   2255      1.101   thorpej 	 * We're locking in the wrong order (normally pool_cache -> pool,
   2256      1.101   thorpej 	 * but the pool is already locked when we get here), so we have
   2257      1.101   thorpej 	 * to use trylock.  If we can't lock the pool_cache, it's not really
   2258      1.101   thorpej 	 * a big deal here.
   2259      1.101   thorpej 	 */
   2260      1.101   thorpej 	if (simple_lock_try(&pc->pc_slock) == 0)
   2261      1.101   thorpej 		return;
   2262      1.101   thorpej 
   2263      1.102       chs 	pool_do_cache_invalidate(pc, pq, pcgl);
   2264       1.43   thorpej 
   2265       1.43   thorpej 	simple_unlock(&pc->pc_slock);
   2266        1.3        pk }
   2267       1.66   thorpej 
   2268       1.66   thorpej /*
   2269       1.66   thorpej  * Pool backend allocators.
   2270       1.66   thorpej  *
   2271       1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   2272       1.66   thorpej  * and any additional draining that might be needed.
   2273       1.66   thorpej  *
   2274       1.66   thorpej  * We provide two standard allocators:
   2275       1.66   thorpej  *
   2276       1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   2277       1.66   thorpej  *
   2278       1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   2279       1.66   thorpej  *	in interrupt context.
   2280       1.66   thorpej  */
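
/*
 * Example sketch (editor's illustration, not part of the original
 * source): supplying a custom backend allocator at pool_init() time.
 * my_page_alloc()/my_page_free() are assumed functions drawing pages
 * from some driver-private resource; a pa_pagesz of 0 selects the
 * default page size, as in the standard allocators below.
 */
#if 0
static void	*my_page_alloc(struct pool *, int);
static void	my_page_free(struct pool *, void *);

static struct pool_allocator my_allocator = {
	my_page_alloc, my_page_free, 0,
};

static struct pool my_pool;

void
my_pool_init(void)
{

	pool_init(&my_pool, 64, 0, 0, 0, "mypl", &my_allocator);
}
#endif
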
   2281       1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   2282       1.66   thorpej void	pool_page_free(struct pool *, void *);
   2283       1.66   thorpej 
   2284      1.112     bjh21 #ifdef POOL_SUBPAGE
   2285      1.112     bjh21 struct pool_allocator pool_allocator_kmem_fullpage = {
   2286      1.112     bjh21 	pool_page_alloc, pool_page_free, 0,
   2287      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2288      1.112     bjh21 };
   2289      1.112     bjh21 #else
   2290       1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   2291       1.66   thorpej 	pool_page_alloc, pool_page_free, 0,
   2292      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2293       1.66   thorpej };
    2294      1.112     bjh21 #endif /* POOL_SUBPAGE */
   2295       1.66   thorpej 
   2296       1.66   thorpej void	*pool_page_alloc_nointr(struct pool *, int);
   2297       1.66   thorpej void	pool_page_free_nointr(struct pool *, void *);
   2298       1.66   thorpej 
   2299      1.112     bjh21 #ifdef POOL_SUBPAGE
   2300      1.112     bjh21 struct pool_allocator pool_allocator_nointr_fullpage = {
   2301      1.112     bjh21 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2302      1.117      yamt 	.pa_backingmapptr = &kernel_map,
   2303      1.112     bjh21 };
   2304      1.112     bjh21 #else
   2305       1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   2306       1.66   thorpej 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2307      1.117      yamt 	.pa_backingmapptr = &kernel_map,
   2308       1.66   thorpej };
    2309      1.112     bjh21 #endif /* POOL_SUBPAGE */
   2310       1.66   thorpej 
   2311       1.66   thorpej #ifdef POOL_SUBPAGE
   2312       1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   2313       1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   2314       1.66   thorpej 
   2315      1.112     bjh21 struct pool_allocator pool_allocator_kmem = {
   2316      1.112     bjh21 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2317      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2318      1.112     bjh21 };
   2319      1.112     bjh21 
   2320      1.112     bjh21 void	*pool_subpage_alloc_nointr(struct pool *, int);
   2321      1.112     bjh21 void	pool_subpage_free_nointr(struct pool *, void *);
   2322      1.112     bjh21 
   2323      1.112     bjh21 struct pool_allocator pool_allocator_nointr = {
   2324      1.112     bjh21 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2325      1.117      yamt 	.pa_backingmapptr = &kmem_map,
   2326       1.66   thorpej };
   2327       1.66   thorpej #endif /* POOL_SUBPAGE */
   2328       1.66   thorpej 
   2329      1.117      yamt static void *
   2330      1.117      yamt pool_allocator_alloc(struct pool *pp, int flags)
   2331       1.66   thorpej {
   2332      1.117      yamt 	struct pool_allocator *pa = pp->pr_alloc;
   2333       1.66   thorpej 	void *res;
   2334       1.66   thorpej 
   2335      1.117      yamt 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
   2336       1.66   thorpej 
   2337      1.117      yamt 	res = (*pa->pa_alloc)(pp, flags);
   2338      1.117      yamt 	if (res == NULL && (flags & PR_WAITOK) == 0) {
   2339       1.66   thorpej 		/*
   2340      1.117      yamt 		 * We only run the drain hook here if PR_NOWAIT.
   2341      1.117      yamt 		 * In other cases, the hook will be run in
   2342      1.117      yamt 		 * pool_reclaim().
   2343       1.66   thorpej 		 */
   2344      1.117      yamt 		if (pp->pr_drain_hook != NULL) {
   2345      1.117      yamt 			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
   2346      1.117      yamt 			res = (*pa->pa_alloc)(pp, flags);
   2347       1.66   thorpej 		}
   2348      1.117      yamt 	}
   2349      1.117      yamt 	return res;
   2350       1.66   thorpej }
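
/*
 * Example sketch (editor's illustration, not part of the original
 * source): registering a drain hook so that a failing PR_NOWAIT
 * allocation can prod the owning subsystem to release memory before
 * the allocation is retried, as in pool_allocator_alloc() above.
 * my_pool and my_drain() are assumed names.
 */
#if 0
static void
my_drain(void *arg, int flags)
{

	/* Release idle subsystem resources back to my_pool here. */
}

void
my_pool_sethook(void)
{

	pool_set_drain_hook(&my_pool, my_drain, NULL);
}
#endif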
   2351       1.66   thorpej 
   2352      1.117      yamt static void
   2353       1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2354       1.66   thorpej {
   2355       1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2356       1.66   thorpej 
   2357       1.91      yamt 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
   2358       1.91      yamt 
   2359       1.66   thorpej 	(*pa->pa_free)(pp, v);
   2360       1.66   thorpej }
   2361       1.66   thorpej 
   2362       1.66   thorpej void *
   2363       1.66   thorpej pool_page_alloc(struct pool *pp, int flags)
   2364       1.66   thorpej {
   2365       1.66   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2366       1.66   thorpej 
   2367      1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
   2368       1.66   thorpej }
   2369       1.66   thorpej 
   2370       1.66   thorpej void
   2371       1.66   thorpej pool_page_free(struct pool *pp, void *v)
   2372       1.66   thorpej {
   2373       1.66   thorpej 
   2374       1.98      yamt 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2375       1.98      yamt }
   2376       1.98      yamt 
   2377       1.98      yamt static void *
   2378       1.98      yamt pool_page_alloc_meta(struct pool *pp, int flags)
   2379       1.98      yamt {
   2380       1.98      yamt 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2381       1.98      yamt 
   2382      1.100      yamt 	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
   2383       1.98      yamt }
   2384       1.98      yamt 
   2385       1.98      yamt static void
   2386       1.98      yamt pool_page_free_meta(struct pool *pp, void *v)
   2387       1.98      yamt {
   2388       1.98      yamt 
   2389      1.100      yamt 	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
   2390       1.66   thorpej }
   2391       1.66   thorpej 
   2392       1.66   thorpej #ifdef POOL_SUBPAGE
   2393       1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2394       1.66   thorpej void *
   2395       1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2396       1.66   thorpej {
   2397       1.93       dbj 	void *v;
   2398       1.93       dbj 	int s;
   2399       1.93       dbj 	s = splvm();
   2400       1.93       dbj 	v = pool_get(&psppool, flags);
   2401       1.93       dbj 	splx(s);
   2402       1.93       dbj 	return v;
   2403       1.66   thorpej }
   2404       1.66   thorpej 
   2405       1.66   thorpej void
   2406       1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2407       1.66   thorpej {
   2408       1.93       dbj 	int s;
   2409       1.93       dbj 	s = splvm();
   2410       1.66   thorpej 	pool_put(&psppool, v);
   2411       1.93       dbj 	splx(s);
   2412       1.66   thorpej }
   2413       1.66   thorpej 
   2414       1.66   thorpej /* We don't provide a real nointr allocator.  Maybe later. */
   2415       1.66   thorpej void *
   2416      1.112     bjh21 pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2417       1.66   thorpej {
   2418       1.66   thorpej 
   2419       1.66   thorpej 	return (pool_subpage_alloc(pp, flags));
   2420       1.66   thorpej }
   2421       1.66   thorpej 
   2422       1.66   thorpej void
   2423      1.112     bjh21 pool_subpage_free_nointr(struct pool *pp, void *v)
   2424       1.66   thorpej {
   2425       1.66   thorpej 
   2426       1.66   thorpej 	pool_subpage_free(pp, v);
   2427       1.66   thorpej }
   2428      1.112     bjh21 #endif /* POOL_SUBPAGE */
   2429       1.66   thorpej void *
   2430       1.66   thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
   2431       1.66   thorpej {
   2432       1.66   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2433       1.66   thorpej 
   2434      1.100      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
   2435       1.66   thorpej }
   2436       1.66   thorpej 
   2437       1.66   thorpej void
   2438       1.66   thorpej pool_page_free_nointr(struct pool *pp, void *v)
   2439       1.66   thorpej {
   2440       1.66   thorpej 
   2441       1.98      yamt 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2442       1.66   thorpej }