subr_pool.c revision 1.99.8.2
/*	$NetBSD: subr_pool.c,v 1.99.8.2 2006/03/10 13:19:42 tron Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.99.8.2 2006/03/10 13:19:42 tron Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
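
/*
 * Illustrative usage sketch (editorial addition, hypothetical names):
 * a `foo' subsystem would typically create a pool and allocate from it
 * as below.  The splvm() bracketing is only required for pools that
 * may also be used from interrupt context; PR_WAITOK may be replaced
 * by PR_NOWAIT if sleeping is not allowed.
 *
 *	static struct pool foo_pool;
 *	struct foo *f;
 *	int s;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *
 *	s = splvm();
 *	f = pool_get(&foo_pool, PR_WAITOK);
 *	splx(s);
 *
 *	...use f...
 *
 *	s = splvm();
 *	pool_put(&foo_pool, f);
 *	splx(s);
 */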

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
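
/*
 * Editorial note: PHPOOL_FREELIST_NELEM(0) is 0, i.e. phpool[0] is the
 * plain header pool used when items are kept on ph_itemlist, while
 * index idx > 0 yields 2^idx free-list entries, so phpool[1]..phpool[7]
 * carry free lists for 2, 4, 8, ... up to 128 items per page.
 */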

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
static struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

typedef uint8_t pool_item_freelist_t;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
	union {
		/* !PR_NOTOUCH */
		struct {
			TAILQ_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			uint16_t
				phu_off;	/* start offset in page */
			pool_item_freelist_t
				phu_firstfree;	/* first free item */
			/*
			 * XXX it might be better to use
			 * a simple bitmap and ffs(3)
			 */
		} phu_notouch;
	} ph_u;
	uint16_t		ph_nmissing;	/* # of chunks in use */
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_off		ph_u.phu_notouch.phu_off
#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; destruction is deferred
 * until it is absolutely necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
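
/*
 * Illustrative sketch (editorial addition, hypothetical names):
 * layering a cache on top of an existing pool.  This assumes the
 * pool_cache interface declared in <sys/pool.h>, where the constructor
 * returns 0 on success and a failed constructor makes pool_cache_get()
 * return NULL.
 *
 *	static struct pool_cache foo_cache;
 *
 *	static int  foo_ctor(void *arg, void *obj, int flags);
 *	static void foo_dtor(void *arg, void *obj);
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 */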

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %ld\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static __inline int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

#define	PR_FREELIST_ALIGN(p) \
	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
#define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
#define	PR_INDEX_USED	((pool_item_freelist_t)-1)
#define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
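
/*
 * Editorial note: for PR_NOTOUCH pools the item memory is never
 * written, so the free list is kept out of line, as an array of
 * pool_item_freelist_t indices placed right after the page header.
 * freelist[i] holds the index of the next free item (or PR_INDEX_EOL
 * at the end of the chain), and PR_INDEX_USED marks allocated slots.
 * For example, with items 0 and 2 free and item 1 in use:
 * ph_firstfree == 0, freelist[0] == 2, freelist[2] == PR_INDEX_EOL,
 * freelist[1] == PR_INDEX_USED.
 */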

static __inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] == PR_INDEX_USED);
	freelist[idx] = ph->ph_firstfree;
	ph->ph_firstfree = idx;
}

static __inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	int idx = ph->ph_firstfree;
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] != PR_INDEX_USED);
	ph->ph_firstfree = freelist[idx];
	freelist[idx] = PR_INDEX_USED;

	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;
	int s;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(pp->pr_phpool, ph);
			splx(s);
		}
	}
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc);
}
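
/*
 * Editorial note: the entries in this link set are assumed to be
 * generated by the companion POOL_INIT() macro in <sys/pool.h>, which
 * lets a subsystem declare a static pool that is initialized here at
 * boot, before malloc() is usable.
 */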

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;
	size_t trysize, phsize;
	int s;

	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		simple_lock_init(&palloc->pa_slock);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		      (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page or too big an item.
	 * Off-page page headers go into a splay tree, so we can match
	 * a returned item with its header based on the page address.
	 * We use 1/16 of the page size and about 8 times the item
	 * size as the threshold (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
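	/*
	 * Worked example (editorial, illustrative numbers): with a
	 * 4096-byte page and, say, phsize == 64 after alignment, the
	 * threshold is MIN(4096 / 16, 64 << 3) == 256, so items smaller
	 * than 256 bytes get an in-page header; a larger item still does
	 * if the trailing phsize bytes would have been unusable slack
	 * anyway (the trysize / pr_size comparison below).
	 */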
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
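	/*
	 * Worked example (editorial, illustrative numbers): align == 32
	 * and ioff == 16 reserve (align - ioff) % align == 16 bytes, so
	 * with off == 4096 (off-page header) and pr_size == 96 this
	 * yields (4096 - 16) / 96 == 42 items per page.
	 */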
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * if you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = PR_FREELIST_ALIGN(sz)
				    + nelem * sizeof(pool_item_freelist_t);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", &pool_allocator_meta);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	s = splvm();
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
	splx(s);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;
	int s;

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	simple_unlock(&pool_head_slock);

	/* Remove this pool from its allocator's list of pools. */
	s = splvm();
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);
	splx(s);

	s = splvm();
	simple_lock(&pp->pr_slock);

	KASSERT(TAILQ_EMPTY(&pp->pr_cachelist));

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	simple_unlock(&pp->pr_slock);
	splx(s);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
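
/*
 * Illustrative sketch (editorial addition, hypothetical names): a
 * caller registers a drain hook right after pool_init(); the hook is
 * later invoked as (*fn)(arg, flags) when the pool wants memory
 * released back to it.
 *
 *	static void foo_drain(void *arg, int flags);
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */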

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(pp->pr_phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = TAILQ_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			pr_printlog(pp, pi, printf);
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
		    !TAILQ_EMPTY(&ph->ph_itemlist))) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
   1105       1.3        pk 		wakeup((caddr_t)pp);
   1106       1.3        pk 		return;
   1107       1.3        pk 	}
   1108       1.3        pk 
   1109       1.3        pk 	/*
   1110      1.88       chs 	 * If this page is now empty, do one of two things:
   1111      1.21   thorpej 	 *
    1112      1.88       chs 	 *	(1) If we have more pages than the page high water mark,
    1113      1.96   thorpej 	 *	    or the allocator is starved (PA_WANT), free the page
    1114      1.90   thorpej 	 *	    back to the system.  ONLY CONSIDER FREEING BACK A PAGE
    1115      1.90   thorpej 	 *	    IF WE HAVE MORE THAN OUR MINIMUM PAGE CLAIM.
   1116      1.21   thorpej 	 *
   1117      1.88       chs 	 *	(2) Otherwise, move the page to the empty page list.
   1118      1.88       chs 	 *
   1119      1.88       chs 	 * Either way, select a new current page (so we use a partially-full
   1120      1.88       chs 	 * page if one is available).
   1121       1.3        pk 	 */
   1122       1.3        pk 	if (ph->ph_nmissing == 0) {
   1123       1.6   thorpej 		pp->pr_nidle++;
   1124      1.90   thorpej 		if (pp->pr_npages > pp->pr_minpages &&
   1125      1.90   thorpej 		    (pp->pr_npages > pp->pr_maxpages ||
   1126      1.90   thorpej 		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
   1127  1.99.8.1      tron 			pr_rmpage(pp, ph, pq);
   1128       1.3        pk 		} else {
   1129      1.88       chs 			LIST_REMOVE(ph, ph_pagelist);
   1130      1.88       chs 			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1131       1.3        pk 
   1132      1.21   thorpej 			/*
   1133      1.21   thorpej 			 * Update the timestamp on the page.  A page must
   1134      1.21   thorpej 			 * be idle for some period of time before it can
   1135      1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
   1136      1.21   thorpej 			 * ping-pong'ing for memory.
   1137      1.21   thorpej 			 */
   1138      1.21   thorpej 			s = splclock();
   1139      1.21   thorpej 			ph->ph_time = mono_time;
   1140      1.21   thorpej 			splx(s);
   1141       1.1        pk 		}
   1142      1.88       chs 		pool_update_curpage(pp);
   1143       1.1        pk 	}
   1144      1.88       chs 
   1145      1.21   thorpej 	/*
   1146      1.88       chs 	 * If the page was previously completely full, move it to the
   1147      1.88       chs 	 * partially-full list and make it the current page.  The next
   1148      1.88       chs 	 * allocation will get the item from this page, instead of
   1149      1.88       chs 	 * further fragmenting the pool.
   1150      1.21   thorpej 	 */
   1151      1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
   1152      1.88       chs 		LIST_REMOVE(ph, ph_pagelist);
   1153      1.88       chs 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
   1154      1.21   thorpej 		pp->pr_curpage = ph;
   1155      1.21   thorpej 	}
   1156      1.43   thorpej }
   1157      1.43   thorpej 
   1158      1.43   thorpej /*
   1159      1.43   thorpej  * Return resource to the pool; must be called at appropriate spl level
   1160      1.43   thorpej  */
   1161      1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1162      1.43   thorpej void
   1163      1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
   1164      1.43   thorpej {
   1165  1.99.8.1      tron 	struct pool_pagelist pq;
   1166  1.99.8.1      tron 
   1167  1.99.8.1      tron 	LIST_INIT(&pq);
   1168      1.43   thorpej 
   1169      1.43   thorpej 	simple_lock(&pp->pr_slock);
   1170      1.43   thorpej 	pr_enter(pp, file, line);
   1171      1.43   thorpej 
   1172      1.56  sommerfe 	pr_log(pp, v, PRLOG_PUT, file, line);
   1173      1.56  sommerfe 
   1174  1.99.8.1      tron 	pool_do_put(pp, v, &pq);
   1175      1.21   thorpej 
   1176      1.25   thorpej 	pr_leave(pp);
   1177      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1178  1.99.8.1      tron 
   1179  1.99.8.1      tron 	if (! LIST_EMPTY(&pq))
   1180  1.99.8.1      tron 		pr_pagelist_free(pp, &pq);
   1181       1.1        pk }
   1182      1.57  sommerfe #undef pool_put
   1183      1.59   thorpej #endif /* POOL_DIAGNOSTIC */
   1184       1.1        pk 
   1185      1.56  sommerfe void
   1186      1.56  sommerfe pool_put(struct pool *pp, void *v)
   1187      1.56  sommerfe {
   1188  1.99.8.1      tron 	struct pool_pagelist pq;
   1189      1.56  sommerfe 
   1190  1.99.8.1      tron 	LIST_INIT(&pq);
   1191      1.56  sommerfe 
   1192  1.99.8.1      tron 	simple_lock(&pp->pr_slock);
   1193  1.99.8.1      tron 	pool_do_put(pp, v, &pq);
   1194      1.56  sommerfe 	simple_unlock(&pp->pr_slock);
   1195  1.99.8.1      tron 
   1196  1.99.8.1      tron 	if (! LIST_EMPTY(&pq))
   1197  1.99.8.1      tron 		pr_pagelist_free(pp, &pq);
   1198      1.56  sommerfe }
   1199      1.57  sommerfe 
   1200      1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1201      1.57  sommerfe #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1202      1.56  sommerfe #endif
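
/*
 * Example (an illustrative sketch, not part of this file): a minimal
 * client of the pool_get()/pool_put() interface above.  "struct foo",
 * "foo_pool", and the surrounding functions are hypothetical; the
 * pool_init() arguments follow the prototype in <sys/pool.h>.
 */
#if 0
#include <sys/pool.h>

struct foo {
	int	f_state;
};

static struct pool foo_pool;

void
foo_init(void)
{

	/* size, align, ioff, flags, wchan, default page allocator */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}

struct foo *
foo_alloc(int canwait)
{

	/* PR_WAITOK may sleep for an item; PR_NOWAIT fails instead. */
	return (pool_get(&foo_pool, canwait ? PR_WAITOK : PR_NOWAIT));
}

void
foo_free(struct foo *f)
{

	pool_put(&foo_pool, f);
}
#endif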
   1203      1.74   thorpej 
   1204      1.74   thorpej /*
    1205      1.74   thorpej  * Add enough pages to the pool to hold at least N items, and raise
    1205      1.74   thorpej  * the pool's minimum page claim accordingly.
   1206      1.74   thorpej  */
   1207      1.74   thorpej int
   1208      1.74   thorpej pool_prime(struct pool *pp, int n)
   1209      1.74   thorpej {
   1210      1.83       scw 	struct pool_item_header *ph = NULL;
   1211      1.74   thorpej 	caddr_t cp;
   1212      1.75    simonb 	int newpages;
   1213      1.74   thorpej 
   1214      1.74   thorpej 	simple_lock(&pp->pr_slock);
   1215      1.74   thorpej 
   1216      1.74   thorpej 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1217      1.74   thorpej 
   1218      1.74   thorpej 	while (newpages-- > 0) {
   1219      1.74   thorpej 		simple_unlock(&pp->pr_slock);
   1220      1.74   thorpej 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
   1221      1.74   thorpej 		if (__predict_true(cp != NULL))
   1222      1.74   thorpej 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1223      1.74   thorpej 
   1224      1.74   thorpej 		if (__predict_false(cp == NULL || ph == NULL)) {
   1225      1.74   thorpej 			if (cp != NULL)
   1226      1.74   thorpej 				pool_allocator_free(pp, cp);
   1227      1.91      yamt 			simple_lock(&pp->pr_slock);
   1228      1.74   thorpej 			break;
   1229      1.74   thorpej 		}
   1230      1.74   thorpej 
   1231      1.91      yamt 		simple_lock(&pp->pr_slock);
   1232      1.74   thorpej 		pool_prime_page(pp, cp, ph);
   1233      1.74   thorpej 		pp->pr_npagealloc++;
   1234      1.74   thorpej 		pp->pr_minpages++;
   1235      1.74   thorpej 	}
   1236      1.74   thorpej 
   1237      1.74   thorpej 	if (pp->pr_minpages >= pp->pr_maxpages)
   1238      1.74   thorpej 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1239      1.74   thorpej 
   1240      1.74   thorpej 	simple_unlock(&pp->pr_slock);
   1241      1.74   thorpej 	return (0);
   1242      1.74   thorpej }
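
/*
 * Example (an illustrative sketch): priming the hypothetical
 * "foo_pool" from the sketch above so early consumers find items
 * ready.  Note that in this revision pool_prime() always returns 0,
 * so priming is best-effort.
 */
#if 0
void
foo_prime(void)
{

	/* Reserve pages for at least 64 items at initialization time. */
	(void) pool_prime(&foo_pool, 64);
}
#endif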
   1243      1.55   thorpej 
   1244      1.55   thorpej /*
   1245       1.3        pk  * Add a page worth of items to the pool.
   1246      1.21   thorpej  *
   1247      1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1248       1.3        pk  */
   1249      1.55   thorpej static void
   1250      1.55   thorpej pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
   1251       1.3        pk {
   1252       1.3        pk 	struct pool_item *pi;
   1253       1.3        pk 	caddr_t cp = storage;
   1254       1.3        pk 	unsigned int align = pp->pr_align;
   1255       1.3        pk 	unsigned int ioff = pp->pr_itemoffset;
   1256      1.55   thorpej 	int n;
   1257      1.89      yamt 	int s;
   1258      1.36        pk 
   1259      1.91      yamt 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
   1260      1.91      yamt 
   1261      1.66   thorpej #ifdef DIAGNOSTIC
   1262      1.66   thorpej 	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
   1263      1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1264      1.66   thorpej #endif
   1265       1.3        pk 
   1266       1.3        pk 	/*
   1267       1.3        pk 	 * Insert page header.
   1268       1.3        pk 	 */
   1269      1.88       chs 	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
   1270       1.3        pk 	TAILQ_INIT(&ph->ph_itemlist);
   1271       1.3        pk 	ph->ph_page = storage;
   1272       1.3        pk 	ph->ph_nmissing = 0;
   1273      1.89      yamt 	s = splclock();
   1274      1.89      yamt 	ph->ph_time = mono_time;
   1275      1.89      yamt 	splx(s);
   1276      1.88       chs 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1277      1.88       chs 		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
   1278       1.3        pk 
   1279       1.6   thorpej 	pp->pr_nidle++;
   1280       1.6   thorpej 
   1281       1.3        pk 	/*
   1282       1.3        pk 	 * Color this page.
   1283       1.3        pk 	 */
   1284       1.3        pk 	cp = (caddr_t)(cp + pp->pr_curcolor);
   1285       1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1286       1.3        pk 		pp->pr_curcolor = 0;
   1287       1.3        pk 
   1288       1.3        pk 	/*
    1289       1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1290       1.3        pk 	 */
   1291       1.3        pk 	if (ioff != 0)
   1292       1.3        pk 		cp = (caddr_t)(cp + (align - ioff));
   1293       1.3        pk 
   1294       1.3        pk 	/*
    1295       1.3        pk 	 * Insert the remaining chunks on the item list (for PR_NOTOUCH
    1295       1.3        pk 	 * pools, build the index freelist instead).
   1296       1.3        pk 	 */
   1297       1.3        pk 	n = pp->pr_itemsperpage;
   1298      1.20   thorpej 	pp->pr_nitems += n;
   1299       1.3        pk 
   1300      1.97      yamt 	if (pp->pr_roflags & PR_NOTOUCH) {
   1301      1.99      yamt 		pool_item_freelist_t *freelist = PR_FREELIST(ph);
   1302      1.97      yamt 		int i;
   1303      1.97      yamt 
   1304      1.99      yamt 		ph->ph_off = cp - storage;
   1305      1.97      yamt 		ph->ph_firstfree = 0;
   1306      1.97      yamt 		for (i = 0; i < n - 1; i++)
   1307      1.97      yamt 			freelist[i] = i + 1;
   1308      1.97      yamt 		freelist[n - 1] = PR_INDEX_EOL;
   1309      1.97      yamt 	} else {
   1310      1.97      yamt 		while (n--) {
   1311      1.97      yamt 			pi = (struct pool_item *)cp;
   1312      1.78   thorpej 
   1313      1.97      yamt 			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
   1314       1.3        pk 
   1315      1.97      yamt 			/* Insert on page list */
   1316      1.97      yamt 			TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
   1317       1.3        pk #ifdef DIAGNOSTIC
   1318      1.97      yamt 			pi->pi_magic = PI_MAGIC;
   1319       1.3        pk #endif
   1320      1.97      yamt 			cp = (caddr_t)(cp + pp->pr_size);
   1321      1.97      yamt 		}
   1322       1.3        pk 	}
   1323       1.3        pk 
   1324       1.3        pk 	/*
   1325       1.3        pk 	 * If the pool was depleted, point at the new page.
   1326       1.3        pk 	 */
   1327       1.3        pk 	if (pp->pr_curpage == NULL)
   1328       1.3        pk 		pp->pr_curpage = ph;
   1329       1.3        pk 
   1330       1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1331       1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1332       1.3        pk }
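
/*
 * The PR_NOTOUCH branch above links the free items together through a
 * small index array beside the page header instead of writing list
 * pointers into the items themselves.  A sketch of the matching "pop"
 * operation (the real code is pr_item_notouch_get(), earlier in this
 * file; PR_INDEX_USED marks an allocated slot):
 */
#if 0
	pool_item_freelist_t *freelist = PR_FREELIST(ph);
	int idx = ph->ph_firstfree;		/* first free item's index */
	caddr_t item;

	ph->ph_firstfree = freelist[idx];	/* unlink from the chain */
	freelist[idx] = PR_INDEX_USED;		/* mark the slot taken */
	item = ph->ph_page + ph->ph_off + idx * pp->pr_size;
#endif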
   1333       1.3        pk 
   1334      1.20   thorpej /*
    1335      1.52   thorpej  * Used by pool_get() when nitems drops below the low water mark, to
    1336      1.88       chs  * bring pr_nitems back up to the low water mark.
   1337      1.20   thorpej  *
   1338      1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1339      1.20   thorpej  *
   1340      1.73   thorpej  * Note 2, we must be called with the pool already locked, and we return
   1341      1.20   thorpej  * with it locked.
   1342      1.20   thorpej  */
   1343      1.20   thorpej static int
   1344      1.42   thorpej pool_catchup(struct pool *pp)
   1345      1.20   thorpej {
   1346      1.83       scw 	struct pool_item_header *ph = NULL;
   1347      1.20   thorpej 	caddr_t cp;
   1348      1.20   thorpej 	int error = 0;
   1349      1.20   thorpej 
   1350      1.54   thorpej 	while (POOL_NEEDS_CATCHUP(pp)) {
   1351      1.20   thorpej 		/*
   1352      1.21   thorpej 		 * Call the page back-end allocator for more memory.
   1353      1.21   thorpej 		 *
   1354      1.21   thorpej 		 * XXX: We never wait, so should we bother unlocking
   1355      1.21   thorpej 		 * the pool descriptor?
   1356      1.20   thorpej 		 */
   1357      1.21   thorpej 		simple_unlock(&pp->pr_slock);
   1358      1.66   thorpej 		cp = pool_allocator_alloc(pp, PR_NOWAIT);
   1359      1.55   thorpej 		if (__predict_true(cp != NULL))
   1360      1.55   thorpej 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1361      1.55   thorpej 		if (__predict_false(cp == NULL || ph == NULL)) {
   1362      1.55   thorpej 			if (cp != NULL)
   1363      1.66   thorpej 				pool_allocator_free(pp, cp);
   1364      1.20   thorpej 			error = ENOMEM;
   1365      1.91      yamt 			simple_lock(&pp->pr_slock);
   1366      1.20   thorpej 			break;
   1367      1.20   thorpej 		}
   1368      1.91      yamt 		simple_lock(&pp->pr_slock);
   1369      1.55   thorpej 		pool_prime_page(pp, cp, ph);
   1370      1.26   thorpej 		pp->pr_npagealloc++;
   1371      1.20   thorpej 	}
   1372      1.20   thorpej 
   1373      1.20   thorpej 	return (error);
   1374      1.20   thorpej }
   1375      1.20   thorpej 
   1376      1.88       chs static void
   1377      1.88       chs pool_update_curpage(struct pool *pp)
   1378      1.88       chs {
   1379      1.88       chs 
   1380      1.88       chs 	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
   1381      1.88       chs 	if (pp->pr_curpage == NULL) {
   1382      1.88       chs 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
   1383      1.88       chs 	}
   1384      1.88       chs }
   1385      1.88       chs 
   1386       1.3        pk void
   1387      1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1388       1.3        pk {
   1389      1.15        pk 
   1390      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1391      1.21   thorpej 
   1392       1.3        pk 	pp->pr_minitems = n;
   1393      1.15        pk 	pp->pr_minpages = (n == 0)
   1394      1.15        pk 		? 0
   1395      1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1396      1.20   thorpej 
   1397      1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1398      1.75    simonb 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
   1399      1.20   thorpej 		/*
   1400      1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1401      1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1402      1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1403      1.20   thorpej 		 */
   1404      1.20   thorpej 	}
   1405      1.21   thorpej 
   1406      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1407       1.3        pk }
   1408       1.3        pk 
   1409       1.3        pk void
   1410      1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1411       1.3        pk {
   1412      1.15        pk 
   1413      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1414      1.21   thorpej 
   1415      1.15        pk 	pp->pr_maxpages = (n == 0)
   1416      1.15        pk 		? 0
   1417      1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1418      1.21   thorpej 
   1419      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1420       1.3        pk }
   1421       1.3        pk 
   1422      1.20   thorpej void
   1423      1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1424      1.20   thorpej {
   1425      1.20   thorpej 
   1426      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1427      1.20   thorpej 
   1428      1.20   thorpej 	pp->pr_hardlimit = n;
   1429      1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1430      1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1431      1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1432      1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1433      1.20   thorpej 
   1434      1.20   thorpej 	/*
   1435      1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1436      1.21   thorpej 	 * release the lock.
   1437      1.20   thorpej 	 */
   1438      1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1439      1.20   thorpej 		? 0
   1440      1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1441      1.21   thorpej 
   1442      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1443      1.20   thorpej }
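
/*
 * Example (an illustrative sketch): typical water-mark tuning for the
 * hypothetical "foo_pool"; the numbers and warning text are made up.
 */
#if 0
void
foo_tune(void)
{

	/* Keep at least 16 items in reserve. */
	pool_setlowat(&foo_pool, 16);

	/* Release idle pages once we hold more than 256 items' worth. */
	pool_sethiwat(&foo_pool, 256);

	/* Fail allocations past 1024 items; warn at most every 10s. */
	pool_sethardlimit(&foo_pool, 1024,
	    "WARNING: foo_pool hard limit reached", 10);
}
#endif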
   1444       1.3        pk 
   1445       1.3        pk /*
   1446       1.3        pk  * Release all complete pages that have not been used recently.
   1447       1.3        pk  */
   1448      1.66   thorpej int
   1449      1.59   thorpej #ifdef POOL_DIAGNOSTIC
   1450      1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1451      1.56  sommerfe #else
   1452      1.56  sommerfe pool_reclaim(struct pool *pp)
   1453      1.56  sommerfe #endif
   1454       1.3        pk {
   1455       1.3        pk 	struct pool_item_header *ph, *phnext;
   1456      1.43   thorpej 	struct pool_cache *pc;
   1457      1.21   thorpej 	struct timeval curtime;
   1458      1.61       chs 	struct pool_pagelist pq;
   1459      1.88       chs 	struct timeval diff;
   1460      1.21   thorpej 	int s;
   1461       1.3        pk 
   1462      1.68   thorpej 	if (pp->pr_drain_hook != NULL) {
   1463      1.68   thorpej 		/*
   1464      1.68   thorpej 		 * The drain hook must be called with the pool unlocked.
   1465      1.68   thorpej 		 */
   1466      1.68   thorpej 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
   1467      1.68   thorpej 	}
   1468      1.68   thorpej 
   1469      1.21   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1470      1.66   thorpej 		return (0);
   1471      1.25   thorpej 	pr_enter(pp, file, line);
   1472      1.68   thorpej 
   1473      1.88       chs 	LIST_INIT(&pq);
   1474       1.3        pk 
   1475      1.43   thorpej 	/*
   1476      1.43   thorpej 	 * Reclaim items from the pool's caches.
   1477      1.43   thorpej 	 */
   1478      1.61       chs 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1479  1.99.8.1      tron 		pool_cache_reclaim(pc, &pq);
   1480      1.43   thorpej 
   1481      1.21   thorpej 	s = splclock();
   1482      1.21   thorpej 	curtime = mono_time;
   1483      1.21   thorpej 	splx(s);
   1484      1.21   thorpej 
   1485      1.88       chs 	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
   1486      1.88       chs 		phnext = LIST_NEXT(ph, ph_pagelist);
   1487       1.3        pk 
   1488       1.3        pk 		/* Check our minimum page claim */
   1489       1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1490       1.3        pk 			break;
   1491       1.3        pk 
   1492      1.88       chs 		KASSERT(ph->ph_nmissing == 0);
   1493      1.88       chs 		timersub(&curtime, &ph->ph_time, &diff);
   1494      1.88       chs 		if (diff.tv_sec < pool_inactive_time)
   1495      1.88       chs 			continue;
   1496      1.21   thorpej 
   1497      1.88       chs 		/*
   1498      1.88       chs 		 * If freeing this page would put us below
   1499      1.88       chs 		 * the low water mark, stop now.
   1500      1.88       chs 		 */
   1501      1.88       chs 		if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1502      1.88       chs 		    pp->pr_minitems)
   1503      1.88       chs 			break;
   1504      1.21   thorpej 
   1505      1.88       chs 		pr_rmpage(pp, ph, &pq);
   1506       1.3        pk 	}
   1507       1.3        pk 
   1508      1.25   thorpej 	pr_leave(pp);
   1509      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1510      1.88       chs 	if (LIST_EMPTY(&pq))
   1511      1.66   thorpej 		return (0);
   1512      1.66   thorpej 
   1513  1.99.8.1      tron 	pr_pagelist_free(pp, &pq);
   1514      1.66   thorpej 	return (1);
   1515       1.3        pk }
   1516       1.3        pk 
   1517       1.3        pk /*
   1518       1.3        pk  * Drain pools, one at a time.
   1519      1.21   thorpej  *
   1520      1.21   thorpej  * Note, we must never be called from an interrupt context.
   1521       1.3        pk  */
   1522       1.3        pk void
   1523      1.42   thorpej pool_drain(void *arg)
   1524       1.3        pk {
   1525       1.3        pk 	struct pool *pp;
   1526      1.23   thorpej 	int s;
   1527       1.3        pk 
   1528      1.61       chs 	pp = NULL;
   1529      1.49   thorpej 	s = splvm();
   1530      1.23   thorpej 	simple_lock(&pool_head_slock);
   1531      1.61       chs 	if (drainpp == NULL) {
   1532      1.61       chs 		drainpp = TAILQ_FIRST(&pool_head);
   1533      1.61       chs 	}
   1534      1.61       chs 	if (drainpp) {
   1535      1.61       chs 		pp = drainpp;
   1536      1.61       chs 		drainpp = TAILQ_NEXT(pp, pr_poollist);
   1537      1.61       chs 	}
   1538      1.61       chs 	simple_unlock(&pool_head_slock);
    1539      1.63       chs 	if (pp != NULL)
    1539      1.63       chs 		pool_reclaim(pp);
   1540      1.61       chs 	splx(s);
   1541       1.3        pk }
   1542       1.3        pk 
   1543       1.3        pk /*
   1544       1.3        pk  * Diagnostic helpers.
   1545       1.3        pk  */
   1546       1.3        pk void
   1547      1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1548      1.21   thorpej {
   1549      1.21   thorpej 	int s;
   1550      1.21   thorpej 
   1551      1.49   thorpej 	s = splvm();
   1552      1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1553      1.25   thorpej 		printf("pool %s is locked; try again later\n",
   1554      1.25   thorpej 		    pp->pr_wchan);
   1555      1.25   thorpej 		splx(s);
   1556      1.25   thorpej 		return;
   1557      1.25   thorpej 	}
   1558      1.25   thorpej 	pool_print1(pp, modif, printf);
   1559      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1560      1.21   thorpej 	splx(s);
   1561      1.21   thorpej }
   1562      1.21   thorpej 
   1563      1.25   thorpej void
   1564      1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1565      1.25   thorpej {
   1566      1.25   thorpej 	int didlock = 0;
   1567      1.25   thorpej 
   1568      1.25   thorpej 	if (pp == NULL) {
   1569      1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1570      1.25   thorpej 		return;
   1571      1.25   thorpej 	}
   1572      1.25   thorpej 
   1573      1.25   thorpej 	/*
   1574      1.25   thorpej 	 * Called from DDB; interrupts should be blocked, and all
   1575      1.25   thorpej 	 * other processors should be paused.  We can skip locking
   1576      1.25   thorpej 	 * the pool in this case.
   1577      1.25   thorpej 	 *
   1578      1.25   thorpej 	 * We do a simple_lock_try() just to print the lock
   1579      1.25   thorpej 	 * status, however.
   1580      1.25   thorpej 	 */
   1581      1.25   thorpej 
   1582      1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1583      1.25   thorpej 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1584      1.25   thorpej 	else
   1585      1.25   thorpej 		didlock = 1;
   1586      1.25   thorpej 
   1587      1.25   thorpej 	pool_print1(pp, modif, pr);
   1588      1.25   thorpej 
   1589      1.25   thorpej 	if (didlock)
   1590      1.25   thorpej 		simple_unlock(&pp->pr_slock);
   1591      1.25   thorpej }
   1592      1.25   thorpej 
   1593      1.21   thorpej static void
   1594      1.97      yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
   1595      1.97      yamt     void (*pr)(const char *, ...))
   1596      1.88       chs {
   1597      1.88       chs 	struct pool_item_header *ph;
   1598      1.88       chs #ifdef DIAGNOSTIC
   1599      1.88       chs 	struct pool_item *pi;
   1600      1.88       chs #endif
   1601      1.88       chs 
   1602      1.88       chs 	LIST_FOREACH(ph, pl, ph_pagelist) {
   1603      1.88       chs 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1604      1.88       chs 		    ph->ph_page, ph->ph_nmissing,
   1605      1.88       chs 		    (u_long)ph->ph_time.tv_sec,
   1606      1.88       chs 		    (u_long)ph->ph_time.tv_usec);
   1607      1.88       chs #ifdef DIAGNOSTIC
   1608      1.97      yamt 		if (!(pp->pr_roflags & PR_NOTOUCH)) {
   1609      1.97      yamt 			TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1610      1.97      yamt 				if (pi->pi_magic != PI_MAGIC) {
   1611      1.97      yamt 					(*pr)("\t\t\titem %p, magic 0x%x\n",
   1612      1.97      yamt 					    pi, pi->pi_magic);
   1613      1.97      yamt 				}
   1614      1.88       chs 			}
   1615      1.88       chs 		}
   1616      1.88       chs #endif
   1617      1.88       chs 	}
   1618      1.88       chs }
   1619      1.88       chs 
   1620      1.88       chs static void
   1621      1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1622       1.3        pk {
   1623      1.25   thorpej 	struct pool_item_header *ph;
   1624      1.44   thorpej 	struct pool_cache *pc;
   1625      1.44   thorpej 	struct pool_cache_group *pcg;
   1626      1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1627      1.25   thorpej 	char c;
   1628      1.25   thorpej 
   1629      1.25   thorpej 	while ((c = *modif++) != '\0') {
   1630      1.25   thorpej 		if (c == 'l')
   1631      1.25   thorpej 			print_log = 1;
   1632      1.25   thorpej 		if (c == 'p')
   1633      1.25   thorpej 			print_pagelist = 1;
   1634      1.44   thorpej 		if (c == 'c')
   1635      1.44   thorpej 			print_cache = 1;
   1636      1.25   thorpej 	}
   1637      1.25   thorpej 
   1638      1.25   thorpej 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1639      1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1640      1.25   thorpej 	    pp->pr_roflags);
   1641      1.66   thorpej 	(*pr)("\talloc %p\n", pp->pr_alloc);
   1642      1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1643      1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1644      1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1645      1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1646      1.25   thorpej 
   1647      1.25   thorpej 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1648      1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1649      1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1650      1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1651      1.25   thorpej 
   1652      1.25   thorpej 	if (print_pagelist == 0)
   1653      1.25   thorpej 		goto skip_pagelist;
   1654      1.25   thorpej 
   1655      1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
   1656      1.88       chs 		(*pr)("\n\tempty page list:\n");
   1657      1.97      yamt 	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
   1658      1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
   1659      1.88       chs 		(*pr)("\n\tfull page list:\n");
   1660      1.97      yamt 	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
   1661      1.88       chs 	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
   1662      1.88       chs 		(*pr)("\n\tpartial-page list:\n");
   1663      1.97      yamt 	pool_print_pagelist(pp, &pp->pr_partpages, pr);
   1664      1.88       chs 
   1665      1.25   thorpej 	if (pp->pr_curpage == NULL)
   1666      1.25   thorpej 		(*pr)("\tno current page\n");
   1667      1.25   thorpej 	else
   1668      1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1669      1.25   thorpej 
   1670      1.25   thorpej  skip_pagelist:
   1671      1.25   thorpej 	if (print_log == 0)
   1672      1.25   thorpej 		goto skip_log;
   1673      1.25   thorpej 
   1674      1.25   thorpej 	(*pr)("\n");
   1675      1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1676      1.25   thorpej 		(*pr)("\tno log\n");
   1677      1.25   thorpej 	else
   1678      1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1679       1.3        pk 
   1680      1.25   thorpej  skip_log:
   1681      1.44   thorpej 	if (print_cache == 0)
   1682      1.44   thorpej 		goto skip_cache;
   1683      1.44   thorpej 
   1684      1.61       chs 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1685      1.44   thorpej 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
   1686      1.44   thorpej 		    pc->pc_allocfrom, pc->pc_freeto);
   1687      1.48   thorpej 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1688      1.48   thorpej 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1689      1.61       chs 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1690      1.44   thorpej 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
   1691      1.87   thorpej 			for (i = 0; i < PCG_NOBJECTS; i++) {
   1692      1.87   thorpej 				if (pcg->pcg_objects[i].pcgo_pa !=
   1693      1.87   thorpej 				    POOL_PADDR_INVALID) {
   1694      1.87   thorpej 					(*pr)("\t\t\t%p, 0x%llx\n",
   1695      1.87   thorpej 					    pcg->pcg_objects[i].pcgo_va,
   1696      1.87   thorpej 					    (unsigned long long)
   1697      1.87   thorpej 					    pcg->pcg_objects[i].pcgo_pa);
   1698      1.87   thorpej 				} else {
   1699      1.87   thorpej 					(*pr)("\t\t\t%p\n",
   1700      1.87   thorpej 					    pcg->pcg_objects[i].pcgo_va);
   1701      1.87   thorpej 				}
   1702      1.87   thorpej 			}
   1703      1.44   thorpej 		}
   1704      1.44   thorpej 	}
   1705      1.44   thorpej 
   1706      1.44   thorpej  skip_cache:
   1707      1.88       chs 	pr_enter_check(pp, pr);
   1708      1.88       chs }
   1709      1.88       chs 
   1710      1.88       chs static int
   1711      1.88       chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
   1712      1.88       chs {
   1713      1.88       chs 	struct pool_item *pi;
   1714      1.88       chs 	caddr_t page;
   1715      1.88       chs 	int n;
   1716      1.88       chs 
   1717      1.88       chs 	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
   1718      1.88       chs 	if (page != ph->ph_page &&
   1719      1.88       chs 	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1720      1.88       chs 		if (label != NULL)
   1721      1.88       chs 			printf("%s: ", label);
   1722      1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1723      1.88       chs 		       " at page head addr %p (p %p)\n", pp,
   1724      1.88       chs 			pp->pr_wchan, ph->ph_page,
   1725      1.88       chs 			ph, page);
   1726      1.88       chs 		return 1;
   1727      1.88       chs 	}
   1728       1.3        pk 
   1729      1.97      yamt 	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
   1730      1.97      yamt 		return 0;
   1731      1.97      yamt 
   1732      1.88       chs 	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
   1733      1.88       chs 	     pi != NULL;
   1734      1.88       chs 	     pi = TAILQ_NEXT(pi,pi_list), n++) {
   1735      1.88       chs 
   1736      1.88       chs #ifdef DIAGNOSTIC
   1737      1.88       chs 		if (pi->pi_magic != PI_MAGIC) {
   1738      1.88       chs 			if (label != NULL)
   1739      1.88       chs 				printf("%s: ", label);
   1740      1.88       chs 			printf("pool(%s): free list modified: magic=%x;"
   1741      1.88       chs 			       " page %p; item ordinal %d;"
   1742      1.88       chs 			       " addr %p (p %p)\n",
   1743      1.88       chs 				pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1744      1.88       chs 				n, pi, page);
   1745      1.88       chs 			panic("pool");
   1746      1.88       chs 		}
   1747      1.88       chs #endif
   1748      1.88       chs 		page =
   1749      1.88       chs 		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
   1750      1.88       chs 		if (page == ph->ph_page)
   1751      1.88       chs 			continue;
   1752      1.88       chs 
   1753      1.88       chs 		if (label != NULL)
   1754      1.88       chs 			printf("%s: ", label);
   1755      1.88       chs 		printf("pool(%p:%s): page inconsistency: page %p;"
   1756      1.88       chs 		       " item ordinal %d; addr %p (p %p)\n", pp,
   1757      1.88       chs 			pp->pr_wchan, ph->ph_page,
   1758      1.88       chs 			n, pi, page);
   1759      1.88       chs 		return 1;
   1760      1.88       chs 	}
   1761      1.88       chs 	return 0;
   1762       1.3        pk }
   1763       1.3        pk 
   1764      1.88       chs 
   1765       1.3        pk int
   1766      1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1767       1.3        pk {
   1768       1.3        pk 	struct pool_item_header *ph;
   1769       1.3        pk 	int r = 0;
   1770       1.3        pk 
   1771      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1772      1.88       chs 	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
   1773      1.88       chs 		r = pool_chk_page(pp, label, ph);
   1774      1.88       chs 		if (r) {
   1775      1.88       chs 			goto out;
   1776      1.88       chs 		}
   1777      1.88       chs 	}
   1778      1.88       chs 	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
   1779      1.88       chs 		r = pool_chk_page(pp, label, ph);
   1780      1.88       chs 		if (r) {
   1781       1.3        pk 			goto out;
   1782       1.3        pk 		}
   1783      1.88       chs 	}
   1784      1.88       chs 	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
   1785      1.88       chs 		r = pool_chk_page(pp, label, ph);
   1786      1.88       chs 		if (r) {
   1787       1.3        pk 			goto out;
   1788       1.3        pk 		}
   1789       1.3        pk 	}
   1790      1.88       chs 
   1791       1.3        pk out:
   1792      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1793       1.3        pk 	return (r);
   1794      1.43   thorpej }
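
/*
 * Example (an illustrative sketch): pool_chk() can be sprinkled into
 * debug code to audit all of a pool's page lists after a suspect
 * operation; the label is prepended to any diagnostic it prints.
 */
#if 0
void
foo_audit(void)
{

	if (pool_chk(&foo_pool, "foo after reclaim") != 0)
		panic("foo_pool inconsistent");
}
#endif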
   1795      1.43   thorpej 
   1796      1.43   thorpej /*
   1797      1.43   thorpej  * pool_cache_init:
   1798      1.43   thorpej  *
   1799      1.43   thorpej  *	Initialize a pool cache.
   1800      1.43   thorpej  *
   1801      1.43   thorpej  *	NOTE: If the pool must be protected from interrupts, we expect
   1802      1.43   thorpej  *	to be called at the appropriate interrupt priority level.
   1803      1.43   thorpej  */
   1804      1.43   thorpej void
   1805      1.43   thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1806      1.43   thorpej     int (*ctor)(void *, void *, int),
   1807      1.43   thorpej     void (*dtor)(void *, void *),
   1808      1.43   thorpej     void *arg)
   1809      1.43   thorpej {
   1810      1.43   thorpej 
   1811      1.43   thorpej 	TAILQ_INIT(&pc->pc_grouplist);
   1812      1.43   thorpej 	simple_lock_init(&pc->pc_slock);
   1813      1.43   thorpej 
   1814      1.43   thorpej 	pc->pc_allocfrom = NULL;
   1815      1.43   thorpej 	pc->pc_freeto = NULL;
   1816      1.43   thorpej 	pc->pc_pool = pp;
   1817      1.43   thorpej 
   1818      1.43   thorpej 	pc->pc_ctor = ctor;
   1819      1.43   thorpej 	pc->pc_dtor = dtor;
   1820      1.43   thorpej 	pc->pc_arg  = arg;
   1821      1.43   thorpej 
   1822      1.48   thorpej 	pc->pc_hits   = 0;
   1823      1.48   thorpej 	pc->pc_misses = 0;
   1824      1.48   thorpej 
   1825      1.48   thorpej 	pc->pc_ngroups = 0;
   1826      1.48   thorpej 
   1827      1.48   thorpej 	pc->pc_nitems = 0;
   1828      1.48   thorpej 
   1829      1.43   thorpej 	simple_lock(&pp->pr_slock);
   1830      1.43   thorpej 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
   1831      1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1832      1.43   thorpej }
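
/*
 * Example (an illustrative sketch): layering a cache over the
 * hypothetical "foo_pool".  The constructor and destructor run only
 * when objects cross between the cache and the backing pool, so
 * cached objects keep their constructed state.
 */
#if 0
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	f->f_state = 0;
	return (0);		/* nonzero would fail the allocation */
}

static void
foo_dtor(void *arg, void *obj)
{

	/* Tear down whatever foo_ctor() set up. */
}

void
foo_cache_attach(void)
{

	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}
#endif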
   1833      1.43   thorpej 
   1834      1.43   thorpej /*
   1835      1.43   thorpej  * pool_cache_destroy:
   1836      1.43   thorpej  *
   1837      1.43   thorpej  *	Destroy a pool cache.
   1838      1.43   thorpej  */
   1839      1.43   thorpej void
   1840      1.43   thorpej pool_cache_destroy(struct pool_cache *pc)
   1841      1.43   thorpej {
   1842      1.43   thorpej 	struct pool *pp = pc->pc_pool;
   1843      1.43   thorpej 
   1844      1.43   thorpej 	/* First, invalidate the entire cache. */
   1845      1.43   thorpej 	pool_cache_invalidate(pc);
   1846      1.43   thorpej 
   1847      1.43   thorpej 	/* ...and remove it from the pool's cache list. */
   1848      1.43   thorpej 	simple_lock(&pp->pr_slock);
   1849      1.43   thorpej 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
   1850      1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1851      1.43   thorpej }
   1852      1.43   thorpej 
   1853      1.43   thorpej static __inline void *
   1854      1.87   thorpej pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
   1855      1.43   thorpej {
   1856      1.43   thorpej 	void *object;
   1857      1.43   thorpej 	u_int idx;
   1858      1.43   thorpej 
   1859      1.43   thorpej 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1860      1.45   thorpej 	KASSERT(pcg->pcg_avail != 0);
   1861      1.43   thorpej 	idx = --pcg->pcg_avail;
   1862      1.43   thorpej 
   1863      1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
   1864      1.87   thorpej 	object = pcg->pcg_objects[idx].pcgo_va;
   1865      1.87   thorpej 	if (pap != NULL)
   1866      1.87   thorpej 		*pap = pcg->pcg_objects[idx].pcgo_pa;
   1867      1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = NULL;
   1868      1.43   thorpej 
   1869      1.43   thorpej 	return (object);
   1870      1.43   thorpej }
   1871      1.43   thorpej 
   1872      1.43   thorpej static __inline void
   1873      1.87   thorpej pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
   1874      1.43   thorpej {
   1875      1.43   thorpej 	u_int idx;
   1876      1.43   thorpej 
   1877      1.43   thorpej 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   1878      1.43   thorpej 	idx = pcg->pcg_avail++;
   1879      1.43   thorpej 
   1880      1.87   thorpej 	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
   1881      1.87   thorpej 	pcg->pcg_objects[idx].pcgo_va = object;
   1882      1.87   thorpej 	pcg->pcg_objects[idx].pcgo_pa = pa;
   1883      1.43   thorpej }
   1884      1.43   thorpej 
   1885      1.43   thorpej /*
   1886      1.87   thorpej  * pool_cache_get{,_paddr}:
   1887      1.43   thorpej  *
   1888      1.87   thorpej  *	Get an object from a pool cache (optionally returning
   1889      1.87   thorpej  *	the physical address of the object).
   1890      1.43   thorpej  */
   1891      1.43   thorpej void *
   1892      1.87   thorpej pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
   1893      1.43   thorpej {
   1894      1.43   thorpej 	struct pool_cache_group *pcg;
   1895      1.43   thorpej 	void *object;
   1896      1.58   thorpej 
   1897      1.58   thorpej #ifdef LOCKDEBUG
   1898      1.58   thorpej 	if (flags & PR_WAITOK)
   1899      1.58   thorpej 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
   1900      1.58   thorpej #endif
   1901      1.43   thorpej 
   1902      1.43   thorpej 	simple_lock(&pc->pc_slock);
   1903      1.43   thorpej 
   1904      1.43   thorpej 	if ((pcg = pc->pc_allocfrom) == NULL) {
   1905      1.61       chs 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1906      1.43   thorpej 			if (pcg->pcg_avail != 0) {
   1907      1.43   thorpej 				pc->pc_allocfrom = pcg;
   1908      1.43   thorpej 				goto have_group;
   1909      1.43   thorpej 			}
   1910      1.43   thorpej 		}
   1911      1.43   thorpej 
   1912      1.43   thorpej 		/*
   1913      1.43   thorpej 		 * No groups with any available objects.  Allocate
   1914      1.43   thorpej 		 * a new object, construct it, and return it to
   1915      1.43   thorpej 		 * the caller.  We will allocate a group, if necessary,
   1916      1.43   thorpej 		 * when the object is freed back to the cache.
   1917      1.43   thorpej 		 */
   1918      1.48   thorpej 		pc->pc_misses++;
   1919      1.43   thorpej 		simple_unlock(&pc->pc_slock);
   1920      1.43   thorpej 		object = pool_get(pc->pc_pool, flags);
   1921      1.43   thorpej 		if (object != NULL && pc->pc_ctor != NULL) {
   1922      1.43   thorpej 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   1923      1.43   thorpej 				pool_put(pc->pc_pool, object);
   1924      1.43   thorpej 				return (NULL);
   1925      1.43   thorpej 			}
   1926      1.43   thorpej 		}
   1927      1.87   thorpej 		if (object != NULL && pap != NULL) {
   1928      1.87   thorpej #ifdef POOL_VTOPHYS
   1929      1.87   thorpej 			*pap = POOL_VTOPHYS(object);
   1930      1.87   thorpej #else
   1931      1.87   thorpej 			*pap = POOL_PADDR_INVALID;
   1932      1.87   thorpej #endif
   1933      1.87   thorpej 		}
   1934      1.43   thorpej 		return (object);
   1935      1.43   thorpej 	}
   1936      1.43   thorpej 
   1937      1.43   thorpej  have_group:
   1938      1.48   thorpej 	pc->pc_hits++;
   1939      1.48   thorpej 	pc->pc_nitems--;
   1940      1.87   thorpej 	object = pcg_get(pcg, pap);
   1941      1.43   thorpej 
   1942      1.43   thorpej 	if (pcg->pcg_avail == 0)
   1943      1.43   thorpej 		pc->pc_allocfrom = NULL;
   1944      1.45   thorpej 
   1945      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1946      1.43   thorpej 
   1947      1.43   thorpej 	return (object);
   1948      1.43   thorpej }
   1949      1.43   thorpej 
   1950      1.43   thorpej /*
   1951      1.87   thorpej  * pool_cache_put{,_paddr}:
   1952      1.43   thorpej  *
   1953      1.87   thorpej  *	Put an object back to the pool cache (optionally caching the
   1954      1.87   thorpej  *	physical address of the object).
   1955      1.43   thorpej  */
   1956      1.43   thorpej void
   1957      1.87   thorpej pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
   1958      1.43   thorpej {
   1959      1.43   thorpej 	struct pool_cache_group *pcg;
   1960      1.60   thorpej 	int s;
   1961      1.43   thorpej 
   1962      1.43   thorpej 	simple_lock(&pc->pc_slock);
   1963      1.43   thorpej 
   1964      1.43   thorpej 	if ((pcg = pc->pc_freeto) == NULL) {
   1965      1.61       chs 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1966      1.43   thorpej 			if (pcg->pcg_avail != PCG_NOBJECTS) {
   1967      1.43   thorpej 				pc->pc_freeto = pcg;
   1968      1.43   thorpej 				goto have_group;
   1969      1.43   thorpej 			}
   1970      1.43   thorpej 		}
   1971      1.43   thorpej 
   1972      1.43   thorpej 		/*
   1973      1.43   thorpej 		 * No empty groups to free the object to.  Attempt to
   1974      1.47   thorpej 		 * allocate one.
   1975      1.43   thorpej 		 */
   1976      1.47   thorpej 		simple_unlock(&pc->pc_slock);
   1977      1.60   thorpej 		s = splvm();
   1978      1.43   thorpej 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1979      1.60   thorpej 		splx(s);
   1980      1.43   thorpej 		if (pcg != NULL) {
   1981      1.43   thorpej 			memset(pcg, 0, sizeof(*pcg));
   1982      1.47   thorpej 			simple_lock(&pc->pc_slock);
   1983      1.48   thorpej 			pc->pc_ngroups++;
   1984      1.43   thorpej 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1985      1.47   thorpej 			if (pc->pc_freeto == NULL)
   1986      1.47   thorpej 				pc->pc_freeto = pcg;
   1987      1.43   thorpej 			goto have_group;
   1988      1.43   thorpej 		}
   1989      1.43   thorpej 
   1990      1.43   thorpej 		/*
   1991      1.43   thorpej 		 * Unable to allocate a cache group; destruct the object
   1992      1.43   thorpej 		 * and free it back to the pool.
   1993      1.43   thorpej 		 */
   1994      1.51   thorpej 		pool_cache_destruct_object(pc, object);
   1995      1.43   thorpej 		return;
   1996      1.43   thorpej 	}
   1997      1.43   thorpej 
   1998      1.43   thorpej  have_group:
   1999      1.48   thorpej 	pc->pc_nitems++;
   2000      1.87   thorpej 	pcg_put(pcg, object, pa);
   2001      1.43   thorpej 
   2002      1.43   thorpej 	if (pcg->pcg_avail == PCG_NOBJECTS)
   2003      1.43   thorpej 		pc->pc_freeto = NULL;
   2004      1.43   thorpej 
   2005      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   2006      1.51   thorpej }
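
/*
 * Example (an illustrative sketch): a get/put round trip through the
 * cache that also tracks the physical address, as a DMA descriptor
 * allocator might.  POOL_PADDR_INVALID means no address is known.
 */
#if 0
void
foo_dma_cycle(void)
{
	struct foo *f;
	paddr_t pa;

	f = pool_cache_get_paddr(&foo_cache, PR_WAITOK, &pa);
	if (f != NULL) {
		if (pa != POOL_PADDR_INVALID) {
			/* hand "pa" to the device here */
		}
		pool_cache_put_paddr(&foo_cache, f, pa);
	}
}
#endif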
   2007      1.51   thorpej 
   2008      1.51   thorpej /*
   2009      1.51   thorpej  * pool_cache_destruct_object:
   2010      1.51   thorpej  *
   2011      1.51   thorpej  *	Force destruction of an object and its release back into
   2012      1.51   thorpej  *	the pool.
   2013      1.51   thorpej  */
   2014      1.51   thorpej void
   2015      1.51   thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
   2016      1.51   thorpej {
   2017      1.51   thorpej 
   2018      1.51   thorpej 	if (pc->pc_dtor != NULL)
   2019      1.51   thorpej 		(*pc->pc_dtor)(pc->pc_arg, object);
   2020      1.51   thorpej 	pool_put(pc->pc_pool, object);
   2021      1.43   thorpej }
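
/*
 * Example (an illustrative sketch): an object found to be unusable can
 * be destructed immediately rather than cached for reuse.  The
 * "FOO_BROKEN" state and the pool_cache_put() convenience macro from
 * <sys/pool.h> are assumptions here.
 */
#if 0
void
foo_release(struct foo *f)
{

	if (f->f_state == FOO_BROKEN)
		pool_cache_destruct_object(&foo_cache, f);
	else
		pool_cache_put(&foo_cache, f);
}
#endif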
   2022      1.43   thorpej 
   2023      1.43   thorpej /*
   2024  1.99.8.1      tron  * pool_cache_invalidate:
   2025      1.43   thorpej  *
   2026  1.99.8.1      tron  *	Invalidate a pool cache (destruct and release all of the
   2027  1.99.8.1      tron  *	cached objects).
   2028      1.43   thorpej  */
   2029  1.99.8.1      tron void
   2030  1.99.8.1      tron pool_cache_invalidate(struct pool_cache *pc)
   2031      1.43   thorpej {
   2032  1.99.8.1      tron 	struct pool_pagelist pq;
   2033      1.43   thorpej 	struct pool_cache_group *pcg, *npcg;
   2034      1.43   thorpej 	void *object;
   2035  1.99.8.1      tron 
   2036  1.99.8.1      tron 	LIST_INIT(&pq);
   2037  1.99.8.1      tron 
   2038  1.99.8.1      tron 	simple_lock(&pc->pc_slock);
   2039  1.99.8.1      tron 	simple_lock(&pc->pc_pool->pr_slock);
   2040      1.43   thorpej 
   2041      1.43   thorpej 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   2042      1.43   thorpej 	     pcg = npcg) {
   2043      1.43   thorpej 		npcg = TAILQ_NEXT(pcg, pcg_list);
   2044      1.43   thorpej 		while (pcg->pcg_avail != 0) {
   2045      1.48   thorpej 			pc->pc_nitems--;
   2046      1.87   thorpej 			object = pcg_get(pcg, NULL);
   2047      1.45   thorpej 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   2048      1.45   thorpej 				pc->pc_allocfrom = NULL;
   2049      1.43   thorpej 			if (pc->pc_dtor != NULL)
   2050      1.43   thorpej 				(*pc->pc_dtor)(pc->pc_arg, object);
   2051  1.99.8.1      tron 			pool_do_put(pc->pc_pool, object, &pq);
   2052      1.43   thorpej 		}
   2053      1.43   thorpej 	}
   2054      1.43   thorpej 
   2055  1.99.8.1      tron 	simple_unlock(&pc->pc_pool->pr_slock);
   2056      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   2057  1.99.8.1      tron 
   2058  1.99.8.1      tron 	if (! LIST_EMPTY(&pq))
   2059  1.99.8.1      tron 		pr_pagelist_free(pc->pc_pool, &pq);
   2060      1.43   thorpej }
   2061      1.43   thorpej 
   2062      1.43   thorpej /*
   2063      1.43   thorpej  * pool_cache_reclaim:
   2064      1.43   thorpej  *
   2065      1.43   thorpej  *	Reclaim a pool cache for pool_reclaim().
   2066      1.43   thorpej  */
   2067      1.43   thorpej static void
   2068  1.99.8.1      tron pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq)
   2069      1.43   thorpej {
   2070  1.99.8.1      tron 	struct pool_cache_group *pcg, *npcg;
   2071  1.99.8.1      tron 	void *object;
   2072  1.99.8.1      tron 	int s;
   2073  1.99.8.1      tron 
   2074  1.99.8.1      tron 	/*
   2075  1.99.8.1      tron 	 * We're locking in the wrong order (normally pool_cache -> pool,
   2076  1.99.8.1      tron 	 * but the pool is already locked when we get here), so we have
   2077  1.99.8.1      tron 	 * to use trylock.  If we can't lock the pool_cache, it's not really
   2078  1.99.8.1      tron 	 * a big deal here.
   2079  1.99.8.1      tron 	 */
   2080  1.99.8.1      tron 	if (simple_lock_try(&pc->pc_slock) == 0)
   2081  1.99.8.1      tron 		return;
   2082  1.99.8.1      tron 
   2083  1.99.8.1      tron 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   2084  1.99.8.1      tron 	     pcg = npcg) {
   2085  1.99.8.1      tron 		npcg = TAILQ_NEXT(pcg, pcg_list);
   2086  1.99.8.1      tron 		while (pcg->pcg_avail != 0) {
   2087  1.99.8.1      tron 			pc->pc_nitems--;
   2088  1.99.8.1      tron 			object = pcg_get(pcg, NULL);
   2089  1.99.8.1      tron 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   2090  1.99.8.1      tron 				pc->pc_allocfrom = NULL;
   2091  1.99.8.1      tron 			if (pc->pc_dtor != NULL)
   2092  1.99.8.1      tron 				(*pc->pc_dtor)(pc->pc_arg, object);
   2093  1.99.8.1      tron 			pool_do_put(pc->pc_pool, object, pq);
   2094  1.99.8.1      tron 		}
   2095  1.99.8.1      tron 		pc->pc_ngroups--;
   2096  1.99.8.1      tron 		TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   2097  1.99.8.1      tron 		if (pc->pc_freeto == pcg)
   2098  1.99.8.1      tron 			pc->pc_freeto = NULL;
   2099  1.99.8.1      tron 		s = splvm();
   2100  1.99.8.1      tron 		pool_put(&pcgpool, pcg);
   2101  1.99.8.1      tron 		splx(s);
   2102  1.99.8.1      tron 	}
   2103      1.43   thorpej 
   2104      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   2105       1.3        pk }
   2106      1.66   thorpej 
   2107      1.66   thorpej /*
   2108      1.66   thorpej  * Pool backend allocators.
   2109      1.66   thorpej  *
   2110      1.66   thorpej  * Each pool has a backend allocator that handles allocation, deallocation,
   2111      1.66   thorpej  * and any additional draining that might be needed.
   2112      1.66   thorpej  *
   2113      1.66   thorpej  * We provide two standard allocators:
   2114      1.66   thorpej  *
   2115      1.66   thorpej  *	pool_allocator_kmem - the default when no allocator is specified
   2116      1.66   thorpej  *
   2117      1.66   thorpej  *	pool_allocator_nointr - used for pools that will not be accessed
   2118      1.66   thorpej  *	in interrupt context.
   2119      1.66   thorpej  */
   2120      1.66   thorpej void	*pool_page_alloc(struct pool *, int);
   2121      1.66   thorpej void	pool_page_free(struct pool *, void *);
   2122      1.66   thorpej 
   2123  1.99.8.2      tron #ifdef POOL_SUBPAGE
   2124  1.99.8.2      tron struct pool_allocator pool_allocator_kmem_fullpage = {
   2125  1.99.8.2      tron 	pool_page_alloc, pool_page_free, 0,
   2126  1.99.8.2      tron };
   2127  1.99.8.2      tron #else
   2128      1.66   thorpej struct pool_allocator pool_allocator_kmem = {
   2129      1.66   thorpej 	pool_page_alloc, pool_page_free, 0,
   2130      1.66   thorpej };
   2131  1.99.8.2      tron #endif
   2132      1.66   thorpej 
   2133      1.66   thorpej void	*pool_page_alloc_nointr(struct pool *, int);
   2134      1.66   thorpej void	pool_page_free_nointr(struct pool *, void *);
   2135      1.66   thorpej 
   2136  1.99.8.2      tron #ifdef POOL_SUBPAGE
   2137  1.99.8.2      tron struct pool_allocator pool_allocator_nointr_fullpage = {
   2138  1.99.8.2      tron 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2139  1.99.8.2      tron };
   2140  1.99.8.2      tron #else
   2141      1.66   thorpej struct pool_allocator pool_allocator_nointr = {
   2142      1.66   thorpej 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
   2143      1.66   thorpej };
   2144  1.99.8.2      tron #endif
   2145      1.66   thorpej 
   2146      1.66   thorpej #ifdef POOL_SUBPAGE
   2147      1.66   thorpej void	*pool_subpage_alloc(struct pool *, int);
   2148      1.66   thorpej void	pool_subpage_free(struct pool *, void *);
   2149      1.66   thorpej 
   2150  1.99.8.2      tron struct pool_allocator pool_allocator_kmem = {
   2151  1.99.8.2      tron 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2152  1.99.8.2      tron };
   2153  1.99.8.2      tron 
   2154  1.99.8.2      tron void	*pool_subpage_alloc_nointr(struct pool *, int);
   2155  1.99.8.2      tron void	pool_subpage_free_nointr(struct pool *, void *);
   2156  1.99.8.2      tron 
   2157  1.99.8.2      tron struct pool_allocator pool_allocator_nointr = {
   2158  1.99.8.2      tron 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
   2159      1.66   thorpej };
   2160      1.66   thorpej #endif /* POOL_SUBPAGE */
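
/*
 * Example (an illustrative sketch): binding a pool to a private
 * backend allocator.  "bar_page_alloc"/"bar_page_free" are
 * hypothetical routines with the pa_alloc/pa_free signatures declared
 * above; a pa_pagesz of 0 selects the default page size.
 */
#if 0
void	*bar_page_alloc(struct pool *, int);
void	bar_page_free(struct pool *, void *);

struct pool_allocator bar_allocator = {
	bar_page_alloc, bar_page_free, 0,
};

static struct pool bar_pool;

void
bar_init(void)
{

	pool_init(&bar_pool, 128, 0, 0, 0, "barpl", &bar_allocator);
}
#endif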
   2161      1.66   thorpej 
   2162      1.66   thorpej /*
   2163      1.66   thorpej  * We have at least three different resources for the same allocation and
   2164      1.66   thorpej  * each resource can be depleted.  First, we have the ready elements in the
   2165      1.66   thorpej  * pool.  Then we have the resource (typically a vm_map) for this allocator.
    2166      1.66   thorpej  * Finally, we have physical memory.  A wait on any one of these can
    2167      1.66   thorpej  * become unnecessary as soon as any other is freed, but the kernel doesn't
    2168      1.66   thorpej  * support sleeping on multiple wait channels, so we employ another strategy.
   2169      1.66   thorpej  *
   2170      1.66   thorpej  * The caller sleeps on the pool (so that it can be awakened when an item
   2171      1.66   thorpej  * is returned to the pool), but we set PA_WANT on the allocator.  When a
   2172      1.66   thorpej  * page is returned to the allocator and PA_WANT is set, pool_allocator_free
   2173      1.66   thorpej  * will wake up all sleeping pools belonging to this allocator.
   2174      1.66   thorpej  *
   2175      1.66   thorpej  * XXX Thundering herd.
   2176      1.66   thorpej  */
   2177      1.66   thorpej void *
   2178      1.66   thorpej pool_allocator_alloc(struct pool *org, int flags)
   2179      1.66   thorpej {
   2180      1.66   thorpej 	struct pool_allocator *pa = org->pr_alloc;
   2181      1.66   thorpej 	struct pool *pp, *start;
   2182      1.66   thorpej 	int s, freed;
   2183      1.66   thorpej 	void *res;
   2184      1.66   thorpej 
   2185      1.91      yamt 	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
   2186      1.91      yamt 
   2187      1.66   thorpej 	do {
   2188      1.66   thorpej 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   2189      1.66   thorpej 			return (res);
   2190      1.68   thorpej 		if ((flags & PR_WAITOK) == 0) {
   2191      1.68   thorpej 			/*
    2192      1.68   thorpej 			 * We only run the drain hook here if PR_NOWAIT;
    2193      1.68   thorpej 			 * otherwise the hook will be run in pool_reclaim().
    2194      1.68   thorpej 			 * (See the registration sketch after this function.)
   2195      1.68   thorpej 			 */
   2196      1.68   thorpej 			if (org->pr_drain_hook != NULL) {
   2197      1.68   thorpej 				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
   2198      1.68   thorpej 				    flags);
   2199      1.68   thorpej 				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
   2200      1.68   thorpej 					return (res);
   2201      1.68   thorpej 			}
   2202      1.66   thorpej 			break;
   2203      1.68   thorpej 		}
   2204      1.66   thorpej 
   2205      1.66   thorpej 		/*
   2206      1.66   thorpej 		 * Drain all pools, except "org", that use this
   2207      1.66   thorpej 		 * allocator.  We do this to reclaim VA space.
   2208      1.66   thorpej 		 * pa_alloc is responsible for waiting for
   2209      1.66   thorpej 		 * physical memory.
   2210      1.66   thorpej 		 *
    2211      1.66   thorpej 		 * XXX We risk looping forever if someone calls
    2212      1.66   thorpej 		 * pool_destroy() on "start".  But there is no
   2213      1.66   thorpej 		 * other way to have potentially sleeping pool_reclaim,
   2214      1.66   thorpej 		 * non-sleeping locks on pool_allocator, and some
   2215      1.66   thorpej 		 * stirring of drained pools in the allocator.
   2216      1.68   thorpej 		 *
   2217      1.68   thorpej 		 * XXX Maybe we should use pool_head_slock for locking
   2218      1.68   thorpej 		 * the allocators?
   2219      1.66   thorpej 		 */
   2220      1.66   thorpej 		freed = 0;
   2221      1.66   thorpej 
   2222      1.66   thorpej 		s = splvm();
   2223      1.66   thorpej 		simple_lock(&pa->pa_slock);
   2224      1.66   thorpej 		pp = start = TAILQ_FIRST(&pa->pa_list);
   2225      1.66   thorpej 		do {
   2226      1.66   thorpej 			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
   2227      1.66   thorpej 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
   2228      1.66   thorpej 			if (pp == org)
   2229      1.66   thorpej 				continue;
   2230      1.73   thorpej 			simple_unlock(&pa->pa_slock);
   2231      1.66   thorpej 			freed = pool_reclaim(pp);
   2232      1.73   thorpej 			simple_lock(&pa->pa_slock);
   2233      1.66   thorpej 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
   2234      1.66   thorpej 			 freed == 0);
   2235      1.66   thorpej 
   2236      1.66   thorpej 		if (freed == 0) {
   2237      1.66   thorpej 			/*
    2238      1.66   thorpej 			 * We set PA_WANT here; the caller will most likely
   2239      1.66   thorpej 			 * sleep waiting for pages (if not, this won't hurt
   2240      1.66   thorpej 			 * that much), and there is no way to set this in
   2241      1.66   thorpej 			 * the caller without violating locking order.
   2242      1.66   thorpej 			 */
   2243      1.66   thorpej 			pa->pa_flags |= PA_WANT;
   2244      1.66   thorpej 		}
   2245      1.66   thorpej 		simple_unlock(&pa->pa_slock);
   2246      1.66   thorpej 		splx(s);
   2247      1.66   thorpej 	} while (freed);
   2248      1.66   thorpej 	return (NULL);
   2249      1.66   thorpej }
   2250      1.66   thorpej 
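                             /*
                              * Registration sketch for the drain hook run above (editorial
                              * addition): a subsystem that caches items outside its pool can
                              * let the allocator claw them back before a PR_NOWAIT allocation
                              * fails.  "foo_drain" and "foo_pool" are hypothetical names.
                              *
                              *	static void
                              *	foo_drain(void *arg, int flags)
                              *	{
                              *		... release privately cached items here ...
                              *	}
                              *
                              *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
                              */
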
   2251      1.66   thorpej void
   2252      1.66   thorpej pool_allocator_free(struct pool *pp, void *v)
   2253      1.66   thorpej {
   2254      1.66   thorpej 	struct pool_allocator *pa = pp->pr_alloc;
   2255      1.66   thorpej 	int s;
   2256      1.66   thorpej 
   2257      1.91      yamt 	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
   2258      1.91      yamt 
   2259      1.66   thorpej 	(*pa->pa_free)(pp, v);
   2260      1.66   thorpej 
   2261      1.66   thorpej 	s = splvm();
   2262      1.66   thorpej 	simple_lock(&pa->pa_slock);
   2263      1.66   thorpej 	if ((pa->pa_flags & PA_WANT) == 0) {
   2264      1.66   thorpej 		simple_unlock(&pa->pa_slock);
   2265      1.66   thorpej 		splx(s);
   2266      1.66   thorpej 		return;
   2267      1.66   thorpej 	}
   2268      1.66   thorpej 
   2269      1.66   thorpej 	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
   2270      1.66   thorpej 		simple_lock(&pp->pr_slock);
   2271      1.66   thorpej 		if ((pp->pr_flags & PR_WANTED) != 0) {
   2272      1.66   thorpej 			pp->pr_flags &= ~PR_WANTED;
   2273      1.66   thorpej 			wakeup(pp);
   2274      1.66   thorpej 		}
   2275      1.69   thorpej 		simple_unlock(&pp->pr_slock);
   2276      1.66   thorpej 	}
   2277      1.66   thorpej 	pa->pa_flags &= ~PA_WANT;
   2278      1.66   thorpej 	simple_unlock(&pa->pa_slock);
   2279      1.66   thorpej 	splx(s);
   2280      1.66   thorpej }
   2281      1.66   thorpej 
   2282      1.66   thorpej void *
   2283      1.66   thorpej pool_page_alloc(struct pool *pp, int flags)
   2284      1.66   thorpej {
   2285      1.66   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2286      1.66   thorpej 
   2287      1.98      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, NULL, waitok));
   2288      1.66   thorpej }
   2289      1.66   thorpej 
   2290      1.66   thorpej void
   2291      1.66   thorpej pool_page_free(struct pool *pp, void *v)
   2292      1.66   thorpej {
   2293      1.66   thorpej 
   2294      1.98      yamt 	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
   2295      1.98      yamt }
   2296      1.98      yamt 
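                             /*
                              * Editorial note: the *_meta variants below back the pool
                              * subsystem's own internal pools.  They use the plain, uncached
                              * uvm_km entry points; presumably so that allocating metadata
                              * pages cannot recurse through the poolpage cache back into the
                              * pool code.
                              */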
   2297      1.98      yamt static void *
   2298      1.98      yamt pool_page_alloc_meta(struct pool *pp, int flags)
   2299      1.98      yamt {
   2300      1.98      yamt 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2301      1.98      yamt 
   2302      1.98      yamt 	return ((void *) uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
   2303      1.98      yamt }
   2304      1.98      yamt 
   2305      1.98      yamt static void
   2306      1.98      yamt pool_page_free_meta(struct pool *pp, void *v)
   2307      1.98      yamt {
   2308      1.98      yamt 
   2309      1.98      yamt 	uvm_km_free_poolpage1(kmem_map, (vaddr_t) v);
   2310      1.66   thorpej }
   2311      1.66   thorpej 
   2312      1.66   thorpej #ifdef POOL_SUBPAGE
   2313      1.66   thorpej /* Sub-page allocator, for machines with large hardware pages. */
   2314      1.66   thorpej void *
   2315      1.66   thorpej pool_subpage_alloc(struct pool *pp, int flags)
   2316      1.66   thorpej {
   2317      1.93       dbj 	void *v;
   2318      1.93       dbj 	int s;
   2319      1.93       dbj 	s = splvm();
   2320      1.93       dbj 	v = pool_get(&psppool, flags);
   2321      1.93       dbj 	splx(s);
   2322      1.93       dbj 	return v;
   2323      1.66   thorpej }
   2324      1.66   thorpej 
   2325      1.66   thorpej void
   2326      1.66   thorpej pool_subpage_free(struct pool *pp, void *v)
   2327      1.66   thorpej {
   2328      1.93       dbj 	int s;
   2329      1.93       dbj 	s = splvm();
   2330      1.66   thorpej 	pool_put(&psppool, v);
   2331      1.93       dbj 	splx(s);
   2332      1.66   thorpej }
   2333      1.66   thorpej 
   2334      1.66   thorpej /* We don't provide a real nointr allocator.  Maybe later. */
   2335      1.66   thorpej void *
   2336  1.99.8.2      tron pool_subpage_alloc_nointr(struct pool *pp, int flags)
   2337      1.66   thorpej {
   2338      1.66   thorpej 
   2339      1.66   thorpej 	return (pool_subpage_alloc(pp, flags));
   2340      1.66   thorpej }
   2341      1.66   thorpej 
   2342      1.66   thorpej void
   2343  1.99.8.2      tron pool_subpage_free_nointr(struct pool *pp, void *v)
   2344      1.66   thorpej {
   2345      1.66   thorpej 
   2346      1.66   thorpej 	pool_subpage_free(pp, v);
   2347      1.66   thorpej }
    2348  1.99.8.2      tron #endif /* POOL_SUBPAGE */

   2349      1.66   thorpej void *
   2350      1.66   thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
   2351      1.66   thorpej {
   2352      1.66   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   2353      1.66   thorpej 
   2354      1.98      yamt 	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map,
   2355      1.66   thorpej 	    uvm.kernel_object, waitok));
   2356      1.66   thorpej }
   2357      1.66   thorpej 
   2358      1.66   thorpej void
   2359      1.66   thorpej pool_page_free_nointr(struct pool *pp, void *v)
   2360      1.66   thorpej {
   2361      1.66   thorpej 
   2362      1.98      yamt 	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
   2363      1.66   thorpej }
   2364