subr_pool.c revision 1.50.2.4
      1  1.50.2.4   nathanw /*	$NetBSD: subr_pool.c,v 1.50.2.4 2001/10/22 20:41:50 nathanw Exp $	*/
      2       1.1        pk 
      3       1.1        pk /*-
      4      1.43   thorpej  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5       1.1        pk  * All rights reserved.
      6       1.1        pk  *
      7       1.1        pk  * This code is derived from software contributed to The NetBSD Foundation
      8      1.20   thorpej  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
      9      1.20   thorpej  * Simulation Facility, NASA Ames Research Center.
     10       1.1        pk  *
     11       1.1        pk  * Redistribution and use in source and binary forms, with or without
     12       1.1        pk  * modification, are permitted provided that the following conditions
     13       1.1        pk  * are met:
     14       1.1        pk  * 1. Redistributions of source code must retain the above copyright
     15       1.1        pk  *    notice, this list of conditions and the following disclaimer.
     16       1.1        pk  * 2. Redistributions in binary form must reproduce the above copyright
     17       1.1        pk  *    notice, this list of conditions and the following disclaimer in the
     18       1.1        pk  *    documentation and/or other materials provided with the distribution.
     19       1.1        pk  * 3. All advertising materials mentioning features or use of this software
     20       1.1        pk  *    must display the following acknowledgement:
     21      1.13  christos  *	This product includes software developed by the NetBSD
     22      1.13  christos  *	Foundation, Inc. and its contributors.
     23       1.1        pk  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24       1.1        pk  *    contributors may be used to endorse or promote products derived
     25       1.1        pk  *    from this software without specific prior written permission.
     26       1.1        pk  *
     27       1.1        pk  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28       1.1        pk  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29       1.1        pk  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30       1.1        pk  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31       1.1        pk  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32       1.1        pk  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33       1.1        pk  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34       1.1        pk  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35       1.1        pk  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36       1.1        pk  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37       1.1        pk  * POSSIBILITY OF SUCH DAMAGE.
     38       1.1        pk  */
     39      1.24    scottr 
     40      1.25   thorpej #include "opt_pool.h"
     41      1.24    scottr #include "opt_poollog.h"
     42      1.28   thorpej #include "opt_lockdebug.h"
     43       1.1        pk 
     44       1.1        pk #include <sys/param.h>
     45       1.1        pk #include <sys/systm.h>
     46       1.1        pk #include <sys/proc.h>
     47       1.1        pk #include <sys/errno.h>
     48       1.1        pk #include <sys/kernel.h>
     49       1.1        pk #include <sys/malloc.h>
     50       1.1        pk #include <sys/lock.h>
     51       1.1        pk #include <sys/pool.h>
     52      1.20   thorpej #include <sys/syslog.h>
     53       1.3        pk 
     54       1.3        pk #include <uvm/uvm.h>
     55       1.3        pk 
     56       1.1        pk /*
     57       1.1        pk  * Pool resource management utility.
     58       1.3        pk  *
     59       1.3        pk  * Memory is allocated in pages which are split into pieces according
     60       1.3        pk  * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
     61       1.3        pk  * in the pool structure and the individual pool items are on a linked list
     62       1.3        pk  * headed by `ph_itemlist' in each page header. The memory for building
     63       1.3        pk  * the page list is either taken from the allocated pages themselves (for
     64       1.3        pk  * small pool items) or taken from an internal pool of page headers (`phpool').
     65       1.1        pk  */
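/*
 * Usage sketch (illustrative only, not part of the original source; the
 * pool name, item type and variable names below are made up):
 *
 *	static struct pool example_pool;
 *
 *	pool_init(&example_pool, sizeof(struct example_item), 0, 0, 0,
 *	    "examplpl", 0, NULL, NULL, 0);
 *
 *	item = pool_get(&example_pool, PR_WAITOK);
 *	...
 *	pool_put(&example_pool, item);
 *
 * Passing NULL for both the alloc and release functions, and 0 for the
 * page size, selects the default back-end page allocator and PAGE_SIZE
 * pages (see pool_init() below).
 */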
     66       1.1        pk 
     67       1.3        pk /* List of all pools */
     68       1.5   thorpej TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
     69       1.3        pk 
     70       1.3        pk /* Private pool for page header structures */
     71       1.3        pk static struct pool phpool;
     72       1.3        pk 
     73  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
     74  1.50.2.4   nathanw /* Pool of subpages for use by normal pools. */
     75  1.50.2.4   nathanw static struct pool psppool;
     76  1.50.2.4   nathanw #endif
     77  1.50.2.4   nathanw 
     78       1.3        pk /* # of seconds to retain page after last use */
     79       1.3        pk int pool_inactive_time = 10;
     80       1.3        pk 
     81       1.3        pk /* Next candidate for drainage (see pool_drain()) */
     82      1.23   thorpej static struct pool	*drainpp;
     83      1.23   thorpej 
     84      1.23   thorpej /* This spin lock protects both pool_head and drainpp. */
     85      1.23   thorpej struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
     86       1.3        pk 
     87       1.3        pk struct pool_item_header {
     88       1.3        pk 	/* Page headers */
     89       1.3        pk 	TAILQ_ENTRY(pool_item_header)
     90       1.3        pk 				ph_pagelist;	/* pool page list */
     91       1.3        pk 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
     92       1.3        pk 	LIST_ENTRY(pool_item_header)
     93       1.3        pk 				ph_hashlist;	/* Off-page page headers */
     94       1.3        pk 	int			ph_nmissing;	/* # of chunks in use */
     95       1.3        pk 	caddr_t			ph_page;	/* this page's address */
     96       1.3        pk 	struct timeval		ph_time;	/* last referenced */
     97       1.3        pk };
     98  1.50.2.3   nathanw TAILQ_HEAD(pool_pagelist,pool_item_header);
     99       1.3        pk 
    100       1.1        pk struct pool_item {
    101       1.3        pk #ifdef DIAGNOSTIC
    102       1.3        pk 	int pi_magic;
    103      1.33       chs #endif
    104      1.25   thorpej #define	PI_MAGIC 0xdeadbeef
    105       1.3        pk 	/* Other entries use only this list entry */
    106       1.3        pk 	TAILQ_ENTRY(pool_item)	pi_list;
    107       1.3        pk };
    108       1.3        pk 
    109      1.25   thorpej #define	PR_HASH_INDEX(pp,addr) \
    110       1.3        pk 	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
    111       1.3        pk 
    112  1.50.2.1   nathanw #define	POOL_NEEDS_CATCHUP(pp)						\
    113  1.50.2.1   nathanw 	((pp)->pr_nitems < (pp)->pr_minitems)
    114  1.50.2.1   nathanw 
    115      1.43   thorpej /*
    116      1.43   thorpej  * Pool cache management.
    117      1.43   thorpej  *
    118      1.43   thorpej  * Pool caches provide a way for constructed objects to be cached by the
    119      1.43   thorpej  * pool subsystem.  This can lead to performance improvements by avoiding
     120      1.43   thorpej  * needless object construction/destruction; both are deferred until
     121      1.43   thorpej  * absolutely necessary.
    122      1.43   thorpej  *
    123      1.43   thorpej  * Caches are grouped into cache groups.  Each cache group references
    124      1.43   thorpej  * up to 16 constructed objects.  When a cache allocates an object
    125      1.43   thorpej  * from the pool, it calls the object's constructor and places it into
    126      1.43   thorpej  * a cache group.  When a cache group frees an object back to the pool,
    127      1.43   thorpej  * it first calls the object's destructor.  This allows the object to
    128      1.43   thorpej  * persist in constructed form while freed to the cache.
    129      1.43   thorpej  *
    130      1.43   thorpej  * Multiple caches may exist for each pool.  This allows a single
    131      1.43   thorpej  * object type to have multiple constructed forms.  The pool references
    132      1.43   thorpej  * each cache, so that when a pool is drained by the pagedaemon, it can
    133      1.43   thorpej  * drain each individual cache as well.  Each time a cache is drained,
    134      1.43   thorpej  * the most idle cache group is freed to the pool in its entirety.
    135      1.43   thorpej  *
     136      1.43   thorpej  * Pool caches are layered on top of pools.  By layering them, we can avoid
    137      1.43   thorpej  * the complexity of cache management for pools which would not benefit
    138      1.43   thorpej  * from it.
    139      1.43   thorpej  */
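/*
 * Usage sketch for pool caches (illustrative only, not part of the
 * original source; the names are made up and the calls shown follow the
 * pool_cache declarations of this vintage of <sys/pool.h>; treat the
 * exact prototypes as an assumption):
 *
 *	static struct pool_cache example_cache;
 *
 *	pool_cache_init(&example_cache, &example_pool,
 *	    example_ctor, example_dtor, NULL);
 *
 *	obj = pool_cache_get(&example_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&example_cache, obj);
 *
 * A hit in pool_cache_get() returns an already-constructed object
 * without calling the constructor; pool_cache_put() leaves the object
 * constructed until its cache group is reclaimed or the cache is
 * destroyed, at which point the destructor runs.
 */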
    140      1.43   thorpej 
    141      1.43   thorpej /* The cache group pool. */
    142      1.43   thorpej static struct pool pcgpool;
    143      1.43   thorpej 
    144      1.43   thorpej /* The pool cache group. */
    145      1.43   thorpej #define	PCG_NOBJECTS		16
    146      1.43   thorpej struct pool_cache_group {
    147      1.43   thorpej 	TAILQ_ENTRY(pool_cache_group)
    148      1.43   thorpej 		pcg_list;	/* link in the pool cache's group list */
    149      1.43   thorpej 	u_int	pcg_avail;	/* # available objects */
    150      1.43   thorpej 				/* pointers to the objects */
    151      1.43   thorpej 	void	*pcg_objects[PCG_NOBJECTS];
    152      1.43   thorpej };
    153       1.3        pk 
    154      1.43   thorpej static void	pool_cache_reclaim(struct pool_cache *);
    155       1.3        pk 
    156      1.42   thorpej static int	pool_catchup(struct pool *);
    157  1.50.2.1   nathanw static void	pool_prime_page(struct pool *, caddr_t,
    158  1.50.2.1   nathanw 		    struct pool_item_header *);
    159      1.42   thorpej static void	*pool_page_alloc(unsigned long, int, int);
    160      1.42   thorpej static void	pool_page_free(void *, unsigned long, int);
    161  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
    162  1.50.2.4   nathanw static void	*pool_subpage_alloc(unsigned long, int, int);
    163  1.50.2.4   nathanw static void	pool_subpage_free(void *, unsigned long, int);
    164  1.50.2.4   nathanw #endif
    165       1.3        pk 
    166      1.42   thorpej static void pool_print1(struct pool *, const char *,
    167      1.42   thorpej 	void (*)(const char *, ...));
    168       1.3        pk 
    169       1.3        pk /*
    170  1.50.2.1   nathanw  * Pool log entry. An array of these is allocated in pool_init().
    171       1.3        pk  */
    172       1.3        pk struct pool_log {
    173       1.3        pk 	const char	*pl_file;
    174       1.3        pk 	long		pl_line;
    175       1.3        pk 	int		pl_action;
    176      1.25   thorpej #define	PRLOG_GET	1
    177      1.25   thorpej #define	PRLOG_PUT	2
    178       1.3        pk 	void		*pl_addr;
    179       1.1        pk };
    180       1.1        pk 
    181       1.3        pk /* Number of entries in pool log buffers */
    182      1.17   thorpej #ifndef POOL_LOGSIZE
    183      1.17   thorpej #define	POOL_LOGSIZE	10
    184      1.17   thorpej #endif
    185      1.17   thorpej 
    186      1.17   thorpej int pool_logsize = POOL_LOGSIZE;
    187       1.1        pk 
    188  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
    189      1.42   thorpej static __inline void
    190      1.42   thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
    191       1.3        pk {
    192       1.3        pk 	int n = pp->pr_curlogentry;
    193       1.3        pk 	struct pool_log *pl;
    194       1.3        pk 
    195      1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    196       1.3        pk 		return;
    197       1.3        pk 
    198       1.3        pk 	/*
    199       1.3        pk 	 * Fill in the current entry. Wrap around and overwrite
    200       1.3        pk 	 * the oldest entry if necessary.
    201       1.3        pk 	 */
    202       1.3        pk 	pl = &pp->pr_log[n];
    203       1.3        pk 	pl->pl_file = file;
    204       1.3        pk 	pl->pl_line = line;
    205       1.3        pk 	pl->pl_action = action;
    206       1.3        pk 	pl->pl_addr = v;
    207       1.3        pk 	if (++n >= pp->pr_logsize)
    208       1.3        pk 		n = 0;
    209       1.3        pk 	pp->pr_curlogentry = n;
    210       1.3        pk }
    211       1.3        pk 
    212       1.3        pk static void
    213      1.42   thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
    214      1.42   thorpej     void (*pr)(const char *, ...))
    215       1.3        pk {
    216       1.3        pk 	int i = pp->pr_logsize;
    217       1.3        pk 	int n = pp->pr_curlogentry;
    218       1.3        pk 
    219      1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
    220       1.3        pk 		return;
    221       1.3        pk 
    222       1.3        pk 	/*
    223       1.3        pk 	 * Print all entries in this pool's log.
    224       1.3        pk 	 */
    225       1.3        pk 	while (i-- > 0) {
    226       1.3        pk 		struct pool_log *pl = &pp->pr_log[n];
    227       1.3        pk 		if (pl->pl_action != 0) {
    228      1.25   thorpej 			if (pi == NULL || pi == pl->pl_addr) {
    229      1.25   thorpej 				(*pr)("\tlog entry %d:\n", i);
    230      1.25   thorpej 				(*pr)("\t\taction = %s, addr = %p\n",
    231      1.25   thorpej 				    pl->pl_action == PRLOG_GET ? "get" : "put",
    232      1.25   thorpej 				    pl->pl_addr);
    233      1.25   thorpej 				(*pr)("\t\tfile: %s at line %lu\n",
    234      1.25   thorpej 				    pl->pl_file, pl->pl_line);
    235      1.25   thorpej 			}
    236       1.3        pk 		}
    237       1.3        pk 		if (++n >= pp->pr_logsize)
    238       1.3        pk 			n = 0;
    239       1.3        pk 	}
    240       1.3        pk }
    241      1.25   thorpej 
    242      1.42   thorpej static __inline void
    243      1.42   thorpej pr_enter(struct pool *pp, const char *file, long line)
    244      1.25   thorpej {
    245      1.25   thorpej 
    246      1.34   thorpej 	if (__predict_false(pp->pr_entered_file != NULL)) {
    247      1.25   thorpej 		printf("pool %s: reentrancy at file %s line %ld\n",
    248      1.25   thorpej 		    pp->pr_wchan, file, line);
    249      1.25   thorpej 		printf("         previous entry at file %s line %ld\n",
    250      1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    251      1.25   thorpej 		panic("pr_enter");
    252      1.25   thorpej 	}
    253      1.25   thorpej 
    254      1.25   thorpej 	pp->pr_entered_file = file;
    255      1.25   thorpej 	pp->pr_entered_line = line;
    256      1.25   thorpej }
    257      1.25   thorpej 
    258      1.42   thorpej static __inline void
    259      1.42   thorpej pr_leave(struct pool *pp)
    260      1.25   thorpej {
    261      1.25   thorpej 
    262      1.34   thorpej 	if (__predict_false(pp->pr_entered_file == NULL)) {
    263      1.25   thorpej 		printf("pool %s not entered?\n", pp->pr_wchan);
    264      1.25   thorpej 		panic("pr_leave");
    265      1.25   thorpej 	}
    266      1.25   thorpej 
    267      1.25   thorpej 	pp->pr_entered_file = NULL;
    268      1.25   thorpej 	pp->pr_entered_line = 0;
    269      1.25   thorpej }
    270      1.25   thorpej 
    271      1.42   thorpej static __inline void
    272      1.42   thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
    273      1.25   thorpej {
    274      1.25   thorpej 
    275      1.25   thorpej 	if (pp->pr_entered_file != NULL)
    276      1.25   thorpej 		(*pr)("\n\tcurrently entered from file %s line %ld\n",
    277      1.25   thorpej 		    pp->pr_entered_file, pp->pr_entered_line);
    278      1.25   thorpej }
    279       1.3        pk #else
    280      1.25   thorpej #define	pr_log(pp, v, action, file, line)
    281      1.25   thorpej #define	pr_printlog(pp, pi, pr)
    282      1.25   thorpej #define	pr_enter(pp, file, line)
    283      1.25   thorpej #define	pr_leave(pp)
    284      1.25   thorpej #define	pr_enter_check(pp, pr)
    285  1.50.2.1   nathanw #endif /* POOL_DIAGNOSTIC */
    286       1.3        pk 
    287       1.3        pk /*
    288       1.3        pk  * Return the pool page header based on page address.
    289       1.3        pk  */
    290      1.42   thorpej static __inline struct pool_item_header *
    291      1.42   thorpej pr_find_pagehead(struct pool *pp, caddr_t page)
    292       1.3        pk {
    293       1.3        pk 	struct pool_item_header *ph;
    294       1.3        pk 
    295      1.20   thorpej 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    296       1.3        pk 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
    297       1.3        pk 
    298       1.3        pk 	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
    299       1.3        pk 	     ph != NULL;
    300       1.3        pk 	     ph = LIST_NEXT(ph, ph_hashlist)) {
    301       1.3        pk 		if (ph->ph_page == page)
    302       1.3        pk 			return (ph);
    303       1.3        pk 	}
    304       1.3        pk 	return (NULL);
    305       1.3        pk }
    306       1.3        pk 
    307       1.3        pk /*
    308       1.3        pk  * Remove a page from the pool.
    309       1.3        pk  */
    310      1.42   thorpej static __inline void
    311  1.50.2.3   nathanw pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    312  1.50.2.3   nathanw      struct pool_pagelist *pq)
    313       1.3        pk {
    314  1.50.2.3   nathanw 	int s;
    315       1.3        pk 
    316       1.3        pk 	/*
    317       1.7   thorpej 	 * If the page was idle, decrement the idle page count.
    318       1.3        pk 	 */
    319       1.6   thorpej 	if (ph->ph_nmissing == 0) {
    320       1.6   thorpej #ifdef DIAGNOSTIC
    321       1.6   thorpej 		if (pp->pr_nidle == 0)
    322       1.6   thorpej 			panic("pr_rmpage: nidle inconsistent");
    323      1.20   thorpej 		if (pp->pr_nitems < pp->pr_itemsperpage)
    324      1.20   thorpej 			panic("pr_rmpage: nitems inconsistent");
    325       1.6   thorpej #endif
    326       1.6   thorpej 		pp->pr_nidle--;
    327       1.6   thorpej 	}
    328       1.7   thorpej 
    329      1.20   thorpej 	pp->pr_nitems -= pp->pr_itemsperpage;
    330      1.20   thorpej 
    331       1.7   thorpej 	/*
    332  1.50.2.3   nathanw 	 * Unlink a page from the pool and release it (or queue it for release).
    333       1.7   thorpej 	 */
    334       1.7   thorpej 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    335  1.50.2.3   nathanw 	if (pq) {
    336  1.50.2.3   nathanw 		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
    337  1.50.2.3   nathanw 	} else {
    338  1.50.2.3   nathanw 		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
    339  1.50.2.3   nathanw 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
    340  1.50.2.3   nathanw 			LIST_REMOVE(ph, ph_hashlist);
    341  1.50.2.3   nathanw 			s = splhigh();
    342  1.50.2.3   nathanw 			pool_put(&phpool, ph);
    343  1.50.2.3   nathanw 			splx(s);
    344  1.50.2.3   nathanw 		}
    345  1.50.2.3   nathanw 	}
    346       1.7   thorpej 	pp->pr_npages--;
    347       1.7   thorpej 	pp->pr_npagefree++;
    348       1.6   thorpej 
    349       1.3        pk 	if (pp->pr_curpage == ph) {
    350       1.3        pk 		/*
    351       1.3        pk 		 * Find a new non-empty page header, if any.
    352       1.3        pk 		 * Start search from the page head, to increase the
    353       1.3        pk 		 * chance for "high water" pages to be freed.
    354       1.3        pk 		 */
    355  1.50.2.3   nathanw 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    356       1.3        pk 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    357       1.3        pk 				break;
    358       1.3        pk 
    359       1.3        pk 		pp->pr_curpage = ph;
    360      1.21   thorpej 	}
    361       1.3        pk }
    362       1.3        pk 
    363       1.3        pk /*
    364       1.3        pk  * Initialize the given pool resource structure.
    365       1.3        pk  *
    366       1.3        pk  * We export this routine to allow other kernel parts to declare
    367       1.3        pk  * static pools that must be initialized before malloc() is available.
    368       1.3        pk  */
    369       1.3        pk void
    370      1.42   thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    371      1.42   thorpej     const char *wchan, size_t pagesz,
    372      1.42   thorpej     void *(*alloc)(unsigned long, int, int),
    373      1.42   thorpej     void (*release)(void *, unsigned long, int),
    374      1.42   thorpej     int mtype)
    375       1.3        pk {
    376      1.16    briggs 	int off, slack, i;
    377       1.3        pk 
    378      1.25   thorpej #ifdef POOL_DIAGNOSTIC
    379      1.25   thorpej 	/*
    380      1.25   thorpej 	 * Always log if POOL_DIAGNOSTIC is defined.
    381      1.25   thorpej 	 */
    382      1.25   thorpej 	if (pool_logsize != 0)
    383      1.25   thorpej 		flags |= PR_LOGGING;
    384      1.25   thorpej #endif
    385      1.25   thorpej 
    386       1.3        pk 	/*
    387       1.3        pk 	 * Check arguments and construct default values.
    388       1.3        pk 	 */
    389      1.36        pk 	if (!powerof2(pagesz))
    390       1.3        pk 		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
    391       1.3        pk 
    392       1.4   thorpej 	if (alloc == NULL && release == NULL) {
    393  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
    394  1.50.2.4   nathanw 		alloc = pool_subpage_alloc;
    395  1.50.2.4   nathanw 		release = pool_subpage_free;
    396  1.50.2.4   nathanw 		pagesz = POOL_SUBPAGE;
    397  1.50.2.4   nathanw #else
    398       1.3        pk 		alloc = pool_page_alloc;
    399       1.3        pk 		release = pool_page_free;
    400       1.4   thorpej 		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
    401  1.50.2.4   nathanw #endif
    402       1.4   thorpej 	} else if ((alloc != NULL && release != NULL) == 0) {
     403       1.4   thorpej 		/* If you specify one, you must specify both. */
    404       1.4   thorpej 		panic("pool_init: must specify alloc and release together");
    405       1.4   thorpej 	}
    406  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
    407  1.50.2.4   nathanw 	else if (alloc == pool_page_alloc_nointr &&
    408  1.50.2.4   nathanw 	    release == pool_page_free_nointr)
    409  1.50.2.4   nathanw 		pagesz = POOL_SUBPAGE;
    410  1.50.2.4   nathanw #endif
    411       1.4   thorpej 
    412       1.3        pk 	if (pagesz == 0)
    413       1.3        pk 		pagesz = PAGE_SIZE;
    414       1.3        pk 
    415       1.3        pk 	if (align == 0)
    416       1.3        pk 		align = ALIGN(1);
    417      1.14   thorpej 
    418      1.14   thorpej 	if (size < sizeof(struct pool_item))
    419      1.14   thorpej 		size = sizeof(struct pool_item);
    420       1.3        pk 
    421      1.35        pk 	size = ALIGN(size);
    422      1.43   thorpej 	if (size > pagesz)
    423      1.35        pk 		panic("pool_init: pool item size (%lu) too large",
    424      1.35        pk 		      (u_long)size);
    425      1.35        pk 
    426       1.3        pk 	/*
    427       1.3        pk 	 * Initialize the pool structure.
    428       1.3        pk 	 */
    429       1.3        pk 	TAILQ_INIT(&pp->pr_pagelist);
    430      1.43   thorpej 	TAILQ_INIT(&pp->pr_cachelist);
    431       1.3        pk 	pp->pr_curpage = NULL;
    432       1.3        pk 	pp->pr_npages = 0;
    433       1.3        pk 	pp->pr_minitems = 0;
    434       1.3        pk 	pp->pr_minpages = 0;
    435       1.3        pk 	pp->pr_maxpages = UINT_MAX;
    436      1.20   thorpej 	pp->pr_roflags = flags;
    437      1.20   thorpej 	pp->pr_flags = 0;
    438      1.35        pk 	pp->pr_size = size;
    439       1.3        pk 	pp->pr_align = align;
    440       1.3        pk 	pp->pr_wchan = wchan;
    441       1.3        pk 	pp->pr_mtype = mtype;
    442       1.3        pk 	pp->pr_alloc = alloc;
    443       1.3        pk 	pp->pr_free = release;
    444       1.3        pk 	pp->pr_pagesz = pagesz;
    445       1.3        pk 	pp->pr_pagemask = ~(pagesz - 1);
    446       1.3        pk 	pp->pr_pageshift = ffs(pagesz) - 1;
    447      1.20   thorpej 	pp->pr_nitems = 0;
    448      1.20   thorpej 	pp->pr_nout = 0;
    449      1.20   thorpej 	pp->pr_hardlimit = UINT_MAX;
    450      1.20   thorpej 	pp->pr_hardlimit_warning = NULL;
    451      1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = 0;
    452      1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_usec = 0;
    453      1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
    454      1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
    455       1.3        pk 
    456       1.3        pk 	/*
    457       1.3        pk 	 * Decide whether to put the page header off page to avoid
    458       1.3        pk 	 * wasting too large a part of the page. Off-page page headers
    459       1.3        pk 	 * go on a hash table, so we can match a returned item
    460       1.3        pk 	 * with its header based on the page address.
    461       1.3        pk 	 * We use 1/16 of the page size as the threshold (XXX: tune)
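	 * (For example, with 4096-byte pages the threshold is 256 bytes:
	 * smaller items keep their page header in-page, while larger items
	 * get an off-page header allocated from `phpool'.)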
    462       1.3        pk 	 */
    463       1.3        pk 	if (pp->pr_size < pagesz/16) {
    464       1.3        pk 		/* Use the end of the page for the page header */
    465      1.20   thorpej 		pp->pr_roflags |= PR_PHINPAGE;
    466       1.3        pk 		pp->pr_phoffset = off =
    467       1.3        pk 			pagesz - ALIGN(sizeof(struct pool_item_header));
    468       1.2        pk 	} else {
    469       1.3        pk 		/* The page header will be taken from our page header pool */
    470       1.3        pk 		pp->pr_phoffset = 0;
    471       1.3        pk 		off = pagesz;
    472      1.16    briggs 		for (i = 0; i < PR_HASHTABSIZE; i++) {
    473      1.16    briggs 			LIST_INIT(&pp->pr_hashtab[i]);
    474      1.16    briggs 		}
    475       1.2        pk 	}
    476       1.1        pk 
    477       1.3        pk 	/*
    478       1.3        pk 	 * Alignment is to take place at `ioff' within the item. This means
    479       1.3        pk 	 * we must reserve up to `align - 1' bytes on the page to allow
    480       1.3        pk 	 * appropriate positioning of each item.
    481       1.3        pk 	 *
    482       1.3        pk 	 * Silently enforce `0 <= ioff < align'.
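	 * (Illustrative example: with align = 8 and ioff = 4, byte 4 of each
	 * item is meant to fall on an 8-byte boundary; a caller passing
	 * ioff == align is silently treated as ioff == 0.)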
    483       1.3        pk 	 */
    484       1.3        pk 	pp->pr_itemoffset = ioff = ioff % align;
    485       1.3        pk 	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
    486      1.43   thorpej 	KASSERT(pp->pr_itemsperpage != 0);
    487       1.3        pk 
    488       1.3        pk 	/*
    489       1.3        pk 	 * Use the slack between the chunks and the page header
    490       1.3        pk 	 * for "cache coloring".
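	 * Each new page starts its item area at a different offset, stepping
	 * by the alignment and wrapping once the slack is exhausted, so that
	 * items on successive pages do not all compete for the same cache
	 * lines.  (See the pr_curcolor/pr_maxcolor handling in
	 * pool_prime_page() below.)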
    491       1.3        pk 	 */
    492       1.3        pk 	slack = off - pp->pr_itemsperpage * pp->pr_size;
    493       1.3        pk 	pp->pr_maxcolor = (slack / align) * align;
    494       1.3        pk 	pp->pr_curcolor = 0;
    495       1.3        pk 
    496       1.3        pk 	pp->pr_nget = 0;
    497       1.3        pk 	pp->pr_nfail = 0;
    498       1.3        pk 	pp->pr_nput = 0;
    499       1.3        pk 	pp->pr_npagealloc = 0;
    500       1.3        pk 	pp->pr_npagefree = 0;
    501       1.1        pk 	pp->pr_hiwat = 0;
    502       1.8   thorpej 	pp->pr_nidle = 0;
    503       1.3        pk 
    504  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
    505      1.25   thorpej 	if (flags & PR_LOGGING) {
    506      1.25   thorpej 		if (kmem_map == NULL ||
    507      1.25   thorpej 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
    508      1.25   thorpej 		     M_TEMP, M_NOWAIT)) == NULL)
    509      1.20   thorpej 			pp->pr_roflags &= ~PR_LOGGING;
    510       1.3        pk 		pp->pr_curlogentry = 0;
    511       1.3        pk 		pp->pr_logsize = pool_logsize;
    512       1.3        pk 	}
    513  1.50.2.1   nathanw #endif
    514      1.25   thorpej 
    515      1.25   thorpej 	pp->pr_entered_file = NULL;
    516      1.25   thorpej 	pp->pr_entered_line = 0;
    517       1.3        pk 
    518      1.21   thorpej 	simple_lock_init(&pp->pr_slock);
    519       1.1        pk 
    520       1.3        pk 	/*
    521      1.43   thorpej 	 * Initialize private page header pool and cache magazine pool if we
    522      1.43   thorpej 	 * haven't done so yet.
    523      1.23   thorpej 	 * XXX LOCKING.
    524       1.3        pk 	 */
    525       1.3        pk 	if (phpool.pr_size == 0) {
    526  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
    527  1.50.2.4   nathanw 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
    528  1.50.2.4   nathanw 		    "phpool", PAGE_SIZE, pool_page_alloc, pool_page_free, 0);
    529  1.50.2.4   nathanw 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
    530  1.50.2.4   nathanw 		    PR_RECURSIVE, "psppool", PAGE_SIZE,
    531  1.50.2.4   nathanw 		    pool_page_alloc, pool_page_free, 0);
    532  1.50.2.4   nathanw #else
    533       1.3        pk 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
    534      1.43   thorpej 		    0, "phpool", 0, 0, 0, 0);
    535  1.50.2.4   nathanw #endif
    536      1.43   thorpej 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
    537      1.43   thorpej 		    0, "pcgpool", 0, 0, 0, 0);
    538       1.1        pk 	}
    539       1.1        pk 
    540      1.23   thorpej 	/* Insert into the list of all pools. */
    541      1.23   thorpej 	simple_lock(&pool_head_slock);
    542      1.23   thorpej 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
    543      1.23   thorpej 	simple_unlock(&pool_head_slock);
    544       1.1        pk }
    545       1.1        pk 
    546       1.1        pk /*
     547       1.1        pk  * De-commission a pool resource.
    548       1.1        pk  */
    549       1.1        pk void
    550      1.42   thorpej pool_destroy(struct pool *pp)
    551       1.1        pk {
    552       1.3        pk 	struct pool_item_header *ph;
    553      1.43   thorpej 	struct pool_cache *pc;
    554      1.43   thorpej 
    555      1.43   thorpej 	/* Destroy all caches for this pool. */
    556      1.43   thorpej 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
    557      1.43   thorpej 		pool_cache_destroy(pc);
    558       1.3        pk 
    559       1.3        pk #ifdef DIAGNOSTIC
    560      1.20   thorpej 	if (pp->pr_nout != 0) {
    561      1.25   thorpej 		pr_printlog(pp, NULL, printf);
    562      1.20   thorpej 		panic("pool_destroy: pool busy: still out: %u\n",
    563      1.20   thorpej 		    pp->pr_nout);
    564       1.3        pk 	}
    565       1.3        pk #endif
    566       1.1        pk 
    567       1.3        pk 	/* Remove all pages */
    568      1.20   thorpej 	if ((pp->pr_roflags & PR_STATIC) == 0)
    569  1.50.2.3   nathanw 		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
    570  1.50.2.3   nathanw 			pr_rmpage(pp, ph, NULL);
    571       1.3        pk 
    572       1.3        pk 	/* Remove from global pool list */
    573      1.23   thorpej 	simple_lock(&pool_head_slock);
    574       1.3        pk 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
    575  1.50.2.3   nathanw 	if (drainpp == pp) {
    576  1.50.2.3   nathanw 		drainpp = NULL;
    577  1.50.2.3   nathanw 	}
    578      1.23   thorpej 	simple_unlock(&pool_head_slock);
    579       1.3        pk 
    580  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
    581      1.20   thorpej 	if ((pp->pr_roflags & PR_LOGGING) != 0)
    582       1.3        pk 		free(pp->pr_log, M_TEMP);
    583  1.50.2.1   nathanw #endif
    584       1.2        pk 
    585      1.20   thorpej 	if (pp->pr_roflags & PR_FREEHEADER)
    586       1.3        pk 		free(pp, M_POOL);
    587       1.1        pk }
    588       1.1        pk 
    589  1.50.2.1   nathanw static __inline struct pool_item_header *
    590  1.50.2.1   nathanw pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
    591  1.50.2.1   nathanw {
    592  1.50.2.1   nathanw 	struct pool_item_header *ph;
    593  1.50.2.1   nathanw 	int s;
    594  1.50.2.1   nathanw 
    595  1.50.2.1   nathanw 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
    596  1.50.2.1   nathanw 
    597  1.50.2.1   nathanw 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
    598  1.50.2.1   nathanw 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
    599  1.50.2.1   nathanw 	else {
    600  1.50.2.1   nathanw 		s = splhigh();
    601  1.50.2.1   nathanw 		ph = pool_get(&phpool, flags);
    602  1.50.2.1   nathanw 		splx(s);
    603  1.50.2.1   nathanw 	}
    604  1.50.2.1   nathanw 
    605  1.50.2.1   nathanw 	return (ph);
    606  1.50.2.1   nathanw }
    607       1.1        pk 
    608       1.1        pk /*
    609       1.3        pk  * Grab an item from the pool; must be called at appropriate spl level
    610       1.1        pk  */
    611       1.3        pk void *
    612  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
    613      1.42   thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
    614  1.50.2.1   nathanw #else
    615  1.50.2.1   nathanw pool_get(struct pool *pp, int flags)
    616  1.50.2.1   nathanw #endif
    617       1.1        pk {
    618       1.1        pk 	struct pool_item *pi;
    619       1.3        pk 	struct pool_item_header *ph;
    620  1.50.2.1   nathanw 	void *v;
    621       1.1        pk 
    622       1.2        pk #ifdef DIAGNOSTIC
    623      1.34   thorpej 	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
    624      1.34   thorpej 			    (flags & PR_MALLOCOK))) {
    625      1.25   thorpej 		pr_printlog(pp, NULL, printf);
    626       1.2        pk 		panic("pool_get: static");
    627       1.3        pk 	}
    628       1.2        pk 
    629      1.37  sommerfe 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
    630      1.37  sommerfe 			    (flags & PR_WAITOK) != 0))
    631       1.3        pk 		panic("pool_get: must have NOWAIT");
    632       1.1        pk 
    633  1.50.2.1   nathanw #ifdef LOCKDEBUG
    634  1.50.2.1   nathanw 	if (flags & PR_WAITOK)
    635  1.50.2.1   nathanw 		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
    636  1.50.2.1   nathanw #endif
    637  1.50.2.1   nathanw #endif /* DIAGNOSTIC */
    638  1.50.2.1   nathanw 
    639      1.21   thorpej 	simple_lock(&pp->pr_slock);
    640      1.25   thorpej 	pr_enter(pp, file, line);
    641      1.20   thorpej 
    642      1.20   thorpej  startover:
    643      1.20   thorpej 	/*
    644      1.20   thorpej 	 * Check to see if we've reached the hard limit.  If we have,
    645      1.20   thorpej 	 * and we can wait, then wait until an item has been returned to
    646      1.20   thorpej 	 * the pool.
    647      1.20   thorpej 	 */
    648      1.20   thorpej #ifdef DIAGNOSTIC
    649      1.34   thorpej 	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
    650      1.25   thorpej 		pr_leave(pp);
    651      1.21   thorpej 		simple_unlock(&pp->pr_slock);
    652      1.20   thorpej 		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
    653      1.20   thorpej 	}
    654      1.20   thorpej #endif
    655      1.34   thorpej 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
    656      1.29  sommerfe 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
    657      1.20   thorpej 			/*
    658      1.20   thorpej 			 * XXX: A warning isn't logged in this case.  Should
    659      1.20   thorpej 			 * it be?
    660      1.20   thorpej 			 */
    661      1.20   thorpej 			pp->pr_flags |= PR_WANTED;
    662      1.25   thorpej 			pr_leave(pp);
    663      1.40  sommerfe 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    664      1.25   thorpej 			pr_enter(pp, file, line);
    665      1.20   thorpej 			goto startover;
    666      1.20   thorpej 		}
    667      1.31   thorpej 
    668      1.31   thorpej 		/*
    669      1.31   thorpej 		 * Log a message that the hard limit has been hit.
    670      1.31   thorpej 		 */
    671      1.31   thorpej 		if (pp->pr_hardlimit_warning != NULL &&
    672      1.31   thorpej 		    ratecheck(&pp->pr_hardlimit_warning_last,
    673      1.31   thorpej 			      &pp->pr_hardlimit_ratecap))
    674      1.31   thorpej 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
    675      1.21   thorpej 
    676      1.21   thorpej 		if (flags & PR_URGENT)
    677      1.21   thorpej 			panic("pool_get: urgent");
    678      1.21   thorpej 
    679      1.21   thorpej 		pp->pr_nfail++;
    680      1.21   thorpej 
    681      1.25   thorpej 		pr_leave(pp);
    682      1.21   thorpej 		simple_unlock(&pp->pr_slock);
    683      1.20   thorpej 		return (NULL);
    684      1.20   thorpej 	}
    685      1.20   thorpej 
    686       1.3        pk 	/*
    687       1.3        pk 	 * The convention we use is that if `curpage' is not NULL, then
    688       1.3        pk 	 * it points at a non-empty bucket. In particular, `curpage'
    689       1.3        pk 	 * never points at a page header which has PR_PHINPAGE set and
    690       1.3        pk 	 * has no items in its bucket.
    691       1.3        pk 	 */
    692      1.20   thorpej 	if ((ph = pp->pr_curpage) == NULL) {
    693      1.20   thorpej #ifdef DIAGNOSTIC
    694      1.20   thorpej 		if (pp->pr_nitems != 0) {
    695      1.21   thorpej 			simple_unlock(&pp->pr_slock);
    696      1.20   thorpej 			printf("pool_get: %s: curpage NULL, nitems %u\n",
    697      1.20   thorpej 			    pp->pr_wchan, pp->pr_nitems);
    698      1.20   thorpej 			panic("pool_get: nitems inconsistent\n");
    699      1.20   thorpej 		}
    700      1.20   thorpej #endif
    701      1.20   thorpej 
    702      1.21   thorpej 		/*
    703      1.21   thorpej 		 * Call the back-end page allocator for more memory.
    704      1.21   thorpej 		 * Release the pool lock, as the back-end page allocator
    705      1.21   thorpej 		 * may block.
    706      1.21   thorpej 		 */
    707      1.25   thorpej 		pr_leave(pp);
    708      1.21   thorpej 		simple_unlock(&pp->pr_slock);
    709      1.21   thorpej 		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
    710  1.50.2.1   nathanw 		if (__predict_true(v != NULL))
    711  1.50.2.1   nathanw 			ph = pool_alloc_item_header(pp, v, flags);
    712      1.21   thorpej 		simple_lock(&pp->pr_slock);
    713      1.25   thorpej 		pr_enter(pp, file, line);
    714      1.15        pk 
    715  1.50.2.1   nathanw 		if (__predict_false(v == NULL || ph == NULL)) {
    716  1.50.2.1   nathanw 			if (v != NULL)
    717  1.50.2.1   nathanw 				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
    718  1.50.2.1   nathanw 
    719      1.21   thorpej 			/*
    720  1.50.2.1   nathanw 			 * We were unable to allocate a page or item
    721  1.50.2.1   nathanw 			 * header, but we released the lock during
    722  1.50.2.1   nathanw 			 * allocation, so perhaps items were freed
    723  1.50.2.1   nathanw 			 * back to the pool.  Check for this case.
    724      1.21   thorpej 			 */
    725      1.21   thorpej 			if (pp->pr_curpage != NULL)
    726      1.21   thorpej 				goto startover;
    727      1.15        pk 
    728       1.3        pk 			if (flags & PR_URGENT)
    729       1.3        pk 				panic("pool_get: urgent");
    730      1.21   thorpej 
    731       1.3        pk 			if ((flags & PR_WAITOK) == 0) {
    732       1.3        pk 				pp->pr_nfail++;
    733      1.25   thorpej 				pr_leave(pp);
    734      1.21   thorpej 				simple_unlock(&pp->pr_slock);
    735       1.1        pk 				return (NULL);
    736       1.3        pk 			}
    737       1.3        pk 
    738      1.15        pk 			/*
    739      1.15        pk 			 * Wait for items to be returned to this pool.
    740      1.21   thorpej 			 *
    741      1.15        pk 			 * XXX: we actually want to wait just until
    742      1.15        pk 			 * the page allocator has memory again. Depending
    743      1.15        pk 			 * on this pool's usage, we might get stuck here
    744      1.15        pk 			 * for a long time.
    745      1.20   thorpej 			 *
    746      1.20   thorpej 			 * XXX: maybe we should wake up once a second and
    747      1.20   thorpej 			 * try again?
    748      1.15        pk 			 */
    749       1.1        pk 			pp->pr_flags |= PR_WANTED;
    750      1.25   thorpej 			pr_leave(pp);
    751      1.40  sommerfe 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
    752      1.25   thorpej 			pr_enter(pp, file, line);
    753      1.20   thorpej 			goto startover;
    754       1.1        pk 		}
    755       1.3        pk 
    756      1.15        pk 		/* We have more memory; add it to the pool */
    757  1.50.2.1   nathanw 		pool_prime_page(pp, v, ph);
    758      1.15        pk 		pp->pr_npagealloc++;
    759      1.15        pk 
    760      1.20   thorpej 		/* Start the allocation process over. */
    761      1.20   thorpej 		goto startover;
    762       1.3        pk 	}
    763       1.3        pk 
    764      1.34   thorpej 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
    765      1.25   thorpej 		pr_leave(pp);
    766      1.21   thorpej 		simple_unlock(&pp->pr_slock);
    767       1.3        pk 		panic("pool_get: %s: page empty", pp->pr_wchan);
    768      1.21   thorpej 	}
    769      1.20   thorpej #ifdef DIAGNOSTIC
    770      1.34   thorpej 	if (__predict_false(pp->pr_nitems == 0)) {
    771      1.25   thorpej 		pr_leave(pp);
    772      1.21   thorpej 		simple_unlock(&pp->pr_slock);
    773      1.20   thorpej 		printf("pool_get: %s: items on itemlist, nitems %u\n",
    774      1.20   thorpej 		    pp->pr_wchan, pp->pr_nitems);
    775      1.20   thorpej 		panic("pool_get: nitems inconsistent\n");
    776      1.20   thorpej 	}
    777  1.50.2.1   nathanw 
    778       1.3        pk 	pr_log(pp, v, PRLOG_GET, file, line);
    779       1.3        pk 
    780      1.34   thorpej 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
    781      1.25   thorpej 		pr_printlog(pp, pi, printf);
    782       1.3        pk 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
    783       1.3        pk 		       " item addr %p\n",
    784       1.3        pk 			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
    785       1.3        pk 	}
    786       1.3        pk #endif
    787       1.3        pk 
    788       1.3        pk 	/*
    789       1.3        pk 	 * Remove from item list.
    790       1.3        pk 	 */
    791       1.3        pk 	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
    792      1.20   thorpej 	pp->pr_nitems--;
    793      1.20   thorpej 	pp->pr_nout++;
    794       1.6   thorpej 	if (ph->ph_nmissing == 0) {
    795       1.6   thorpej #ifdef DIAGNOSTIC
    796      1.34   thorpej 		if (__predict_false(pp->pr_nidle == 0))
    797       1.6   thorpej 			panic("pool_get: nidle inconsistent");
    798       1.6   thorpej #endif
    799       1.6   thorpej 		pp->pr_nidle--;
    800       1.6   thorpej 	}
    801       1.3        pk 	ph->ph_nmissing++;
    802       1.3        pk 	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
    803      1.21   thorpej #ifdef DIAGNOSTIC
    804      1.34   thorpej 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
    805      1.25   thorpej 			pr_leave(pp);
    806      1.21   thorpej 			simple_unlock(&pp->pr_slock);
    807      1.21   thorpej 			panic("pool_get: %s: nmissing inconsistent",
    808      1.21   thorpej 			    pp->pr_wchan);
    809      1.21   thorpej 		}
    810      1.21   thorpej #endif
    811       1.3        pk 		/*
    812       1.3        pk 		 * Find a new non-empty page header, if any.
    813       1.3        pk 		 * Start search from the page head, to increase
    814       1.3        pk 		 * the chance for "high water" pages to be freed.
    815       1.3        pk 		 *
    816      1.21   thorpej 		 * Migrate empty pages to the end of the list.  This
    817      1.21   thorpej 		 * will speed the update of curpage as pages become
    818      1.21   thorpej 		 * idle.  Empty pages intermingled with idle pages
    819      1.21   thorpej 		 * is no big deal.  As soon as a page becomes un-empty,
    820      1.21   thorpej 		 * it will move back to the head of the list.
    821       1.3        pk 		 */
    822       1.3        pk 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    823      1.21   thorpej 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
    824  1.50.2.3   nathanw 		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    825       1.3        pk 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    826       1.3        pk 				break;
    827       1.3        pk 
    828       1.3        pk 		pp->pr_curpage = ph;
    829       1.1        pk 	}
    830       1.3        pk 
    831       1.3        pk 	pp->pr_nget++;
    832      1.20   thorpej 
    833      1.20   thorpej 	/*
    834      1.20   thorpej 	 * If we have a low water mark and we are now below that low
    835      1.20   thorpej 	 * water mark, add more items to the pool.
    836      1.20   thorpej 	 */
    837  1.50.2.1   nathanw 	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
    838      1.20   thorpej 		/*
    839      1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
    840      1.20   thorpej 		 * to try again in a second or so?  The latter could break
    841      1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
    842      1.20   thorpej 		 */
    843      1.20   thorpej 	}
    844      1.20   thorpej 
    845      1.25   thorpej 	pr_leave(pp);
    846      1.21   thorpej 	simple_unlock(&pp->pr_slock);
    847       1.1        pk 	return (v);
    848       1.1        pk }
    849       1.1        pk 
    850       1.1        pk /*
    851      1.43   thorpej  * Internal version of pool_put().  Pool is already locked/entered.
    852       1.1        pk  */
    853      1.43   thorpej static void
    854  1.50.2.1   nathanw pool_do_put(struct pool *pp, void *v)
    855       1.1        pk {
    856       1.1        pk 	struct pool_item *pi = v;
    857       1.3        pk 	struct pool_item_header *ph;
    858       1.3        pk 	caddr_t page;
    859      1.21   thorpej 	int s;
    860       1.3        pk 
    861  1.50.2.3   nathanw 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
    862  1.50.2.3   nathanw 
    863       1.3        pk 	page = (caddr_t)((u_long)v & pp->pr_pagemask);
    864       1.1        pk 
    865      1.30   thorpej #ifdef DIAGNOSTIC
    866      1.34   thorpej 	if (__predict_false(pp->pr_nout == 0)) {
    867      1.30   thorpej 		printf("pool %s: putting with none out\n",
    868      1.30   thorpej 		    pp->pr_wchan);
    869      1.30   thorpej 		panic("pool_put");
    870      1.30   thorpej 	}
    871      1.30   thorpej #endif
    872       1.3        pk 
    873      1.34   thorpej 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
    874      1.25   thorpej 		pr_printlog(pp, NULL, printf);
    875       1.3        pk 		panic("pool_put: %s: page header missing", pp->pr_wchan);
    876       1.3        pk 	}
    877      1.28   thorpej 
    878      1.28   thorpej #ifdef LOCKDEBUG
    879      1.28   thorpej 	/*
    880      1.28   thorpej 	 * Check if we're freeing a locked simple lock.
    881      1.28   thorpej 	 */
    882      1.28   thorpej 	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
    883      1.28   thorpej #endif
    884       1.3        pk 
    885       1.3        pk 	/*
    886       1.3        pk 	 * Return to item list.
    887       1.3        pk 	 */
    888       1.2        pk #ifdef DIAGNOSTIC
    889       1.3        pk 	pi->pi_magic = PI_MAGIC;
    890       1.3        pk #endif
    891      1.32       chs #ifdef DEBUG
    892      1.32       chs 	{
    893      1.32       chs 		int i, *ip = v;
    894      1.32       chs 
    895      1.32       chs 		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
    896      1.32       chs 			*ip++ = PI_MAGIC;
    897      1.32       chs 		}
    898      1.32       chs 	}
    899      1.32       chs #endif
    900      1.32       chs 
    901       1.3        pk 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
    902       1.3        pk 	ph->ph_nmissing--;
    903       1.3        pk 	pp->pr_nput++;
    904      1.20   thorpej 	pp->pr_nitems++;
    905      1.20   thorpej 	pp->pr_nout--;
    906       1.3        pk 
    907       1.3        pk 	/* Cancel "pool empty" condition if it exists */
    908       1.3        pk 	if (pp->pr_curpage == NULL)
    909       1.3        pk 		pp->pr_curpage = ph;
    910       1.3        pk 
    911       1.3        pk 	if (pp->pr_flags & PR_WANTED) {
    912       1.3        pk 		pp->pr_flags &= ~PR_WANTED;
    913      1.15        pk 		if (ph->ph_nmissing == 0)
    914      1.15        pk 			pp->pr_nidle++;
    915       1.3        pk 		wakeup((caddr_t)pp);
    916       1.3        pk 		return;
    917       1.3        pk 	}
    918       1.3        pk 
    919       1.3        pk 	/*
    920      1.21   thorpej 	 * If this page is now complete, do one of two things:
    921      1.21   thorpej 	 *
    922      1.21   thorpej 	 *	(1) If we have more pages than the page high water
    923      1.21   thorpej 	 *	    mark, free the page back to the system.
    924      1.21   thorpej 	 *
    925      1.21   thorpej 	 *	(2) Move it to the end of the page list, so that
    926      1.21   thorpej 	 *	    we minimize our chances of fragmenting the
     927      1.21   thorpej 	 *	    pool.  Idle pages migrate to the end of the list (along
     928      1.21   thorpej 	 *	    with completely empty pages, so that we find un-empty
     929      1.21   thorpej 	 *	    pages more quickly when we update curpage), where they
     930      1.21   thorpej 	 *	    can be more easily swept up by the pagedaemon when
     931      1.21   thorpej 	 *	    pages are scarce.
    932       1.3        pk 	 */
    933       1.3        pk 	if (ph->ph_nmissing == 0) {
    934       1.6   thorpej 		pp->pr_nidle++;
    935       1.3        pk 		if (pp->pr_npages > pp->pr_maxpages) {
    936  1.50.2.3   nathanw 			pr_rmpage(pp, ph, NULL);
    937       1.3        pk 		} else {
    938       1.3        pk 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    939       1.3        pk 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
    940       1.3        pk 
    941      1.21   thorpej 			/*
    942      1.21   thorpej 			 * Update the timestamp on the page.  A page must
    943      1.21   thorpej 			 * be idle for some period of time before it can
    944      1.21   thorpej 			 * be reclaimed by the pagedaemon.  This minimizes
    945      1.21   thorpej 			 * ping-pong'ing for memory.
    946      1.21   thorpej 			 */
    947      1.21   thorpej 			s = splclock();
    948      1.21   thorpej 			ph->ph_time = mono_time;
    949      1.21   thorpej 			splx(s);
    950      1.21   thorpej 
    951      1.21   thorpej 			/*
    952      1.21   thorpej 			 * Update the current page pointer.  Just look for
    953      1.21   thorpej 			 * the first page with any free items.
    954      1.21   thorpej 			 *
    955      1.21   thorpej 			 * XXX: Maybe we want an option to look for the
    956      1.21   thorpej 			 * page with the fewest available items, to minimize
    957      1.21   thorpej 			 * fragmentation?
    958      1.21   thorpej 			 */
    959  1.50.2.3   nathanw 			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
    960       1.3        pk 				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
    961       1.3        pk 					break;
    962       1.1        pk 
    963       1.3        pk 			pp->pr_curpage = ph;
    964       1.1        pk 		}
    965       1.1        pk 	}
    966      1.21   thorpej 	/*
    967      1.21   thorpej 	 * If the page has just become un-empty, move it to the head of
    968      1.21   thorpej 	 * the list, and make it the current page.  The next allocation
    969      1.21   thorpej 	 * will get the item from this page, instead of further fragmenting
    970      1.21   thorpej 	 * the pool.
    971      1.21   thorpej 	 */
    972      1.21   thorpej 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
    973      1.21   thorpej 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
    974      1.21   thorpej 		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
    975      1.21   thorpej 		pp->pr_curpage = ph;
    976      1.21   thorpej 	}
    977      1.43   thorpej }
    978      1.43   thorpej 
    979      1.43   thorpej /*
    980      1.43   thorpej  * Return resource to the pool; must be called at appropriate spl level
    981      1.43   thorpej  */
    982  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
    983      1.43   thorpej void
    984      1.43   thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
    985      1.43   thorpej {
    986      1.43   thorpej 
    987      1.43   thorpej 	simple_lock(&pp->pr_slock);
    988      1.43   thorpej 	pr_enter(pp, file, line);
    989      1.43   thorpej 
    990  1.50.2.1   nathanw 	pr_log(pp, v, PRLOG_PUT, file, line);
    991  1.50.2.1   nathanw 
    992  1.50.2.1   nathanw 	pool_do_put(pp, v);
    993      1.21   thorpej 
    994      1.25   thorpej 	pr_leave(pp);
    995      1.21   thorpej 	simple_unlock(&pp->pr_slock);
    996       1.1        pk }
    997  1.50.2.1   nathanw #undef pool_put
    998  1.50.2.1   nathanw #endif /* POOL_DIAGNOSTIC */
    999  1.50.2.1   nathanw 
   1000  1.50.2.1   nathanw void
   1001  1.50.2.1   nathanw pool_put(struct pool *pp, void *v)
   1002  1.50.2.1   nathanw {
   1003  1.50.2.1   nathanw 
   1004  1.50.2.1   nathanw 	simple_lock(&pp->pr_slock);
   1005  1.50.2.1   nathanw 
   1006  1.50.2.1   nathanw 	pool_do_put(pp, v);
   1007  1.50.2.1   nathanw 
   1008  1.50.2.1   nathanw 	simple_unlock(&pp->pr_slock);
   1009  1.50.2.1   nathanw }
   1010  1.50.2.1   nathanw 
   1011  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
   1012  1.50.2.1   nathanw #define		pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
   1013  1.50.2.1   nathanw #endif
   1014       1.1        pk 
   1015       1.1        pk /*
   1016       1.3        pk  * Add N items to the pool.
   1017       1.1        pk  */
   1018       1.1        pk int
   1019  1.50.2.1   nathanw pool_prime(struct pool *pp, int n)
   1020       1.1        pk {
   1021  1.50.2.1   nathanw 	struct pool_item_header *ph;
   1022       1.3        pk 	caddr_t cp;
   1023  1.50.2.1   nathanw 	int newpages, error = 0;
   1024       1.1        pk 
   1025      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1026      1.21   thorpej 
   1027  1.50.2.1   nathanw 	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1028       1.3        pk 
   1029       1.3        pk 	while (newpages-- > 0) {
   1030  1.50.2.1   nathanw 		simple_unlock(&pp->pr_slock);
   1031  1.50.2.1   nathanw 		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
   1032  1.50.2.1   nathanw 		if (__predict_true(cp != NULL))
   1033  1.50.2.1   nathanw 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1034  1.50.2.1   nathanw 		simple_lock(&pp->pr_slock);
   1035       1.2        pk 
   1036  1.50.2.1   nathanw 		if (__predict_false(cp == NULL || ph == NULL)) {
   1037  1.50.2.1   nathanw 			error = ENOMEM;
   1038  1.50.2.1   nathanw 			if (cp != NULL)
   1039  1.50.2.1   nathanw 				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
   1040  1.50.2.1   nathanw 			break;
   1041       1.1        pk 		}
   1042       1.1        pk 
   1043  1.50.2.1   nathanw 		pool_prime_page(pp, cp, ph);
   1044      1.26   thorpej 		pp->pr_npagealloc++;
   1045       1.3        pk 		pp->pr_minpages++;
   1046       1.1        pk 	}
   1047       1.3        pk 
   1048       1.3        pk 	if (pp->pr_minpages >= pp->pr_maxpages)
   1049       1.3        pk 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
   1050       1.3        pk 
   1051      1.21   thorpej 	simple_unlock(&pp->pr_slock);
    1052       1.1        pk 	return (error);
   1053       1.1        pk }
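
/*
 * Example (illustrative sketch): pool_prime() is normally called once,
 * shortly after the pool is created, to pre-allocate enough pages for a
 * known steady-state demand, so that later PR_NOWAIT allocations are
 * less likely to fail.  The pool name and the item count below are
 * hypothetical.
 *
 *	if (pool_prime(&foo_pool, 64) != 0)
 *		printf("foo: unable to prime foo_pool\n");
 */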
   1054       1.3        pk 
   1055       1.3        pk /*
   1056       1.3        pk  * Add a page worth of items to the pool.
   1057      1.21   thorpej  *
   1058      1.21   thorpej  * Note, we must be called with the pool descriptor LOCKED.
   1059       1.3        pk  */
   1060  1.50.2.1   nathanw static void
   1061  1.50.2.1   nathanw pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
   1062       1.3        pk {
   1063       1.3        pk 	struct pool_item *pi;
   1064       1.3        pk 	caddr_t cp = storage;
   1065       1.3        pk 	unsigned int align = pp->pr_align;
   1066       1.3        pk 	unsigned int ioff = pp->pr_itemoffset;
   1067  1.50.2.1   nathanw 	int n;
   1068      1.36        pk 
   1069      1.36        pk 	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
   1070      1.36        pk 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
   1071       1.3        pk 
   1072  1.50.2.1   nathanw 	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
   1073       1.3        pk 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
   1074  1.50.2.1   nathanw 		    ph, ph_hashlist);
   1075       1.3        pk 
   1076       1.3        pk 	/*
   1077       1.3        pk 	 * Insert page header.
   1078       1.3        pk 	 */
   1079       1.3        pk 	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
   1080       1.3        pk 	TAILQ_INIT(&ph->ph_itemlist);
   1081       1.3        pk 	ph->ph_page = storage;
   1082       1.3        pk 	ph->ph_nmissing = 0;
   1083      1.21   thorpej 	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
   1084       1.3        pk 
   1085       1.6   thorpej 	pp->pr_nidle++;
   1086       1.6   thorpej 
   1087       1.3        pk 	/*
   1088       1.3        pk 	 * Color this page.
   1089       1.3        pk 	 */
   1090       1.3        pk 	cp = (caddr_t)(cp + pp->pr_curcolor);
   1091       1.3        pk 	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
   1092       1.3        pk 		pp->pr_curcolor = 0;
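
	/*
	 * (Illustrative note; the numbers are hypothetical.  With
	 * align == 32 and pr_maxcolor == 96, successive pages start
	 * their first item at offsets 0, 32, 64, 96, 0, ... so that
	 * equally-indexed items in different pages land on different
	 * cache lines.)
	 */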
   1093       1.3        pk 
   1094       1.3        pk 	/*
    1095       1.3        pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
   1096       1.3        pk 	 */
   1097       1.3        pk 	if (ioff != 0)
   1098       1.3        pk 		cp = (caddr_t)(cp + (align - ioff));
   1099       1.3        pk 
   1100       1.3        pk 	/*
   1101       1.3        pk 	 * Insert remaining chunks on the bucket list.
   1102       1.3        pk 	 */
   1103       1.3        pk 	n = pp->pr_itemsperpage;
   1104      1.20   thorpej 	pp->pr_nitems += n;
   1105       1.3        pk 
   1106       1.3        pk 	while (n--) {
   1107       1.3        pk 		pi = (struct pool_item *)cp;
   1108       1.3        pk 
   1109       1.3        pk 		/* Insert on page list */
   1110       1.3        pk 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
   1111       1.3        pk #ifdef DIAGNOSTIC
   1112       1.3        pk 		pi->pi_magic = PI_MAGIC;
   1113       1.3        pk #endif
   1114       1.3        pk 		cp = (caddr_t)(cp + pp->pr_size);
   1115       1.3        pk 	}
   1116       1.3        pk 
   1117       1.3        pk 	/*
   1118       1.3        pk 	 * If the pool was depleted, point at the new page.
   1119       1.3        pk 	 */
   1120       1.3        pk 	if (pp->pr_curpage == NULL)
   1121       1.3        pk 		pp->pr_curpage = ph;
   1122       1.3        pk 
   1123       1.3        pk 	if (++pp->pr_npages > pp->pr_hiwat)
   1124       1.3        pk 		pp->pr_hiwat = pp->pr_npages;
   1125       1.3        pk }
   1126       1.3        pk 
   1127      1.20   thorpej /*
    1128  1.50.2.1   nathanw  * Used by pool_get() when nitems drops below the low water mark.  This
    1129  1.50.2.1   nathanw  * is used to catch nitems up with the low water mark.
   1130      1.20   thorpej  *
   1131      1.21   thorpej  * Note 1, we never wait for memory here, we let the caller decide what to do.
   1132      1.20   thorpej  *
   1133      1.20   thorpej  * Note 2, this doesn't work with static pools.
   1134      1.20   thorpej  *
   1135      1.20   thorpej  * Note 3, we must be called with the pool already locked, and we return
   1136      1.20   thorpej  * with it locked.
   1137      1.20   thorpej  */
   1138      1.20   thorpej static int
   1139      1.42   thorpej pool_catchup(struct pool *pp)
   1140      1.20   thorpej {
   1141  1.50.2.1   nathanw 	struct pool_item_header *ph;
   1142      1.20   thorpej 	caddr_t cp;
   1143      1.20   thorpej 	int error = 0;
   1144      1.20   thorpej 
   1145      1.20   thorpej 	if (pp->pr_roflags & PR_STATIC) {
   1146      1.20   thorpej 		/*
   1147      1.20   thorpej 		 * We dropped below the low water mark, and this is not a
   1148      1.20   thorpej 		 * good thing.  Log a warning.
   1149      1.21   thorpej 		 *
   1150      1.21   thorpej 		 * XXX: rate-limit this?
   1151      1.20   thorpej 		 */
   1152      1.20   thorpej 		printf("WARNING: static pool `%s' dropped below low water "
   1153      1.20   thorpej 		    "mark\n", pp->pr_wchan);
   1154      1.20   thorpej 		return (0);
   1155      1.20   thorpej 	}
   1156      1.20   thorpej 
   1157  1.50.2.1   nathanw 	while (POOL_NEEDS_CATCHUP(pp)) {
   1158      1.20   thorpej 		/*
   1159      1.21   thorpej 		 * Call the page back-end allocator for more memory.
   1160      1.21   thorpej 		 *
   1161      1.21   thorpej 		 * XXX: We never wait, so should we bother unlocking
   1162      1.21   thorpej 		 * the pool descriptor?
   1163      1.20   thorpej 		 */
   1164      1.21   thorpej 		simple_unlock(&pp->pr_slock);
   1165  1.50.2.1   nathanw 		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
   1166  1.50.2.1   nathanw 		if (__predict_true(cp != NULL))
   1167  1.50.2.1   nathanw 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
   1168      1.21   thorpej 		simple_lock(&pp->pr_slock);
   1169  1.50.2.1   nathanw 		if (__predict_false(cp == NULL || ph == NULL)) {
   1170  1.50.2.1   nathanw 			if (cp != NULL)
   1171  1.50.2.1   nathanw 				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
   1172      1.20   thorpej 			error = ENOMEM;
   1173      1.20   thorpej 			break;
   1174      1.20   thorpej 		}
   1175  1.50.2.1   nathanw 		pool_prime_page(pp, cp, ph);
   1176      1.26   thorpej 		pp->pr_npagealloc++;
   1177      1.20   thorpej 	}
   1178      1.20   thorpej 
   1179      1.20   thorpej 	return (error);
   1180      1.20   thorpej }
   1181      1.20   thorpej 
   1182       1.3        pk void
   1183      1.42   thorpej pool_setlowat(struct pool *pp, int n)
   1184       1.3        pk {
   1185      1.20   thorpej 	int error;
   1186      1.15        pk 
   1187      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1188      1.21   thorpej 
   1189       1.3        pk 	pp->pr_minitems = n;
   1190      1.15        pk 	pp->pr_minpages = (n == 0)
   1191      1.15        pk 		? 0
   1192      1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
    1193  1.50.2.1   nathanw 	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
   1194      1.20   thorpej 	/* Make sure we're caught up with the newly-set low water mark. */
   1195  1.50.2.1   nathanw 	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
   1196      1.20   thorpej 		/*
   1197      1.20   thorpej 		 * XXX: Should we log a warning?  Should we set up a timeout
   1198      1.20   thorpej 		 * to try again in a second or so?  The latter could break
   1199      1.20   thorpej 		 * a caller's assumptions about interrupt protection, etc.
   1200      1.20   thorpej 		 */
   1201      1.20   thorpej 	}
   1202      1.21   thorpej 
   1203      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1204       1.3        pk }
   1205       1.3        pk 
   1206       1.3        pk void
   1207      1.42   thorpej pool_sethiwat(struct pool *pp, int n)
   1208       1.3        pk {
   1209      1.15        pk 
   1210      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1211      1.21   thorpej 
   1212      1.15        pk 	pp->pr_maxpages = (n == 0)
   1213      1.15        pk 		? 0
   1214      1.18   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1215      1.21   thorpej 
   1216      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1217       1.3        pk }
   1218       1.3        pk 
   1219      1.20   thorpej void
   1220      1.42   thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
   1221      1.20   thorpej {
   1222      1.20   thorpej 
   1223      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1224      1.20   thorpej 
   1225      1.20   thorpej 	pp->pr_hardlimit = n;
   1226      1.20   thorpej 	pp->pr_hardlimit_warning = warnmess;
   1227      1.31   thorpej 	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
   1228      1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_sec = 0;
   1229      1.31   thorpej 	pp->pr_hardlimit_warning_last.tv_usec = 0;
   1230      1.20   thorpej 
   1231      1.20   thorpej 	/*
   1232      1.21   thorpej 	 * In-line version of pool_sethiwat(), because we don't want to
   1233      1.21   thorpej 	 * release the lock.
   1234      1.20   thorpej 	 */
   1235      1.20   thorpej 	pp->pr_maxpages = (n == 0)
   1236      1.20   thorpej 		? 0
   1237      1.20   thorpej 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
   1238      1.21   thorpej 
   1239      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1240      1.20   thorpej }
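
/*
 * Example (illustrative sketch): tuning a pool after it has been
 * created.  pool_setlowat() keeps a minimum number of items allocated,
 * pool_sethiwat() caps the number of backing pages kept around (its
 * argument is in items), and pool_sethardlimit() refuses to grow the
 * pool beyond a ceiling, logging the given message at most once per
 * `ratecap' seconds.  The pool name and the numbers are hypothetical.
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 256);
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool hard limit reached", 60);
 */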
   1241       1.3        pk 
   1242       1.3        pk /*
   1243       1.3        pk  * Default page allocator.
   1244       1.3        pk  */
   1245       1.3        pk static void *
   1246      1.42   thorpej pool_page_alloc(unsigned long sz, int flags, int mtype)
   1247       1.3        pk {
   1248      1.11   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   1249       1.3        pk 
   1250      1.11   thorpej 	return ((void *)uvm_km_alloc_poolpage(waitok));
   1251       1.3        pk }
   1252       1.3        pk 
   1253       1.3        pk static void
   1254      1.42   thorpej pool_page_free(void *v, unsigned long sz, int mtype)
   1255       1.3        pk {
   1256       1.3        pk 
   1257      1.10       eeh 	uvm_km_free_poolpage((vaddr_t)v);
   1258       1.3        pk }
   1259      1.12   thorpej 
   1260  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
   1261  1.50.2.4   nathanw /*
   1262  1.50.2.4   nathanw  * Sub-page allocator, for machines with large hardware pages.
   1263  1.50.2.4   nathanw  */
   1264  1.50.2.4   nathanw static void *
   1265  1.50.2.4   nathanw pool_subpage_alloc(unsigned long sz, int flags, int mtype)
   1266  1.50.2.4   nathanw {
   1267  1.50.2.4   nathanw 
   1268  1.50.2.4   nathanw 	return pool_get(&psppool, flags);
   1269  1.50.2.4   nathanw }
   1270  1.50.2.4   nathanw 
   1271  1.50.2.4   nathanw static void
   1272  1.50.2.4   nathanw pool_subpage_free(void *v, unsigned long sz, int mtype)
   1273  1.50.2.4   nathanw {
   1274  1.50.2.4   nathanw 
   1275  1.50.2.4   nathanw 	pool_put(&psppool, v);
   1276  1.50.2.4   nathanw }
   1277  1.50.2.4   nathanw #endif
   1278  1.50.2.4   nathanw 
   1279  1.50.2.4   nathanw #ifdef POOL_SUBPAGE
   1280  1.50.2.4   nathanw /* We don't provide a real nointr allocator.  Maybe later. */
   1281  1.50.2.4   nathanw void *
   1282  1.50.2.4   nathanw pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
   1283  1.50.2.4   nathanw {
   1284  1.50.2.4   nathanw 
   1285  1.50.2.4   nathanw 	return pool_subpage_alloc(sz, flags, mtype);
   1286  1.50.2.4   nathanw }
   1287  1.50.2.4   nathanw 
   1288  1.50.2.4   nathanw void
   1289  1.50.2.4   nathanw pool_page_free_nointr(void *v, unsigned long sz, int mtype)
   1290  1.50.2.4   nathanw {
   1291  1.50.2.4   nathanw 
   1292  1.50.2.4   nathanw 	pool_subpage_free(v, sz, mtype);
   1293  1.50.2.4   nathanw }
   1294  1.50.2.4   nathanw #else
   1295      1.12   thorpej /*
   1296      1.12   thorpej  * Alternate pool page allocator for pools that know they will
   1297      1.12   thorpej  * never be accessed in interrupt context.
   1298      1.12   thorpej  */
   1299      1.12   thorpej void *
   1300      1.42   thorpej pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
   1301      1.12   thorpej {
   1302      1.12   thorpej 	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
   1303      1.12   thorpej 
   1304      1.12   thorpej 	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
   1305      1.12   thorpej 	    waitok));
   1306      1.12   thorpej }
   1307      1.12   thorpej 
   1308      1.12   thorpej void
   1309      1.42   thorpej pool_page_free_nointr(void *v, unsigned long sz, int mtype)
   1310      1.12   thorpej {
   1311      1.12   thorpej 
   1312      1.12   thorpej 	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
   1313      1.12   thorpej }
   1314  1.50.2.4   nathanw #endif
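
/*
 * Example (illustrative sketch): a pool that is only ever used from
 * process context can be created with the "nointr" page allocator, so
 * that its backing pages come from kernel_map rather than from the
 * interrupt-safe map used by the default allocator.  "foo_pool",
 * "struct foo" and M_DEVBUF are placeholders, and the pool_init()
 * argument order shown (size, align, ioff, flags, wchan, pagesz,
 * alloc, release, mtype) is recalled from this era's pool(9) interface
 * and is given only for illustration.
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", 0,
 *	    pool_page_alloc_nointr, pool_page_free_nointr, M_DEVBUF);
 */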
   1315      1.12   thorpej 
   1316       1.3        pk 
   1317       1.3        pk /*
   1318       1.3        pk  * Release all complete pages that have not been used recently.
   1319       1.3        pk  */
   1320       1.3        pk void
   1321  1.50.2.1   nathanw #ifdef POOL_DIAGNOSTIC
   1322      1.42   thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
   1323  1.50.2.1   nathanw #else
   1324  1.50.2.1   nathanw pool_reclaim(struct pool *pp)
   1325  1.50.2.1   nathanw #endif
   1326       1.3        pk {
   1327       1.3        pk 	struct pool_item_header *ph, *phnext;
   1328      1.43   thorpej 	struct pool_cache *pc;
   1329      1.21   thorpej 	struct timeval curtime;
   1330  1.50.2.3   nathanw 	struct pool_pagelist pq;
   1331      1.21   thorpej 	int s;
   1332       1.3        pk 
   1333      1.20   thorpej 	if (pp->pr_roflags & PR_STATIC)
   1334       1.3        pk 		return;
   1335       1.3        pk 
   1336      1.21   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1337       1.3        pk 		return;
   1338      1.25   thorpej 	pr_enter(pp, file, line);
   1339  1.50.2.3   nathanw 	TAILQ_INIT(&pq);
   1340       1.3        pk 
   1341      1.43   thorpej 	/*
   1342      1.43   thorpej 	 * Reclaim items from the pool's caches.
   1343      1.43   thorpej 	 */
   1344  1.50.2.3   nathanw 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
   1345      1.43   thorpej 		pool_cache_reclaim(pc);
   1346      1.43   thorpej 
   1347      1.21   thorpej 	s = splclock();
   1348      1.21   thorpej 	curtime = mono_time;
   1349      1.21   thorpej 	splx(s);
   1350      1.21   thorpej 
   1351       1.3        pk 	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
   1352       1.3        pk 		phnext = TAILQ_NEXT(ph, ph_pagelist);
   1353       1.3        pk 
   1354       1.3        pk 		/* Check our minimum page claim */
   1355       1.3        pk 		if (pp->pr_npages <= pp->pr_minpages)
   1356       1.3        pk 			break;
   1357       1.3        pk 
   1358       1.3        pk 		if (ph->ph_nmissing == 0) {
   1359       1.3        pk 			struct timeval diff;
   1360       1.3        pk 			timersub(&curtime, &ph->ph_time, &diff);
   1361       1.3        pk 			if (diff.tv_sec < pool_inactive_time)
   1362       1.3        pk 				continue;
   1363      1.21   thorpej 
   1364      1.21   thorpej 			/*
   1365      1.21   thorpej 			 * If freeing this page would put us below
   1366      1.21   thorpej 			 * the low water mark, stop now.
   1367      1.21   thorpej 			 */
   1368      1.21   thorpej 			if ((pp->pr_nitems - pp->pr_itemsperpage) <
   1369      1.21   thorpej 			    pp->pr_minitems)
   1370      1.21   thorpej 				break;
   1371      1.21   thorpej 
   1372  1.50.2.3   nathanw 			pr_rmpage(pp, ph, &pq);
   1373       1.3        pk 		}
   1374       1.3        pk 	}
   1375       1.3        pk 
   1376      1.25   thorpej 	pr_leave(pp);
   1377      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1378  1.50.2.3   nathanw 	if (TAILQ_EMPTY(&pq)) {
   1379  1.50.2.3   nathanw 		return;
   1380  1.50.2.3   nathanw 	}
   1381  1.50.2.3   nathanw 	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
   1382  1.50.2.3   nathanw 		TAILQ_REMOVE(&pq, ph, ph_pagelist);
   1383  1.50.2.3   nathanw 		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
   1384  1.50.2.3   nathanw 		if (pp->pr_roflags & PR_PHINPAGE) {
   1385  1.50.2.3   nathanw 			continue;
   1386  1.50.2.3   nathanw 		}
   1387  1.50.2.3   nathanw 		LIST_REMOVE(ph, ph_hashlist);
   1388  1.50.2.3   nathanw 		s = splhigh();
   1389  1.50.2.3   nathanw 		pool_put(&phpool, ph);
   1390  1.50.2.3   nathanw 		splx(s);
   1391  1.50.2.3   nathanw 	}
   1392       1.3        pk }
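
/*
 * Example (illustrative sketch): pool_reclaim() may also be called
 * explicitly to hand complete, long-idle pages back to the VM system
 * rather than waiting for the page daemon to get around to it via
 * pool_drain().  "foo_pool" is hypothetical.
 *
 *	pool_reclaim(&foo_pool);
 */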
   1393       1.3        pk 
   1394       1.3        pk 
   1395       1.3        pk /*
   1396       1.3        pk  * Drain pools, one at a time.
   1397      1.21   thorpej  *
   1398      1.21   thorpej  * Note, we must never be called from an interrupt context.
   1399       1.3        pk  */
   1400       1.3        pk void
   1401      1.42   thorpej pool_drain(void *arg)
   1402       1.3        pk {
   1403       1.3        pk 	struct pool *pp;
   1404      1.23   thorpej 	int s;
   1405       1.3        pk 
   1406  1.50.2.3   nathanw 	pp = NULL;
   1407      1.49   thorpej 	s = splvm();
   1408      1.23   thorpej 	simple_lock(&pool_head_slock);
   1409  1.50.2.3   nathanw 	if (drainpp == NULL) {
   1410  1.50.2.3   nathanw 		drainpp = TAILQ_FIRST(&pool_head);
   1411  1.50.2.3   nathanw 	}
   1412  1.50.2.3   nathanw 	if (drainpp) {
   1413  1.50.2.3   nathanw 		pp = drainpp;
   1414  1.50.2.3   nathanw 		drainpp = TAILQ_NEXT(pp, pr_poollist);
   1415  1.50.2.3   nathanw 	}
   1416      1.23   thorpej 	simple_unlock(&pool_head_slock);
    1417  1.50.2.3   nathanw 	if (pp != NULL)
                             		pool_reclaim(pp);
   1418  1.50.2.4   nathanw 	splx(s);
   1419       1.3        pk }
   1420       1.3        pk 
   1421       1.3        pk 
   1422       1.3        pk /*
   1423       1.3        pk  * Diagnostic helpers.
   1424       1.3        pk  */
   1425       1.3        pk void
   1426      1.42   thorpej pool_print(struct pool *pp, const char *modif)
   1427      1.21   thorpej {
   1428      1.21   thorpej 	int s;
   1429      1.21   thorpej 
   1430      1.49   thorpej 	s = splvm();
   1431      1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0) {
   1432      1.25   thorpej 		printf("pool %s is locked; try again later\n",
   1433      1.25   thorpej 		    pp->pr_wchan);
   1434      1.25   thorpej 		splx(s);
   1435      1.25   thorpej 		return;
   1436      1.25   thorpej 	}
   1437      1.25   thorpej 	pool_print1(pp, modif, printf);
   1438      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1439      1.21   thorpej 	splx(s);
   1440      1.21   thorpej }
   1441      1.21   thorpej 
   1442      1.25   thorpej void
   1443      1.42   thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1444      1.25   thorpej {
   1445      1.25   thorpej 	int didlock = 0;
   1446      1.25   thorpej 
   1447      1.25   thorpej 	if (pp == NULL) {
   1448      1.25   thorpej 		(*pr)("Must specify a pool to print.\n");
   1449      1.25   thorpej 		return;
   1450      1.25   thorpej 	}
   1451      1.25   thorpej 
   1452      1.25   thorpej 	/*
   1453      1.25   thorpej 	 * Called from DDB; interrupts should be blocked, and all
   1454      1.25   thorpej 	 * other processors should be paused.  We can skip locking
   1455      1.25   thorpej 	 * the pool in this case.
   1456      1.25   thorpej 	 *
   1457      1.25   thorpej 	 * We do a simple_lock_try() just to print the lock
   1458      1.25   thorpej 	 * status, however.
   1459      1.25   thorpej 	 */
   1460      1.25   thorpej 
   1461      1.25   thorpej 	if (simple_lock_try(&pp->pr_slock) == 0)
   1462      1.25   thorpej 		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
   1463      1.25   thorpej 	else
   1464      1.25   thorpej 		didlock = 1;
   1465      1.25   thorpej 
   1466      1.25   thorpej 	pool_print1(pp, modif, pr);
   1467      1.25   thorpej 
   1468      1.25   thorpej 	if (didlock)
   1469      1.25   thorpej 		simple_unlock(&pp->pr_slock);
   1470      1.25   thorpej }
   1471      1.25   thorpej 
   1472      1.21   thorpej static void
   1473      1.42   thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
   1474       1.3        pk {
   1475      1.25   thorpej 	struct pool_item_header *ph;
   1476      1.44   thorpej 	struct pool_cache *pc;
   1477      1.44   thorpej 	struct pool_cache_group *pcg;
   1478      1.25   thorpej #ifdef DIAGNOSTIC
   1479      1.25   thorpej 	struct pool_item *pi;
   1480      1.25   thorpej #endif
   1481      1.44   thorpej 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
   1482      1.25   thorpej 	char c;
   1483      1.25   thorpej 
   1484      1.25   thorpej 	while ((c = *modif++) != '\0') {
   1485      1.25   thorpej 		if (c == 'l')
   1486      1.25   thorpej 			print_log = 1;
   1487      1.25   thorpej 		if (c == 'p')
   1488      1.25   thorpej 			print_pagelist = 1;
   1489      1.44   thorpej 		if (c == 'c')
   1490      1.44   thorpej 			print_cache = 1;
   1492      1.25   thorpej 	}
   1493      1.25   thorpej 
   1494      1.25   thorpej 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
   1495      1.25   thorpej 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
   1496      1.25   thorpej 	    pp->pr_roflags);
   1497      1.25   thorpej 	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
   1498      1.25   thorpej 	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
   1499      1.25   thorpej 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
   1500      1.25   thorpej 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
   1501      1.25   thorpej 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
   1502      1.25   thorpej 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
   1503      1.25   thorpej 
   1504      1.25   thorpej 	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
   1505      1.25   thorpej 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
   1506      1.25   thorpej 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
   1507      1.25   thorpej 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
   1508      1.25   thorpej 
   1509      1.25   thorpej 	if (print_pagelist == 0)
   1510      1.25   thorpej 		goto skip_pagelist;
   1511      1.25   thorpej 
   1512      1.25   thorpej 	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
   1513      1.25   thorpej 		(*pr)("\n\tpage list:\n");
   1514      1.25   thorpej 	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
   1515      1.25   thorpej 		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
   1516      1.25   thorpej 		    ph->ph_page, ph->ph_nmissing,
   1517      1.25   thorpej 		    (u_long)ph->ph_time.tv_sec,
   1518      1.25   thorpej 		    (u_long)ph->ph_time.tv_usec);
   1519      1.25   thorpej #ifdef DIAGNOSTIC
   1520  1.50.2.3   nathanw 		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
   1521      1.25   thorpej 			if (pi->pi_magic != PI_MAGIC) {
   1522      1.25   thorpej 				(*pr)("\t\t\titem %p, magic 0x%x\n",
   1523      1.25   thorpej 				    pi, pi->pi_magic);
   1524      1.25   thorpej 			}
   1525      1.25   thorpej 		}
   1526      1.25   thorpej #endif
   1527      1.25   thorpej 	}
   1528      1.25   thorpej 	if (pp->pr_curpage == NULL)
   1529      1.25   thorpej 		(*pr)("\tno current page\n");
   1530      1.25   thorpej 	else
   1531      1.25   thorpej 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
   1532      1.25   thorpej 
   1533      1.25   thorpej  skip_pagelist:
   1534      1.25   thorpej 
   1535      1.25   thorpej 	if (print_log == 0)
   1536      1.25   thorpej 		goto skip_log;
   1537      1.25   thorpej 
   1538      1.25   thorpej 	(*pr)("\n");
   1539      1.25   thorpej 	if ((pp->pr_roflags & PR_LOGGING) == 0)
   1540      1.25   thorpej 		(*pr)("\tno log\n");
   1541      1.25   thorpej 	else
   1542      1.25   thorpej 		pr_printlog(pp, NULL, pr);
   1543       1.3        pk 
   1544      1.25   thorpej  skip_log:
   1545      1.44   thorpej 
   1546      1.44   thorpej 	if (print_cache == 0)
   1547      1.44   thorpej 		goto skip_cache;
   1548      1.44   thorpej 
   1549  1.50.2.3   nathanw 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
   1550      1.44   thorpej 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
   1551      1.44   thorpej 		    pc->pc_allocfrom, pc->pc_freeto);
   1552      1.48   thorpej 		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
   1553      1.48   thorpej 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
   1554  1.50.2.3   nathanw 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1555      1.44   thorpej 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
   1556      1.44   thorpej 			for (i = 0; i < PCG_NOBJECTS; i++)
   1557      1.44   thorpej 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
   1558      1.44   thorpej 		}
   1559      1.44   thorpej 	}
   1560      1.44   thorpej 
   1561      1.44   thorpej  skip_cache:
   1562       1.3        pk 
   1563      1.25   thorpej 	pr_enter_check(pp, pr);
   1564       1.3        pk }
   1565       1.3        pk 
   1566       1.3        pk int
   1567      1.42   thorpej pool_chk(struct pool *pp, const char *label)
   1568       1.3        pk {
   1569       1.3        pk 	struct pool_item_header *ph;
   1570       1.3        pk 	int r = 0;
   1571       1.3        pk 
   1572      1.21   thorpej 	simple_lock(&pp->pr_slock);
   1573       1.3        pk 
   1574  1.50.2.3   nathanw 	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
   1575       1.3        pk 		struct pool_item *pi;
   1576       1.3        pk 		int n;
   1577       1.3        pk 		caddr_t page;
   1578       1.3        pk 
   1579       1.3        pk 		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
   1580      1.20   thorpej 		if (page != ph->ph_page &&
   1581      1.20   thorpej 		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
   1582       1.3        pk 			if (label != NULL)
   1583       1.3        pk 				printf("%s: ", label);
   1584      1.16    briggs 			printf("pool(%p:%s): page inconsistency: page %p;"
   1585      1.16    briggs 			       " at page head addr %p (p %p)\n", pp,
   1586       1.3        pk 				pp->pr_wchan, ph->ph_page,
   1587       1.3        pk 				ph, page);
   1588       1.3        pk 			r++;
   1589       1.3        pk 			goto out;
   1590       1.3        pk 		}
   1591       1.3        pk 
   1592       1.3        pk 		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
   1593       1.3        pk 		     pi != NULL;
   1594       1.3        pk 		     pi = TAILQ_NEXT(pi,pi_list), n++) {
   1595       1.3        pk 
   1596       1.3        pk #ifdef DIAGNOSTIC
   1597       1.3        pk 			if (pi->pi_magic != PI_MAGIC) {
   1598       1.3        pk 				if (label != NULL)
   1599       1.3        pk 					printf("%s: ", label);
   1600       1.3        pk 				printf("pool(%s): free list modified: magic=%x;"
   1601       1.3        pk 				       " page %p; item ordinal %d;"
   1602       1.3        pk 				       " addr %p (p %p)\n",
   1603       1.3        pk 					pp->pr_wchan, pi->pi_magic, ph->ph_page,
   1604       1.3        pk 					n, pi, page);
   1605       1.3        pk 				panic("pool");
   1606       1.3        pk 			}
   1607       1.3        pk #endif
   1608       1.3        pk 			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
   1609       1.3        pk 			if (page == ph->ph_page)
   1610       1.3        pk 				continue;
   1611       1.3        pk 
   1612       1.3        pk 			if (label != NULL)
   1613       1.3        pk 				printf("%s: ", label);
   1614      1.16    briggs 			printf("pool(%p:%s): page inconsistency: page %p;"
   1615      1.16    briggs 			       " item ordinal %d; addr %p (p %p)\n", pp,
   1616       1.3        pk 				pp->pr_wchan, ph->ph_page,
   1617       1.3        pk 				n, pi, page);
   1618       1.3        pk 			r++;
   1619       1.3        pk 			goto out;
   1620       1.3        pk 		}
   1621       1.3        pk 	}
   1622       1.3        pk out:
   1623      1.21   thorpej 	simple_unlock(&pp->pr_slock);
   1624       1.3        pk 	return (r);
   1625      1.43   thorpej }
   1626      1.43   thorpej 
   1627      1.43   thorpej /*
   1628      1.43   thorpej  * pool_cache_init:
   1629      1.43   thorpej  *
   1630      1.43   thorpej  *	Initialize a pool cache.
   1631      1.43   thorpej  *
   1632      1.43   thorpej  *	NOTE: If the pool must be protected from interrupts, we expect
   1633      1.43   thorpej  *	to be called at the appropriate interrupt priority level.
   1634      1.43   thorpej  */
   1635      1.43   thorpej void
   1636      1.43   thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
   1637      1.43   thorpej     int (*ctor)(void *, void *, int),
   1638      1.43   thorpej     void (*dtor)(void *, void *),
   1639      1.43   thorpej     void *arg)
   1640      1.43   thorpej {
   1641      1.43   thorpej 
   1642      1.43   thorpej 	TAILQ_INIT(&pc->pc_grouplist);
   1643      1.43   thorpej 	simple_lock_init(&pc->pc_slock);
   1644      1.43   thorpej 
   1645      1.43   thorpej 	pc->pc_allocfrom = NULL;
   1646      1.43   thorpej 	pc->pc_freeto = NULL;
   1647      1.43   thorpej 	pc->pc_pool = pp;
   1648      1.43   thorpej 
   1649      1.43   thorpej 	pc->pc_ctor = ctor;
   1650      1.43   thorpej 	pc->pc_dtor = dtor;
   1651      1.43   thorpej 	pc->pc_arg  = arg;
   1652      1.43   thorpej 
   1653      1.48   thorpej 	pc->pc_hits   = 0;
   1654      1.48   thorpej 	pc->pc_misses = 0;
   1655      1.48   thorpej 
   1656      1.48   thorpej 	pc->pc_ngroups = 0;
   1657      1.48   thorpej 
   1658      1.48   thorpej 	pc->pc_nitems = 0;
   1659      1.48   thorpej 
   1660      1.43   thorpej 	simple_lock(&pp->pr_slock);
   1661      1.43   thorpej 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
   1662      1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1663      1.43   thorpej }
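
/*
 * Example (illustrative sketch): layering a cache on top of an existing
 * pool so that objects keep their constructed state across free/get
 * cycles.  "foo_cache", "foo_pool", "struct foo" and foo_ctor() are
 * hypothetical; the destructor may be NULL if there is nothing to tear
 * down.
 *
 *	static int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		struct foo *f = object;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, NULL, NULL);
 */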
   1664      1.43   thorpej 
   1665      1.43   thorpej /*
   1666      1.43   thorpej  * pool_cache_destroy:
   1667      1.43   thorpej  *
   1668      1.43   thorpej  *	Destroy a pool cache.
   1669      1.43   thorpej  */
   1670      1.43   thorpej void
   1671      1.43   thorpej pool_cache_destroy(struct pool_cache *pc)
   1672      1.43   thorpej {
   1673      1.43   thorpej 	struct pool *pp = pc->pc_pool;
   1674      1.43   thorpej 
   1675      1.43   thorpej 	/* First, invalidate the entire cache. */
   1676      1.43   thorpej 	pool_cache_invalidate(pc);
   1677      1.43   thorpej 
   1678      1.43   thorpej 	/* ...and remove it from the pool's cache list. */
   1679      1.43   thorpej 	simple_lock(&pp->pr_slock);
   1680      1.43   thorpej 	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
   1681      1.43   thorpej 	simple_unlock(&pp->pr_slock);
   1682      1.43   thorpej }
   1683      1.43   thorpej 
   1684      1.43   thorpej static __inline void *
   1685      1.43   thorpej pcg_get(struct pool_cache_group *pcg)
   1686      1.43   thorpej {
   1687      1.43   thorpej 	void *object;
   1688      1.43   thorpej 	u_int idx;
   1689      1.43   thorpej 
   1690      1.43   thorpej 	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
   1691      1.45   thorpej 	KASSERT(pcg->pcg_avail != 0);
   1692      1.43   thorpej 	idx = --pcg->pcg_avail;
   1693      1.43   thorpej 
   1694      1.43   thorpej 	KASSERT(pcg->pcg_objects[idx] != NULL);
   1695      1.43   thorpej 	object = pcg->pcg_objects[idx];
   1696      1.43   thorpej 	pcg->pcg_objects[idx] = NULL;
   1697      1.43   thorpej 
   1698      1.43   thorpej 	return (object);
   1699      1.43   thorpej }
   1700      1.43   thorpej 
   1701      1.43   thorpej static __inline void
   1702      1.43   thorpej pcg_put(struct pool_cache_group *pcg, void *object)
   1703      1.43   thorpej {
   1704      1.43   thorpej 	u_int idx;
   1705      1.43   thorpej 
   1706      1.43   thorpej 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
   1707      1.43   thorpej 	idx = pcg->pcg_avail++;
   1708      1.43   thorpej 
   1709      1.43   thorpej 	KASSERT(pcg->pcg_objects[idx] == NULL);
   1710      1.43   thorpej 	pcg->pcg_objects[idx] = object;
   1711      1.43   thorpej }
   1712      1.43   thorpej 
   1713      1.43   thorpej /*
   1714      1.43   thorpej  * pool_cache_get:
   1715      1.43   thorpej  *
   1716      1.43   thorpej  *	Get an object from a pool cache.
   1717      1.43   thorpej  */
   1718      1.43   thorpej void *
   1719      1.43   thorpej pool_cache_get(struct pool_cache *pc, int flags)
   1720      1.43   thorpej {
   1721      1.43   thorpej 	struct pool_cache_group *pcg;
   1722      1.43   thorpej 	void *object;
   1723      1.43   thorpej 
   1724  1.50.2.1   nathanw #ifdef LOCKDEBUG
   1725  1.50.2.1   nathanw 	if (flags & PR_WAITOK)
   1726  1.50.2.1   nathanw 		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
   1727  1.50.2.1   nathanw #endif
   1728  1.50.2.1   nathanw 
   1729      1.43   thorpej 	simple_lock(&pc->pc_slock);
   1730      1.43   thorpej 
   1731      1.43   thorpej 	if ((pcg = pc->pc_allocfrom) == NULL) {
   1732  1.50.2.3   nathanw 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1733      1.43   thorpej 			if (pcg->pcg_avail != 0) {
   1734      1.43   thorpej 				pc->pc_allocfrom = pcg;
   1735      1.43   thorpej 				goto have_group;
   1736      1.43   thorpej 			}
   1737      1.43   thorpej 		}
   1738      1.43   thorpej 
   1739      1.43   thorpej 		/*
   1740      1.43   thorpej 		 * No groups with any available objects.  Allocate
   1741      1.43   thorpej 		 * a new object, construct it, and return it to
   1742      1.43   thorpej 		 * the caller.  We will allocate a group, if necessary,
   1743      1.43   thorpej 		 * when the object is freed back to the cache.
   1744      1.43   thorpej 		 */
   1745      1.48   thorpej 		pc->pc_misses++;
   1746      1.43   thorpej 		simple_unlock(&pc->pc_slock);
   1747      1.43   thorpej 		object = pool_get(pc->pc_pool, flags);
   1748      1.43   thorpej 		if (object != NULL && pc->pc_ctor != NULL) {
   1749      1.43   thorpej 			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
   1750      1.43   thorpej 				pool_put(pc->pc_pool, object);
   1751      1.43   thorpej 				return (NULL);
   1752      1.43   thorpej 			}
   1753      1.43   thorpej 		}
   1754      1.43   thorpej 		return (object);
   1755      1.43   thorpej 	}
   1756      1.43   thorpej 
   1757      1.43   thorpej  have_group:
   1758      1.48   thorpej 	pc->pc_hits++;
   1759      1.48   thorpej 	pc->pc_nitems--;
   1760      1.43   thorpej 	object = pcg_get(pcg);
   1761      1.43   thorpej 
   1762      1.43   thorpej 	if (pcg->pcg_avail == 0)
   1763      1.43   thorpej 		pc->pc_allocfrom = NULL;
   1764      1.45   thorpej 
   1765      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1766      1.43   thorpej 
   1767      1.43   thorpej 	return (object);
   1768      1.43   thorpej }
   1769      1.43   thorpej 
   1770      1.43   thorpej /*
   1771      1.43   thorpej  * pool_cache_put:
   1772      1.43   thorpej  *
   1773      1.43   thorpej  *	Put an object back to the pool cache.
   1774      1.43   thorpej  */
   1775      1.43   thorpej void
   1776      1.43   thorpej pool_cache_put(struct pool_cache *pc, void *object)
   1777      1.43   thorpej {
   1778      1.43   thorpej 	struct pool_cache_group *pcg;
   1779  1.50.2.2   nathanw 	int s;
   1780      1.43   thorpej 
   1781      1.43   thorpej 	simple_lock(&pc->pc_slock);
   1782      1.43   thorpej 
   1783      1.43   thorpej 	if ((pcg = pc->pc_freeto) == NULL) {
   1784  1.50.2.3   nathanw 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
   1785      1.43   thorpej 			if (pcg->pcg_avail != PCG_NOBJECTS) {
   1786      1.43   thorpej 				pc->pc_freeto = pcg;
   1787      1.43   thorpej 				goto have_group;
   1788      1.43   thorpej 			}
   1789      1.43   thorpej 		}
   1790      1.43   thorpej 
   1791      1.43   thorpej 		/*
    1792      1.43   thorpej 		 * No group with a free slot to hold the object.  Attempt
    1793      1.47   thorpej 		 * to allocate a new group.
   1794      1.43   thorpej 		 */
   1795      1.47   thorpej 		simple_unlock(&pc->pc_slock);
   1796  1.50.2.2   nathanw 		s = splvm();
   1797      1.43   thorpej 		pcg = pool_get(&pcgpool, PR_NOWAIT);
   1798  1.50.2.2   nathanw 		splx(s);
   1799      1.43   thorpej 		if (pcg != NULL) {
   1800      1.43   thorpej 			memset(pcg, 0, sizeof(*pcg));
   1801      1.47   thorpej 			simple_lock(&pc->pc_slock);
   1802      1.48   thorpej 			pc->pc_ngroups++;
   1803      1.43   thorpej 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
   1804      1.47   thorpej 			if (pc->pc_freeto == NULL)
   1805      1.47   thorpej 				pc->pc_freeto = pcg;
   1806      1.43   thorpej 			goto have_group;
   1807      1.43   thorpej 		}
   1808      1.43   thorpej 
   1809      1.43   thorpej 		/*
   1810      1.43   thorpej 		 * Unable to allocate a cache group; destruct the object
   1811      1.43   thorpej 		 * and free it back to the pool.
   1812      1.43   thorpej 		 */
   1813  1.50.2.1   nathanw 		pool_cache_destruct_object(pc, object);
   1814      1.43   thorpej 		return;
   1815      1.43   thorpej 	}
   1816      1.43   thorpej 
   1817      1.43   thorpej  have_group:
   1818      1.48   thorpej 	pc->pc_nitems++;
   1819      1.43   thorpej 	pcg_put(pcg, object);
   1820      1.43   thorpej 
   1821      1.43   thorpej 	if (pcg->pcg_avail == PCG_NOBJECTS)
   1822      1.43   thorpej 		pc->pc_freeto = NULL;
   1823      1.43   thorpej 
   1824      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1825      1.43   thorpej }
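
/*
 * Example (illustrative sketch): the get/put cycle against a pool cache
 * mirrors pool_get()/pool_put(), except that an object handed out by
 * the cache may already be constructed.  "foo_cache" and "struct foo"
 * are hypothetical.
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return (ENOMEM);
 *
 *	... use the object ...
 *
 *	pool_cache_put(&foo_cache, f);
 */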
   1826      1.43   thorpej 
   1827      1.43   thorpej /*
   1828  1.50.2.1   nathanw  * pool_cache_destruct_object:
   1829  1.50.2.1   nathanw  *
   1830  1.50.2.1   nathanw  *	Force destruction of an object and its release back into
   1831  1.50.2.1   nathanw  *	the pool.
   1832  1.50.2.1   nathanw  */
   1833  1.50.2.1   nathanw void
   1834  1.50.2.1   nathanw pool_cache_destruct_object(struct pool_cache *pc, void *object)
   1835  1.50.2.1   nathanw {
   1836  1.50.2.1   nathanw 
   1837  1.50.2.1   nathanw 	if (pc->pc_dtor != NULL)
   1838  1.50.2.1   nathanw 		(*pc->pc_dtor)(pc->pc_arg, object);
   1839  1.50.2.1   nathanw 	pool_put(pc->pc_pool, object);
   1840  1.50.2.1   nathanw }
   1841  1.50.2.1   nathanw 
   1842  1.50.2.1   nathanw /*
   1843      1.43   thorpej  * pool_cache_do_invalidate:
   1844      1.43   thorpej  *
   1845      1.43   thorpej  *	This internal function implements pool_cache_invalidate() and
   1846      1.43   thorpej  *	pool_cache_reclaim().
   1847      1.43   thorpej  */
   1848      1.43   thorpej static void
   1849      1.43   thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
   1850  1.50.2.1   nathanw     void (*putit)(struct pool *, void *))
   1851      1.43   thorpej {
   1852      1.43   thorpej 	struct pool_cache_group *pcg, *npcg;
   1853      1.43   thorpej 	void *object;
   1854  1.50.2.2   nathanw 	int s;
   1855      1.43   thorpej 
   1856      1.43   thorpej 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
   1857      1.43   thorpej 	     pcg = npcg) {
   1858      1.43   thorpej 		npcg = TAILQ_NEXT(pcg, pcg_list);
   1859      1.43   thorpej 		while (pcg->pcg_avail != 0) {
   1860      1.48   thorpej 			pc->pc_nitems--;
   1861      1.43   thorpej 			object = pcg_get(pcg);
   1862      1.45   thorpej 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
   1863      1.45   thorpej 				pc->pc_allocfrom = NULL;
   1864      1.43   thorpej 			if (pc->pc_dtor != NULL)
   1865      1.43   thorpej 				(*pc->pc_dtor)(pc->pc_arg, object);
   1866  1.50.2.1   nathanw 			(*putit)(pc->pc_pool, object);
   1867      1.43   thorpej 		}
   1868      1.43   thorpej 		if (free_groups) {
   1869      1.48   thorpej 			pc->pc_ngroups--;
   1870      1.43   thorpej 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
   1871      1.46   thorpej 			if (pc->pc_freeto == pcg)
   1872      1.46   thorpej 				pc->pc_freeto = NULL;
   1873  1.50.2.2   nathanw 			s = splvm();
   1874      1.43   thorpej 			pool_put(&pcgpool, pcg);
   1875  1.50.2.2   nathanw 			splx(s);
   1876      1.43   thorpej 		}
   1877      1.43   thorpej 	}
   1878      1.43   thorpej }
   1879      1.43   thorpej 
   1880      1.43   thorpej /*
   1881      1.43   thorpej  * pool_cache_invalidate:
   1882      1.43   thorpej  *
   1883      1.43   thorpej  *	Invalidate a pool cache (destruct and release all of the
   1884      1.43   thorpej  *	cached objects).
   1885      1.43   thorpej  */
   1886      1.43   thorpej void
   1887      1.43   thorpej pool_cache_invalidate(struct pool_cache *pc)
   1888      1.43   thorpej {
   1889      1.43   thorpej 
   1890      1.43   thorpej 	simple_lock(&pc->pc_slock);
   1891  1.50.2.1   nathanw 	pool_cache_do_invalidate(pc, 0, pool_put);
   1892      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1893      1.43   thorpej }
   1894      1.43   thorpej 
   1895      1.43   thorpej /*
   1896      1.43   thorpej  * pool_cache_reclaim:
   1897      1.43   thorpej  *
   1898      1.43   thorpej  *	Reclaim a pool cache for pool_reclaim().
   1899      1.43   thorpej  */
   1900      1.43   thorpej static void
   1901      1.43   thorpej pool_cache_reclaim(struct pool_cache *pc)
   1902      1.43   thorpej {
   1903      1.43   thorpej 
   1904      1.47   thorpej 	simple_lock(&pc->pc_slock);
   1905      1.43   thorpej 	pool_cache_do_invalidate(pc, 1, pool_do_put);
   1906      1.43   thorpej 	simple_unlock(&pc->pc_slock);
   1907       1.3        pk }
   1908