/*	$NetBSD: subr_pool.c,v 1.58 2001/06/05 04:40:39 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
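
/*
 * Usage sketch (illustrative only; `foo_pool' and struct foo are
 * hypothetical, and a pool taking the default page allocator passes
 * NULL for both allocator functions):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", 0, NULL, NULL, M_DEVBUF);
 *
 *	f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */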

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
				ph_hashlist;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeef
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
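
/*
 * Cache usage sketch (illustrative; `foo_cache', foo_ctor() and
 * foo_dtor() are hypothetical, and pool_cache_get()/pool_cache_put()
 * are the cache-aware analogues of pool_get()/pool_put() declared in
 * <sys/pool.h>):
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 */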

/* The cache group pool. */
static struct pool pcgpool;

/* The pool cache group. */
#define	PCG_NOBJECTS		16
struct pool_cache_group {
	TAILQ_ENTRY(pool_cache_group)
		pcg_list;	/* link in the pool cache's group list */
	u_int	pcg_avail;	/* # available objects */
				/* pointers to the objects */
	void	*pcg_objects[PCG_NOBJECTS];
};

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	*pool_page_alloc(unsigned long, int, int);
static void	pool_page_free(void *, unsigned long, int);

static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef DIAGNOSTIC
static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* DIAGNOSTIC */

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph)
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		int s;
		LIST_REMOVE(ph, ph_hashlist);
		s = splhigh();
		pool_put(&phpool, ph);
		splx(s);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz))
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = ALIGN(size);
	if (size > pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
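	/*
	 * For example, with 4 KB pages the threshold is 256 bytes:
	 * items smaller than that keep their header at the end of the
	 * page itself (PR_PHINPAGE); larger items get a header from
	 * `phpool' and are matched back to it through `pr_hashtab'.
	 */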
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
		    pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
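	/*
	 * (For instance, align = 32 with ioff = 8 is intended to place
	 * byte 8 of every item on a 32-byte boundary.)
	 */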
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
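	/*
	 * Each new page starts its items `pr_curcolor' bytes into the
	 * page; pool_prime_page() advances the color by `align' and
	 * wraps past `pr_maxcolor', so items on successive pages land
	 * on different cache lines.
	 */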
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	/* XXX Only clear this if we were drainpp? */
	drainpp = NULL;
	simple_unlock(&pool_head_slock);

	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);

	if (pp->pr_roflags & PR_FREEHEADER)
		free(pp, M_POOL);
}

static __inline struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splhigh();
		ph = pool_get(&phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
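/*
 * For example, a caller using a pool that is also used from interrupt
 * context would typically wrap the call (sketch):
 *
 *	s = splvm();
 *	v = pool_get(pp, PR_NOWAIT);
 *	splx(s);
 */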
void *
#ifdef DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
			    (flags & PR_MALLOCOK))) {
		pr_printlog(pp, NULL, printf);
		panic("pool_get: static");
	}

	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
			    (flags & PR_WAITOK) != 0))
		panic("pool_get: must have NOWAIT");

#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		if (flags & PR_URGENT)
			panic("pool_get: urgent");

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent\n");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if (flags & PR_URGENT)
				panic("pool_get: urgent");

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: we actually want to wait just until
			 * the page allocator has memory again. Depending
			 * on this pool's usage, we might get stuck here
			 * for a long time.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}

	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent\n");
	}

	pr_log(pp, v, PRLOG_GET, file, line);

	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		    " item addr %p\n",
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * Migrate empty pages to the end of the list.  This
		 * will speed the update of curpage as pages become
		 * idle.  Empty pages intermingled with idle pages
		 * is no big deal.  As soon as a page becomes un-empty,
		 * it will move back to the head of the list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now complete, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water
	 *	    mark, free the page back to the system.
	 *
	 *	(2) Move it to the end of the page list, so that
	 *	    we minimize our chances of fragmenting the
	 *	    pool.  Idle pages migrate to the end (along with
	 *	    completely empty pages, so that we find un-empty
	 *	    pages more quickly when we update curpage) of the
	 *	    list so they can be more easily swept up by
	 *	    the pagedaemon when pages are scarce.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph);
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);

			/*
			 * Update the current page pointer.  Just look for
			 * the first page with any free items.
			 *
			 * XXX: Maybe we want an option to look for the
			 * page with the fewest available items, to minimize
			 * fragmentation?
			 */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}
	/*
	 * If the page has just become un-empty, move it to the head of
	 * the list, and make it the current page.  The next allocation
	 * will get the item from this page, instead of further fragmenting
	 * the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

	simple_lock(&pp->pr_slock);

	pool_do_put(pp, v);

	simple_unlock(&pp->pr_slock);
}

#ifdef DIAGNOSTIC
#define	pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int newpages, error = 0;

	simple_lock(&pp->pr_slock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		simple_lock(&pp->pr_slock);

		if (__predict_false(cp == NULL || ph == NULL)) {
			error = ENOMEM;
			if (cp != NULL)
				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
			break;
		}

		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (error);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);

	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
		    ph, ph_hashlist);

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	memset(&ph->ph_time, 0, sizeof(ph->ph_time));

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * brings nitems back up to the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, this doesn't work with static pools.
 *
 * Note 3, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph;
	caddr_t cp;
	int error = 0;

	if (pp->pr_roflags & PR_STATIC) {
		/*
		 * We dropped below the low water mark, and this is not a
		 * good thing.  Log a warning.
		 *
		 * XXX: rate-limit this?
		 */
		printf("WARNING: static pool `%s' dropped below low water "
		    "mark\n", pp->pr_wchan);
		return (0);
	}

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		simple_lock(&pp->pr_slock);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
			error = ENOMEM;
			break;
		}
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

void
pool_setlowat(struct pool *pp, int n)
{
	int error;

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

/*
 * Default page allocator.
 */
static void *
pool_page_alloc(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

static void
pool_page_free(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage((vaddr_t)v);
}

/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
}

void
pool_page_free_nointr(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}


/*
 * Release all complete pages that have not been used recently.
 */
void
#ifdef DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	int s;

	if (pp->pr_roflags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_slock) == 0)
		return;
	pr_enter(pp, file, line);

	/*
	 * Reclaim items from the pool's caches.
	 */
	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
	     pc = TAILQ_NEXT(pc, pc_poollist))
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;

			/*
			 * If freeing this page would put us below
			 * the low water mark, stop now.
			 */
			if ((pp->pr_nitems - pp->pr_itemsperpage) <
			    pp->pr_minitems)
				break;

			pr_rmpage(pp, ph);
		}
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}


/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	s = splvm();
	simple_lock(&pool_head_slock);

	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
		goto out;

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);

	pool_reclaim(pp);

 out:
	simple_unlock(&pool_head_slock);
	splx(s);
}


/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}
1406 1.25 thorpej
1407 1.25 thorpej (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1408 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1409 1.25 thorpej pp->pr_roflags);
1410 1.25 thorpej (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1411 1.25 thorpej (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1412 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1413 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1414 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1415 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1416 1.25 thorpej
1417 1.25 thorpej (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1418 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1419 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1420 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1421 1.25 thorpej
1422 1.25 thorpej if (print_pagelist == 0)
1423 1.25 thorpej goto skip_pagelist;
1424 1.25 thorpej
1425 1.25 thorpej if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1426 1.25 thorpej (*pr)("\n\tpage list:\n");
1427 1.25 thorpej for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1428 1.25 thorpej (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1429 1.25 thorpej ph->ph_page, ph->ph_nmissing,
1430 1.25 thorpej (u_long)ph->ph_time.tv_sec,
1431 1.25 thorpej (u_long)ph->ph_time.tv_usec);
1432 1.25 thorpej #ifdef DIAGNOSTIC
1433 1.25 thorpej for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1434 1.25 thorpej pi = TAILQ_NEXT(pi, pi_list)) {
1435 1.25 thorpej if (pi->pi_magic != PI_MAGIC) {
1436 1.25 thorpej (*pr)("\t\t\titem %p, magic 0x%x\n",
1437 1.25 thorpej pi, pi->pi_magic);
1438 1.25 thorpej }
1439 1.25 thorpej }
1440 1.25 thorpej #endif
1441 1.25 thorpej }
1442 1.25 thorpej if (pp->pr_curpage == NULL)
1443 1.25 thorpej (*pr)("\tno current page\n");
1444 1.25 thorpej else
1445 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1446 1.25 thorpej
1447 1.25 thorpej skip_pagelist:
1448 1.25 thorpej
1449 1.25 thorpej if (print_log == 0)
1450 1.25 thorpej goto skip_log;
1451 1.25 thorpej
1452 1.25 thorpej (*pr)("\n");
1453 1.25 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
1454 1.25 thorpej (*pr)("\tno log\n");
1455 1.25 thorpej else
1456 1.25 thorpej pr_printlog(pp, NULL, pr);
1457 1.3 pk
1458 1.25 thorpej skip_log:
1459 1.44 thorpej
1460 1.44 thorpej if (print_cache == 0)
1461 1.44 thorpej goto skip_cache;
1462 1.44 thorpej
1463 1.44 thorpej for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1464 1.44 thorpej pc = TAILQ_NEXT(pc, pc_poollist)) {
1465 1.44 thorpej (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1466 1.44 thorpej pc->pc_allocfrom, pc->pc_freeto);
1467 1.48 thorpej (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1468 1.48 thorpej pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1469 1.44 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1470 1.44 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1471 1.44 thorpej (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1472 1.44 thorpej for (i = 0; i < PCG_NOBJECTS; i++)
1473 1.44 thorpej (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1474 1.44 thorpej }
1475 1.44 thorpej }
1476 1.44 thorpej
1477 1.44 thorpej skip_cache:
1478 1.3 pk
1479 1.25 thorpej pr_enter_check(pp, pr);
1480 1.3 pk }
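
/*
 * A minimal usage sketch for the printer above (hedged: it assumes the
 * pool_print() wrapper declared in <sys/pool.h>; "somepool" is a
 * hypothetical pool):
 *
 *	pool_print(&somepool, "lpc");
 *
 * The modifier string selects the optional sections: 'l' dumps the
 * pool's operation log, 'p' the page list, 'c' any caches attached
 * to the pool.
 */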
1481 1.3 pk
1482 1.3 pk int
1483 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1484 1.3 pk {
1485 1.3 pk struct pool_item_header *ph;
1486 1.3 pk int r = 0;
1487 1.3 pk
1488 1.21 thorpej simple_lock(&pp->pr_slock);
1489 1.3 pk
1490 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1491 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist)) {
1492 1.3 pk
1493 1.3 pk struct pool_item *pi;
1494 1.3 pk int n;
1495 1.3 pk caddr_t page;
1496 1.3 pk
1497 1.3 pk page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1498 1.20 thorpej if (page != ph->ph_page &&
1499 1.20 thorpej (pp->pr_roflags & PR_PHINPAGE) != 0) {
1500 1.3 pk if (label != NULL)
1501 1.3 pk printf("%s: ", label);
1502 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1503 1.16 briggs " at page head addr %p (p %p)\n", pp,
1504 1.3 pk pp->pr_wchan, ph->ph_page,
1505 1.3 pk ph, page);
1506 1.3 pk r++;
1507 1.3 pk goto out;
1508 1.3 pk }
1509 1.3 pk
1510 1.3 pk for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1511 1.3 pk pi != NULL;
1512 1.3 pk pi = TAILQ_NEXT(pi,pi_list), n++) {
1513 1.3 pk
1514 1.3 pk #ifdef DIAGNOSTIC
1515 1.3 pk if (pi->pi_magic != PI_MAGIC) {
1516 1.3 pk if (label != NULL)
1517 1.3 pk printf("%s: ", label);
1518 1.3 pk printf("pool(%s): free list modified: magic=%x;"
1519 1.3 pk " page %p; item ordinal %d;"
1520 1.3 pk " addr %p (p %p)\n",
1521 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page,
1522 1.3 pk n, pi, page);
1523 1.3 pk panic("pool");
1524 1.3 pk }
1525 1.3 pk #endif
1526 1.3 pk page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1527 1.3 pk if (page == ph->ph_page)
1528 1.3 pk continue;
1529 1.3 pk
1530 1.3 pk if (label != NULL)
1531 1.3 pk printf("%s: ", label);
1532 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1533 1.16 briggs " item ordinal %d; addr %p (p %p)\n", pp,
1534 1.3 pk pp->pr_wchan, ph->ph_page,
1535 1.3 pk n, pi, page);
1536 1.3 pk r++;
1537 1.3 pk goto out;
1538 1.3 pk }
1539 1.3 pk }
1540 1.3 pk out:
1541 1.21 thorpej simple_unlock(&pp->pr_slock);
1542 1.3 pk return (r);
1543 1.43 thorpej }
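
/*
 * A sketch of a call site for the consistency check ("somepool" is a
 * hypothetical pool; the label, if non-NULL, prefixes any complaint):
 *
 *	if (pool_chk(&somepool, "after reclaim") != 0)
 *		panic("somepool corrupted");
 *
 * The return value is non-zero if an inconsistency was found.
 */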
1544 1.43 thorpej
1545 1.43 thorpej /*
1546 1.43 thorpej * pool_cache_init:
1547 1.43 thorpej *
1548 1.43 thorpej * Initialize a pool cache.
1549 1.43 thorpej *
1550 1.43 thorpej * NOTE: If the pool must be protected from interrupts, we expect
1551 1.43 thorpej * to be called at the appropriate interrupt priority level.
1552 1.43 thorpej */
1553 1.43 thorpej void
1554 1.43 thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
1555 1.43 thorpej int (*ctor)(void *, void *, int),
1556 1.43 thorpej void (*dtor)(void *, void *),
1557 1.43 thorpej void *arg)
1558 1.43 thorpej {
1559 1.43 thorpej
1560 1.43 thorpej TAILQ_INIT(&pc->pc_grouplist);
1561 1.43 thorpej simple_lock_init(&pc->pc_slock);
1562 1.43 thorpej
1563 1.43 thorpej pc->pc_allocfrom = NULL;
1564 1.43 thorpej pc->pc_freeto = NULL;
1565 1.43 thorpej pc->pc_pool = pp;
1566 1.43 thorpej
1567 1.43 thorpej pc->pc_ctor = ctor;
1568 1.43 thorpej pc->pc_dtor = dtor;
1569 1.43 thorpej pc->pc_arg = arg;
1570 1.43 thorpej
1571 1.48 thorpej pc->pc_hits = 0;
1572 1.48 thorpej pc->pc_misses = 0;
1573 1.48 thorpej
1574 1.48 thorpej pc->pc_ngroups = 0;
1575 1.48 thorpej
1576 1.48 thorpej pc->pc_nitems = 0;
1577 1.48 thorpej
1578 1.43 thorpej simple_lock(&pp->pr_slock);
1579 1.43 thorpej TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1580 1.43 thorpej simple_unlock(&pp->pr_slock);
1581 1.43 thorpej }
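
/*
 * A usage sketch, assuming the pool_init() signature used earlier in
 * this file; "struct foo", foo_pool, foo_cache and foo_ctor() are all
 * hypothetical:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		struct foo *f = object;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, NULL, NULL);
 *
 * A constructor that returns non-zero causes pool_cache_get() to fail
 * the allocation; the destructor and argument may be NULL, as here.
 */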
1582 1.43 thorpej
1583 1.43 thorpej /*
1584 1.43 thorpej * pool_cache_destroy:
1585 1.43 thorpej *
1586 1.43 thorpej * Destroy a pool cache.
1587 1.43 thorpej */
1588 1.43 thorpej void
1589 1.43 thorpej pool_cache_destroy(struct pool_cache *pc)
1590 1.43 thorpej {
1591 1.43 thorpej struct pool *pp = pc->pc_pool;
1592 1.43 thorpej
1593 1.43 thorpej /* First, invalidate the entire cache. */
1594 1.43 thorpej pool_cache_invalidate(pc);
1595 1.43 thorpej
1596 1.43 thorpej /* ...and remove it from the pool's cache list. */
1597 1.43 thorpej simple_lock(&pp->pr_slock);
1598 1.43 thorpej TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1599 1.43 thorpej simple_unlock(&pp->pr_slock);
1600 1.43 thorpej }
1601 1.43 thorpej
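/*
 * pcg_get:
 *
 *	Remove and return the most recently cached object from a group.
 *	Groups are LIFO stacks; the caller must hold pc_slock and must
 *	guarantee that the group is not empty.
 */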
1602 1.43 thorpej static __inline void *
1603 1.43 thorpej pcg_get(struct pool_cache_group *pcg)
1604 1.43 thorpej {
1605 1.43 thorpej void *object;
1606 1.43 thorpej u_int idx;
1607 1.43 thorpej
1608 1.43 thorpej KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1609 1.45 thorpej KASSERT(pcg->pcg_avail != 0);
1610 1.43 thorpej idx = --pcg->pcg_avail;
1611 1.43 thorpej
1612 1.43 thorpej KASSERT(pcg->pcg_objects[idx] != NULL);
1613 1.43 thorpej object = pcg->pcg_objects[idx];
1614 1.43 thorpej pcg->pcg_objects[idx] = NULL;
1615 1.43 thorpej
1616 1.43 thorpej return (object);
1617 1.43 thorpej }
1618 1.43 thorpej
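/*
 * pcg_put:
 *
 *	Stash an object in the lowest free slot of a cache group.  The
 *	caller must hold pc_slock and must guarantee that the group is
 *	not full.
 */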
1619 1.43 thorpej static __inline void
1620 1.43 thorpej pcg_put(struct pool_cache_group *pcg, void *object)
1621 1.43 thorpej {
1622 1.43 thorpej u_int idx;
1623 1.43 thorpej
1624 1.43 thorpej KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1625 1.43 thorpej idx = pcg->pcg_avail++;
1626 1.43 thorpej
1627 1.43 thorpej KASSERT(pcg->pcg_objects[idx] == NULL);
1628 1.43 thorpej pcg->pcg_objects[idx] = object;
1629 1.43 thorpej }
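
/*
 * Design note: pc_allocfrom and pc_freeto are hints recording the
 * group most recently found with available objects (for get) or with
 * free slots (for put), so the common case avoids walking the whole
 * pc_grouplist.  A hint is cleared when its group drains empty or
 * fills up, and re-established on the next list walk.
 */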
1630 1.43 thorpej
1631 1.43 thorpej /*
1632 1.43 thorpej * pool_cache_get:
1633 1.43 thorpej *
1634 1.43 thorpej * Get an object from a pool cache.
1635 1.43 thorpej */
1636 1.43 thorpej void *
1637 1.43 thorpej pool_cache_get(struct pool_cache *pc, int flags)
1638 1.43 thorpej {
1639 1.43 thorpej struct pool_cache_group *pcg;
1640 1.43 thorpej void *object;
1641 1.58 thorpej
1642 1.58 thorpej #ifdef LOCKDEBUG
1643 1.58 thorpej if (flags & PR_WAITOK)
1644 1.58 thorpej simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1645 1.58 thorpej #endif
1646 1.43 thorpej
1647 1.43 thorpej simple_lock(&pc->pc_slock);
1648 1.43 thorpej
1649 1.43 thorpej if ((pcg = pc->pc_allocfrom) == NULL) {
1650 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1651 1.43 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1652 1.43 thorpej if (pcg->pcg_avail != 0) {
1653 1.43 thorpej pc->pc_allocfrom = pcg;
1654 1.43 thorpej goto have_group;
1655 1.43 thorpej }
1656 1.43 thorpej }
1657 1.43 thorpej
1658 1.43 thorpej /*
1659 1.43 thorpej * No groups with any available objects. Allocate
1660 1.43 thorpej * a new object, construct it, and return it to
1661 1.43 thorpej * the caller. We will allocate a group, if necessary,
1662 1.43 thorpej * when the object is freed back to the cache.
1663 1.43 thorpej */
1664 1.48 thorpej pc->pc_misses++;
1665 1.43 thorpej simple_unlock(&pc->pc_slock);
1666 1.43 thorpej object = pool_get(pc->pc_pool, flags);
1667 1.43 thorpej if (object != NULL && pc->pc_ctor != NULL) {
1668 1.43 thorpej if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1669 1.43 thorpej pool_put(pc->pc_pool, object);
1670 1.43 thorpej return (NULL);
1671 1.43 thorpej }
1672 1.43 thorpej }
1673 1.43 thorpej return (object);
1674 1.43 thorpej }
1675 1.43 thorpej
1676 1.43 thorpej have_group:
1677 1.48 thorpej pc->pc_hits++;
1678 1.48 thorpej pc->pc_nitems--;
1679 1.43 thorpej object = pcg_get(pcg);
1680 1.43 thorpej
1681 1.43 thorpej if (pcg->pcg_avail == 0)
1682 1.43 thorpej pc->pc_allocfrom = NULL;
1683 1.45 thorpej
1684 1.43 thorpej simple_unlock(&pc->pc_slock);
1685 1.43 thorpej
1686 1.43 thorpej return (object);
1687 1.43 thorpej }
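
/*
 * A minimal allocation sketch ("foo_cache" continues the hypothetical
 * example above):
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return (ENOMEM);
 *
 * Note that NULL is possible even with PR_WAITOK, since a failing
 * constructor also causes pool_cache_get() to return NULL.
 */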
1688 1.43 thorpej
1689 1.43 thorpej /*
1690 1.43 thorpej * pool_cache_put:
1691 1.43 thorpej *
1692 1.43 thorpej * Put an object back to the pool cache.
1693 1.43 thorpej */
1694 1.43 thorpej void
1695 1.43 thorpej pool_cache_put(struct pool_cache *pc, void *object)
1696 1.43 thorpej {
1697 1.43 thorpej struct pool_cache_group *pcg;
1698 1.43 thorpej
1699 1.43 thorpej simple_lock(&pc->pc_slock);
1700 1.43 thorpej
1701 1.43 thorpej if ((pcg = pc->pc_freeto) == NULL) {
1702 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1703 1.43 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1704 1.43 thorpej if (pcg->pcg_avail != PCG_NOBJECTS) {
1705 1.43 thorpej pc->pc_freeto = pcg;
1706 1.43 thorpej goto have_group;
1707 1.43 thorpej }
1708 1.43 thorpej }
1709 1.43 thorpej
1710 1.43 thorpej 		/*
1711 1.43 thorpej 		 * No group has a free slot to receive the object.
1712 1.47 thorpej 		 * Attempt to allocate a new group.
1713 1.43 thorpej 		 */
1714 1.47 thorpej simple_unlock(&pc->pc_slock);
1715 1.43 thorpej pcg = pool_get(&pcgpool, PR_NOWAIT);
1716 1.43 thorpej if (pcg != NULL) {
1717 1.43 thorpej memset(pcg, 0, sizeof(*pcg));
1718 1.47 thorpej simple_lock(&pc->pc_slock);
1719 1.48 thorpej pc->pc_ngroups++;
1720 1.43 thorpej TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1721 1.47 thorpej if (pc->pc_freeto == NULL)
1722 1.47 thorpej pc->pc_freeto = pcg;
1723 1.43 thorpej goto have_group;
1724 1.43 thorpej }
1725 1.43 thorpej
1726 1.43 thorpej /*
1727 1.43 thorpej * Unable to allocate a cache group; destruct the object
1728 1.43 thorpej * and free it back to the pool.
1729 1.43 thorpej */
1730 1.51 thorpej pool_cache_destruct_object(pc, object);
1731 1.43 thorpej return;
1732 1.43 thorpej }
1733 1.43 thorpej
1734 1.43 thorpej have_group:
1735 1.48 thorpej pc->pc_nitems++;
1736 1.43 thorpej pcg_put(pcg, object);
1737 1.43 thorpej
1738 1.43 thorpej if (pcg->pcg_avail == PCG_NOBJECTS)
1739 1.43 thorpej pc->pc_freeto = NULL;
1740 1.43 thorpej
1741 1.43 thorpej simple_unlock(&pc->pc_slock);
1742 1.51 thorpej }
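
/*
 * The matching release for the sketch above; the object goes back to
 * the cache still constructed, ready to satisfy a later
 * pool_cache_get():
 *
 *	pool_cache_put(&foo_cache, f);
 */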
1743 1.51 thorpej
1744 1.51 thorpej /*
1745 1.51 thorpej * pool_cache_destruct_object:
1746 1.51 thorpej *
1747 1.51 thorpej * Force destruction of an object and its release back into
1748 1.51 thorpej * the pool.
1749 1.51 thorpej */
1750 1.51 thorpej void
1751 1.51 thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
1752 1.51 thorpej {
1753 1.51 thorpej
1754 1.51 thorpej if (pc->pc_dtor != NULL)
1755 1.51 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1756 1.51 thorpej pool_put(pc->pc_pool, object);
1757 1.43 thorpej }
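
/*
 * A hypothetical call site: when an object's constructed state has
 * been invalidated and caching it would be wrong, release it directly:
 *
 *	pool_cache_destruct_object(&foo_cache, f);
 */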
1758 1.43 thorpej
1759 1.43 thorpej /*
1760 1.43 thorpej * pool_cache_do_invalidate:
1761 1.43 thorpej *
1762 1.43 thorpej * This internal function implements pool_cache_invalidate() and
1763 1.43 thorpej * pool_cache_reclaim().
1764 1.43 thorpej */
1765 1.43 thorpej static void
1766 1.43 thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1767 1.56 sommerfe void (*putit)(struct pool *, void *))
1768 1.43 thorpej {
1769 1.43 thorpej struct pool_cache_group *pcg, *npcg;
1770 1.43 thorpej void *object;
1771 1.43 thorpej
1772 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1773 1.43 thorpej pcg = npcg) {
1774 1.43 thorpej npcg = TAILQ_NEXT(pcg, pcg_list);
1775 1.43 thorpej while (pcg->pcg_avail != 0) {
1776 1.48 thorpej pc->pc_nitems--;
1777 1.43 thorpej object = pcg_get(pcg);
1778 1.45 thorpej if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1779 1.45 thorpej pc->pc_allocfrom = NULL;
1780 1.43 thorpej if (pc->pc_dtor != NULL)
1781 1.43 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1782 1.56 sommerfe (*putit)(pc->pc_pool, object);
1783 1.43 thorpej }
1784 1.43 thorpej if (free_groups) {
1785 1.48 thorpej pc->pc_ngroups--;
1786 1.43 thorpej TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1787 1.46 thorpej if (pc->pc_freeto == pcg)
1788 1.46 thorpej pc->pc_freeto = NULL;
1789 1.43 thorpej pool_put(&pcgpool, pcg);
1790 1.43 thorpej }
1791 1.43 thorpej }
1792 1.43 thorpej }
1793 1.43 thorpej
1794 1.43 thorpej /*
1795 1.43 thorpej * pool_cache_invalidate:
1796 1.43 thorpej *
1797 1.43 thorpej * Invalidate a pool cache (destruct and release all of the
1798 1.43 thorpej * cached objects).
1799 1.43 thorpej */
1800 1.43 thorpej void
1801 1.43 thorpej pool_cache_invalidate(struct pool_cache *pc)
1802 1.43 thorpej {
1803 1.43 thorpej
1804 1.43 thorpej simple_lock(&pc->pc_slock);
1805 1.56 sommerfe pool_cache_do_invalidate(pc, 0, pool_put);
1806 1.43 thorpej simple_unlock(&pc->pc_slock);
1807 1.43 thorpej }
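
/*
 * A sketch of a typical call site, flushing all constructed objects
 * (for example, before tearing the cache down, as pool_cache_destroy()
 * does above):
 *
 *	pool_cache_invalidate(&foo_cache);
 */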
1808 1.43 thorpej
1809 1.43 thorpej /*
1810 1.43 thorpej * pool_cache_reclaim:
1811 1.43 thorpej *
1812 1.43 thorpej * Reclaim a pool cache for pool_reclaim().
1813 1.43 thorpej */
1814 1.43 thorpej static void
1815 1.43 thorpej pool_cache_reclaim(struct pool_cache *pc)
1816 1.43 thorpej {
1817 1.43 thorpej
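	/*
	 * pool_reclaim() holds pp->pr_slock across this call, so cached
	 * objects are handed back with pool_do_put() rather than
	 * pool_put(), avoiding a recursive attempt on that lock.
	 */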
1818 1.47 thorpej simple_lock(&pc->pc_slock);
1819 1.43 thorpej pool_cache_do_invalidate(pc, 1, pool_do_put);
1820 1.43 thorpej simple_unlock(&pc->pc_slock);
1821 1.3 pk }