1 1.53 thorpej /* $NetBSD: subr_pool.c,v 1.53 2001/05/10 01:37:40 thorpej Exp $ */
2 1.1 pk
3 1.1 pk /*-
4 1.43 thorpej * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 1.1 pk * All rights reserved.
6 1.1 pk *
7 1.1 pk * This code is derived from software contributed to The NetBSD Foundation
8 1.20 thorpej * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 1.20 thorpej * Simulation Facility, NASA Ames Research Center.
10 1.1 pk *
11 1.1 pk * Redistribution and use in source and binary forms, with or without
12 1.1 pk * modification, are permitted provided that the following conditions
13 1.1 pk * are met:
14 1.1 pk * 1. Redistributions of source code must retain the above copyright
15 1.1 pk * notice, this list of conditions and the following disclaimer.
16 1.1 pk * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 pk * notice, this list of conditions and the following disclaimer in the
18 1.1 pk * documentation and/or other materials provided with the distribution.
19 1.1 pk * 3. All advertising materials mentioning features or use of this software
20 1.1 pk * must display the following acknowledgement:
21 1.13 christos * This product includes software developed by the NetBSD
22 1.13 christos * Foundation, Inc. and its contributors.
23 1.1 pk * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.1 pk * contributors may be used to endorse or promote products derived
25 1.1 pk * from this software without specific prior written permission.
26 1.1 pk *
27 1.1 pk * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.1 pk * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.1 pk * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.1 pk * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.1 pk * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.1 pk * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.1 pk * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.1 pk * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.1 pk * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.1 pk * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.1 pk * POSSIBILITY OF SUCH DAMAGE.
38 1.1 pk */
39 1.24 scottr
40 1.25 thorpej #include "opt_pool.h"
41 1.24 scottr #include "opt_poollog.h"
42 1.28 thorpej #include "opt_lockdebug.h"
43 1.1 pk
44 1.1 pk #include <sys/param.h>
45 1.1 pk #include <sys/systm.h>
46 1.1 pk #include <sys/proc.h>
47 1.1 pk #include <sys/errno.h>
48 1.1 pk #include <sys/kernel.h>
49 1.1 pk #include <sys/malloc.h>
50 1.1 pk #include <sys/lock.h>
51 1.1 pk #include <sys/pool.h>
52 1.20 thorpej #include <sys/syslog.h>
53 1.3 pk
54 1.3 pk #include <uvm/uvm.h>
55 1.3 pk
56 1.1 pk /*
57 1.1 pk * Pool resource management utility.
58 1.3 pk *
59 1.3 pk * Memory is allocated in pages which are split into pieces according
60 1.3 pk * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
61 1.3 pk * in the pool structure and the individual pool items are on a linked list
62 1.3 pk * headed by `ph_itemlist' in each page header. The memory for building
63 1.3 pk * the page list is either taken from the allocated pages themselves (for
64 1.3 pk * small pool items) or taken from an internal pool of page headers (`phpool').
65 1.1 pk */
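
/*
 * Illustrative usage sketch (not part of this file; "struct frob",
 * "frobpool" and the M_DEVBUF malloc type are stand-ins chosen for the
 * example).  A typical client declares a pool, initializes it once, and
 * then gets and puts items at the appropriate spl level:
 *
 *	static struct pool frobpool;
 *	struct frob *f;
 *
 *	pool_init(&frobpool, sizeof(struct frob), 0, 0, 0,
 *	    "frobpl", 0, NULL, NULL, M_DEVBUF);
 *
 *	f = pool_get(&frobpool, PR_WAITOK);
 *	...
 *	pool_put(&frobpool, f);
 */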
66 1.1 pk
67 1.3 pk /* List of all pools */
68 1.5 thorpej TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
69 1.3 pk
70 1.3 pk /* Private pool for page header structures */
71 1.3 pk static struct pool phpool;
72 1.3 pk
73 1.3 pk /* # of seconds to retain page after last use */
74 1.3 pk int pool_inactive_time = 10;
75 1.3 pk
76 1.3 pk /* Next candidate for drainage (see pool_drain()) */
77 1.23 thorpej static struct pool *drainpp;
78 1.23 thorpej
79 1.23 thorpej /* This spin lock protects both pool_head and drainpp. */
80 1.23 thorpej struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
81 1.3 pk
82 1.3 pk struct pool_item_header {
83 1.3 pk /* Page headers */
84 1.3 pk TAILQ_ENTRY(pool_item_header)
85 1.3 pk ph_pagelist; /* pool page list */
86 1.3 pk TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
87 1.3 pk LIST_ENTRY(pool_item_header)
88 1.3 pk ph_hashlist; /* Off-page page headers */
89 1.3 pk int ph_nmissing; /* # of chunks in use */
90 1.3 pk caddr_t ph_page; /* this page's address */
91 1.3 pk struct timeval ph_time; /* last referenced */
92 1.3 pk };
93 1.3 pk
94 1.1 pk struct pool_item {
95 1.3 pk #ifdef DIAGNOSTIC
96 1.3 pk int pi_magic;
97 1.33 chs #endif
98 1.25 thorpej #define PI_MAGIC 0xdeadbeef
99 1.3 pk /* Other entries use only this list entry */
100 1.3 pk TAILQ_ENTRY(pool_item) pi_list;
101 1.3 pk };
102 1.3 pk
103 1.25 thorpej #define PR_HASH_INDEX(pp,addr) \
104 1.3 pk (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
105 1.3 pk
106 1.53 thorpej #define POOL_NEEDS_CATCHUP(pp) \
107 1.53 thorpej ((pp)->pr_nitems < (pp)->pr_minitems)
108 1.53 thorpej
109 1.43 thorpej /*
110 1.43 thorpej * Pool cache management.
111 1.43 thorpej *
112 1.43 thorpej * Pool caches provide a way for constructed objects to be cached by the
113 1.43 thorpej * pool subsystem. This can lead to performance improvements by avoiding
114 1.43 thorpej  * needless object construction/destruction; they are deferred until
115 1.43 thorpej  * absolutely necessary.
116 1.43 thorpej *
117 1.43 thorpej * Caches are grouped into cache groups. Each cache group references
118 1.43 thorpej * up to 16 constructed objects. When a cache allocates an object
119 1.43 thorpej * from the pool, it calls the object's constructor and places it into
120 1.43 thorpej * a cache group. When a cache group frees an object back to the pool,
121 1.43 thorpej * it first calls the object's destructor. This allows the object to
122 1.43 thorpej * persist in constructed form while freed to the cache.
123 1.43 thorpej *
124 1.43 thorpej * Multiple caches may exist for each pool. This allows a single
125 1.43 thorpej * object type to have multiple constructed forms. The pool references
126 1.43 thorpej * each cache, so that when a pool is drained by the pagedaemon, it can
127 1.43 thorpej * drain each individual cache as well. Each time a cache is drained,
128 1.43 thorpej * the most idle cache group is freed to the pool in its entirety.
129 1.43 thorpej *
130 1.43 thorpej  * Pool caches are laid on top of pools. By layering them, we can avoid
131 1.43 thorpej * the complexity of cache management for pools which would not benefit
132 1.43 thorpej * from it.
133 1.43 thorpej */
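
/*
 * Illustrative sketch of the cache interface described above (hypothetical
 * names; frob_ctor() and frob_dtor() are assumed to match the constructor
 * and destructor signatures taken by pool_cache_init(), and
 * pool_cache_put() is the put-side counterpart of pool_cache_get()):
 *
 *	static struct pool_cache frobcache;
 *
 *	pool_cache_init(&frobcache, &frobpool, frob_ctor, frob_dtor, NULL);
 *
 *	f = pool_cache_get(&frobcache, PR_WAITOK);
 *	...
 *	pool_cache_put(&frobcache, f);
 */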
134 1.43 thorpej
135 1.43 thorpej /* The cache group pool. */
136 1.43 thorpej static struct pool pcgpool;
137 1.43 thorpej
138 1.43 thorpej /* The pool cache group. */
139 1.43 thorpej #define PCG_NOBJECTS 16
140 1.43 thorpej struct pool_cache_group {
141 1.43 thorpej TAILQ_ENTRY(pool_cache_group)
142 1.43 thorpej pcg_list; /* link in the pool cache's group list */
143 1.43 thorpej u_int pcg_avail; /* # available objects */
144 1.43 thorpej /* pointers to the objects */
145 1.43 thorpej void *pcg_objects[PCG_NOBJECTS];
146 1.43 thorpej };
147 1.3 pk
148 1.43 thorpej static void pool_cache_reclaim(struct pool_cache *);
149 1.3 pk
150 1.42 thorpej static int pool_catchup(struct pool *);
151 1.50 enami static int pool_prime_page(struct pool *, caddr_t, int);
152 1.42 thorpej static void *pool_page_alloc(unsigned long, int, int);
153 1.42 thorpej static void pool_page_free(void *, unsigned long, int);
154 1.3 pk
155 1.42 thorpej static void pool_print1(struct pool *, const char *,
156 1.42 thorpej void (*)(const char *, ...));
157 1.3 pk
158 1.3 pk /*
159 1.52 thorpej * Pool log entry. An array of these is allocated in pool_init().
160 1.3 pk */
161 1.3 pk struct pool_log {
162 1.3 pk const char *pl_file;
163 1.3 pk long pl_line;
164 1.3 pk int pl_action;
165 1.25 thorpej #define PRLOG_GET 1
166 1.25 thorpej #define PRLOG_PUT 2
167 1.3 pk void *pl_addr;
168 1.1 pk };
169 1.1 pk
170 1.3 pk /* Number of entries in pool log buffers */
171 1.17 thorpej #ifndef POOL_LOGSIZE
172 1.17 thorpej #define POOL_LOGSIZE 10
173 1.17 thorpej #endif
174 1.17 thorpej
175 1.17 thorpej int pool_logsize = POOL_LOGSIZE;
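
/*
 * Illustrative kernel configuration fragment (an assumption about the
 * options behind opt_pool.h/opt_poollog.h above, not taken from this file):
 *
 *	options POOL_DIAGNOSTIC
 *	options POOL_LOGSIZE=20
 */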
176 1.1 pk
177 1.25 thorpej #ifdef DIAGNOSTIC
178 1.42 thorpej static __inline void
179 1.42 thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
180 1.3 pk {
181 1.3 pk int n = pp->pr_curlogentry;
182 1.3 pk struct pool_log *pl;
183 1.3 pk
184 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
185 1.3 pk return;
186 1.3 pk
187 1.3 pk /*
188 1.3 pk * Fill in the current entry. Wrap around and overwrite
189 1.3 pk * the oldest entry if necessary.
190 1.3 pk */
191 1.3 pk pl = &pp->pr_log[n];
192 1.3 pk pl->pl_file = file;
193 1.3 pk pl->pl_line = line;
194 1.3 pk pl->pl_action = action;
195 1.3 pk pl->pl_addr = v;
196 1.3 pk if (++n >= pp->pr_logsize)
197 1.3 pk n = 0;
198 1.3 pk pp->pr_curlogentry = n;
199 1.3 pk }
200 1.3 pk
201 1.3 pk static void
202 1.42 thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
203 1.42 thorpej void (*pr)(const char *, ...))
204 1.3 pk {
205 1.3 pk int i = pp->pr_logsize;
206 1.3 pk int n = pp->pr_curlogentry;
207 1.3 pk
208 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
209 1.3 pk return;
210 1.3 pk
211 1.3 pk /*
212 1.3 pk * Print all entries in this pool's log.
213 1.3 pk */
214 1.3 pk while (i-- > 0) {
215 1.3 pk struct pool_log *pl = &pp->pr_log[n];
216 1.3 pk if (pl->pl_action != 0) {
217 1.25 thorpej if (pi == NULL || pi == pl->pl_addr) {
218 1.25 thorpej (*pr)("\tlog entry %d:\n", i);
219 1.25 thorpej (*pr)("\t\taction = %s, addr = %p\n",
220 1.25 thorpej pl->pl_action == PRLOG_GET ? "get" : "put",
221 1.25 thorpej pl->pl_addr);
222 1.25 thorpej (*pr)("\t\tfile: %s at line %lu\n",
223 1.25 thorpej pl->pl_file, pl->pl_line);
224 1.25 thorpej }
225 1.3 pk }
226 1.3 pk if (++n >= pp->pr_logsize)
227 1.3 pk n = 0;
228 1.3 pk }
229 1.3 pk }
230 1.25 thorpej
231 1.42 thorpej static __inline void
232 1.42 thorpej pr_enter(struct pool *pp, const char *file, long line)
233 1.25 thorpej {
234 1.25 thorpej
235 1.34 thorpej if (__predict_false(pp->pr_entered_file != NULL)) {
236 1.25 thorpej printf("pool %s: reentrancy at file %s line %ld\n",
237 1.25 thorpej pp->pr_wchan, file, line);
238 1.25 thorpej printf(" previous entry at file %s line %ld\n",
239 1.25 thorpej pp->pr_entered_file, pp->pr_entered_line);
240 1.25 thorpej panic("pr_enter");
241 1.25 thorpej }
242 1.25 thorpej
243 1.25 thorpej pp->pr_entered_file = file;
244 1.25 thorpej pp->pr_entered_line = line;
245 1.25 thorpej }
246 1.25 thorpej
247 1.42 thorpej static __inline void
248 1.42 thorpej pr_leave(struct pool *pp)
249 1.25 thorpej {
250 1.25 thorpej
251 1.34 thorpej if (__predict_false(pp->pr_entered_file == NULL)) {
252 1.25 thorpej printf("pool %s not entered?\n", pp->pr_wchan);
253 1.25 thorpej panic("pr_leave");
254 1.25 thorpej }
255 1.25 thorpej
256 1.25 thorpej pp->pr_entered_file = NULL;
257 1.25 thorpej pp->pr_entered_line = 0;
258 1.25 thorpej }
259 1.25 thorpej
260 1.42 thorpej static __inline void
261 1.42 thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
262 1.25 thorpej {
263 1.25 thorpej
264 1.25 thorpej if (pp->pr_entered_file != NULL)
265 1.25 thorpej (*pr)("\n\tcurrently entered from file %s line %ld\n",
266 1.25 thorpej pp->pr_entered_file, pp->pr_entered_line);
267 1.25 thorpej }
268 1.3 pk #else
269 1.25 thorpej #define pr_log(pp, v, action, file, line)
270 1.25 thorpej #define pr_printlog(pp, pi, pr)
271 1.25 thorpej #define pr_enter(pp, file, line)
272 1.25 thorpej #define pr_leave(pp)
273 1.25 thorpej #define pr_enter_check(pp, pr)
274 1.25 thorpej #endif /* DIAGNOSTIC */
275 1.3 pk
276 1.3 pk /*
277 1.3 pk * Return the pool page header based on page address.
278 1.3 pk */
279 1.42 thorpej static __inline struct pool_item_header *
280 1.42 thorpej pr_find_pagehead(struct pool *pp, caddr_t page)
281 1.3 pk {
282 1.3 pk struct pool_item_header *ph;
283 1.3 pk
284 1.20 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0)
285 1.3 pk return ((struct pool_item_header *)(page + pp->pr_phoffset));
286 1.3 pk
287 1.3 pk for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
288 1.3 pk ph != NULL;
289 1.3 pk ph = LIST_NEXT(ph, ph_hashlist)) {
290 1.3 pk if (ph->ph_page == page)
291 1.3 pk return (ph);
292 1.3 pk }
293 1.3 pk return (NULL);
294 1.3 pk }
295 1.3 pk
296 1.3 pk /*
297 1.3 pk * Remove a page from the pool.
298 1.3 pk */
299 1.42 thorpej static __inline void
300 1.42 thorpej pr_rmpage(struct pool *pp, struct pool_item_header *ph)
301 1.3 pk {
302 1.3 pk
303 1.3 pk /*
304 1.7 thorpej * If the page was idle, decrement the idle page count.
305 1.3 pk */
306 1.6 thorpej if (ph->ph_nmissing == 0) {
307 1.6 thorpej #ifdef DIAGNOSTIC
308 1.6 thorpej if (pp->pr_nidle == 0)
309 1.6 thorpej panic("pr_rmpage: nidle inconsistent");
310 1.20 thorpej if (pp->pr_nitems < pp->pr_itemsperpage)
311 1.20 thorpej panic("pr_rmpage: nitems inconsistent");
312 1.6 thorpej #endif
313 1.6 thorpej pp->pr_nidle--;
314 1.6 thorpej }
315 1.7 thorpej
316 1.20 thorpej pp->pr_nitems -= pp->pr_itemsperpage;
317 1.20 thorpej
318 1.7 thorpej /*
319 1.7 thorpej * Unlink a page from the pool and release it.
320 1.7 thorpej */
321 1.7 thorpej TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
322 1.7 thorpej (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
323 1.7 thorpej pp->pr_npages--;
324 1.7 thorpej pp->pr_npagefree++;
325 1.6 thorpej
326 1.22 chs if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
327 1.27 pk int s;
328 1.22 chs LIST_REMOVE(ph, ph_hashlist);
329 1.27 pk s = splhigh();
330 1.22 chs pool_put(&phpool, ph);
331 1.27 pk splx(s);
332 1.22 chs }
333 1.22 chs
334 1.3 pk if (pp->pr_curpage == ph) {
335 1.3 pk /*
336 1.3 pk * Find a new non-empty page header, if any.
337 1.3 pk * Start search from the page head, to increase the
338 1.3 pk * chance for "high water" pages to be freed.
339 1.3 pk */
340 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
341 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist))
342 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
343 1.3 pk break;
344 1.3 pk
345 1.3 pk pp->pr_curpage = ph;
346 1.21 thorpej }
347 1.3 pk }
348 1.3 pk
349 1.3 pk /*
350 1.3 pk * Initialize the given pool resource structure.
351 1.3 pk *
352 1.3 pk * We export this routine to allow other kernel parts to declare
353 1.3 pk * static pools that must be initialized before malloc() is available.
354 1.3 pk */
355 1.3 pk void
356 1.42 thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
357 1.42 thorpej const char *wchan, size_t pagesz,
358 1.42 thorpej void *(*alloc)(unsigned long, int, int),
359 1.42 thorpej void (*release)(void *, unsigned long, int),
360 1.42 thorpej int mtype)
361 1.3 pk {
362 1.16 briggs int off, slack, i;
363 1.3 pk
364 1.25 thorpej #ifdef POOL_DIAGNOSTIC
365 1.25 thorpej /*
366 1.25 thorpej * Always log if POOL_DIAGNOSTIC is defined.
367 1.25 thorpej */
368 1.25 thorpej if (pool_logsize != 0)
369 1.25 thorpej flags |= PR_LOGGING;
370 1.25 thorpej #endif
371 1.25 thorpej
372 1.3 pk /*
373 1.3 pk * Check arguments and construct default values.
374 1.3 pk */
375 1.36 pk if (!powerof2(pagesz))
376 1.3 pk panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
377 1.3 pk
378 1.4 thorpej if (alloc == NULL && release == NULL) {
379 1.3 pk alloc = pool_page_alloc;
380 1.3 pk release = pool_page_free;
381 1.4 thorpej pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
382 1.4 thorpej } else if ((alloc != NULL && release != NULL) == 0) {
383 1.4 thorpej 		/* If you specify one, you must specify both. */
384 1.4 thorpej panic("pool_init: must specify alloc and release together");
385 1.4 thorpej }
386 1.4 thorpej
387 1.3 pk if (pagesz == 0)
388 1.3 pk pagesz = PAGE_SIZE;
389 1.3 pk
390 1.3 pk if (align == 0)
391 1.3 pk align = ALIGN(1);
392 1.14 thorpej
393 1.14 thorpej if (size < sizeof(struct pool_item))
394 1.14 thorpej size = sizeof(struct pool_item);
395 1.3 pk
396 1.35 pk size = ALIGN(size);
397 1.43 thorpej if (size > pagesz)
398 1.35 pk panic("pool_init: pool item size (%lu) too large",
399 1.35 pk (u_long)size);
400 1.35 pk
401 1.3 pk /*
402 1.3 pk * Initialize the pool structure.
403 1.3 pk */
404 1.3 pk TAILQ_INIT(&pp->pr_pagelist);
405 1.43 thorpej TAILQ_INIT(&pp->pr_cachelist);
406 1.3 pk pp->pr_curpage = NULL;
407 1.3 pk pp->pr_npages = 0;
408 1.3 pk pp->pr_minitems = 0;
409 1.3 pk pp->pr_minpages = 0;
410 1.3 pk pp->pr_maxpages = UINT_MAX;
411 1.20 thorpej pp->pr_roflags = flags;
412 1.20 thorpej pp->pr_flags = 0;
413 1.35 pk pp->pr_size = size;
414 1.3 pk pp->pr_align = align;
415 1.3 pk pp->pr_wchan = wchan;
416 1.3 pk pp->pr_mtype = mtype;
417 1.3 pk pp->pr_alloc = alloc;
418 1.3 pk pp->pr_free = release;
419 1.3 pk pp->pr_pagesz = pagesz;
420 1.3 pk pp->pr_pagemask = ~(pagesz - 1);
421 1.3 pk pp->pr_pageshift = ffs(pagesz) - 1;
422 1.20 thorpej pp->pr_nitems = 0;
423 1.20 thorpej pp->pr_nout = 0;
424 1.20 thorpej pp->pr_hardlimit = UINT_MAX;
425 1.20 thorpej pp->pr_hardlimit_warning = NULL;
426 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = 0;
427 1.31 thorpej pp->pr_hardlimit_ratecap.tv_usec = 0;
428 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
429 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
430 1.3 pk
431 1.3 pk /*
432 1.3 pk * Decide whether to put the page header off page to avoid
433 1.3 pk * wasting too large a part of the page. Off-page page headers
434 1.3 pk * go on a hash table, so we can match a returned item
435 1.3 pk * with its header based on the page address.
436 1.3 pk * We use 1/16 of the page size as the threshold (XXX: tune)
437 1.3 pk */
438 1.3 pk if (pp->pr_size < pagesz/16) {
439 1.3 pk /* Use the end of the page for the page header */
440 1.20 thorpej pp->pr_roflags |= PR_PHINPAGE;
441 1.3 pk pp->pr_phoffset = off =
442 1.3 pk pagesz - ALIGN(sizeof(struct pool_item_header));
443 1.2 pk } else {
444 1.3 pk /* The page header will be taken from our page header pool */
445 1.3 pk pp->pr_phoffset = 0;
446 1.3 pk off = pagesz;
447 1.16 briggs for (i = 0; i < PR_HASHTABSIZE; i++) {
448 1.16 briggs LIST_INIT(&pp->pr_hashtab[i]);
449 1.16 briggs }
450 1.2 pk }
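
	/*
	 * A worked example of the threshold above (illustrative numbers):
	 * with a 4096-byte page, pagesz/16 is 256, so items smaller than
	 * 256 bytes keep their header in-page, while larger items get an
	 * off-page header allocated from `phpool'.
	 */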
451 1.1 pk
452 1.3 pk /*
453 1.3 pk * Alignment is to take place at `ioff' within the item. This means
454 1.3 pk * we must reserve up to `align - 1' bytes on the page to allow
455 1.3 pk * appropriate positioning of each item.
456 1.3 pk *
457 1.3 pk * Silently enforce `0 <= ioff < align'.
458 1.3 pk */
459 1.3 pk pp->pr_itemoffset = ioff = ioff % align;
460 1.3 pk pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
461 1.43 thorpej KASSERT(pp->pr_itemsperpage != 0);
462 1.3 pk
463 1.3 pk /*
464 1.3 pk * Use the slack between the chunks and the page header
465 1.3 pk * for "cache coloring".
466 1.3 pk */
467 1.3 pk slack = off - pp->pr_itemsperpage * pp->pr_size;
468 1.3 pk pp->pr_maxcolor = (slack / align) * align;
469 1.3 pk pp->pr_curcolor = 0;
470 1.3 pk
471 1.3 pk pp->pr_nget = 0;
472 1.3 pk pp->pr_nfail = 0;
473 1.3 pk pp->pr_nput = 0;
474 1.3 pk pp->pr_npagealloc = 0;
475 1.3 pk pp->pr_npagefree = 0;
476 1.1 pk pp->pr_hiwat = 0;
477 1.8 thorpej pp->pr_nidle = 0;
478 1.3 pk
479 1.25 thorpej if (flags & PR_LOGGING) {
480 1.25 thorpej if (kmem_map == NULL ||
481 1.25 thorpej (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
482 1.25 thorpej M_TEMP, M_NOWAIT)) == NULL)
483 1.20 thorpej pp->pr_roflags &= ~PR_LOGGING;
484 1.3 pk pp->pr_curlogentry = 0;
485 1.3 pk pp->pr_logsize = pool_logsize;
486 1.3 pk }
487 1.25 thorpej
488 1.25 thorpej pp->pr_entered_file = NULL;
489 1.25 thorpej pp->pr_entered_line = 0;
490 1.3 pk
491 1.21 thorpej simple_lock_init(&pp->pr_slock);
492 1.1 pk
493 1.3 pk /*
494 1.43 thorpej * Initialize private page header pool and cache magazine pool if we
495 1.43 thorpej * haven't done so yet.
496 1.23 thorpej * XXX LOCKING.
497 1.3 pk */
498 1.3 pk if (phpool.pr_size == 0) {
499 1.3 pk pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
500 1.43 thorpej 0, "phpool", 0, 0, 0, 0);
501 1.43 thorpej pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
502 1.43 thorpej 0, "pcgpool", 0, 0, 0, 0);
503 1.1 pk }
504 1.1 pk
505 1.23 thorpej /* Insert into the list of all pools. */
506 1.23 thorpej simple_lock(&pool_head_slock);
507 1.23 thorpej TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
508 1.23 thorpej simple_unlock(&pool_head_slock);
509 1.1 pk }
510 1.1 pk
511 1.1 pk /*
512 1.1 pk  * De-commission a pool resource.
513 1.1 pk */
514 1.1 pk void
515 1.42 thorpej pool_destroy(struct pool *pp)
516 1.1 pk {
517 1.3 pk struct pool_item_header *ph;
518 1.43 thorpej struct pool_cache *pc;
519 1.43 thorpej
520 1.43 thorpej /* Destroy all caches for this pool. */
521 1.43 thorpej while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
522 1.43 thorpej pool_cache_destroy(pc);
523 1.3 pk
524 1.3 pk #ifdef DIAGNOSTIC
525 1.20 thorpej if (pp->pr_nout != 0) {
526 1.25 thorpej pr_printlog(pp, NULL, printf);
527 1.20 thorpej panic("pool_destroy: pool busy: still out: %u\n",
528 1.20 thorpej pp->pr_nout);
529 1.3 pk }
530 1.3 pk #endif
531 1.1 pk
532 1.3 pk /* Remove all pages */
533 1.20 thorpej if ((pp->pr_roflags & PR_STATIC) == 0)
534 1.3 pk while ((ph = pp->pr_pagelist.tqh_first) != NULL)
535 1.3 pk pr_rmpage(pp, ph);
536 1.3 pk
537 1.3 pk /* Remove from global pool list */
538 1.23 thorpej simple_lock(&pool_head_slock);
539 1.3 pk TAILQ_REMOVE(&pool_head, pp, pr_poollist);
540 1.23 thorpej /* XXX Only clear this if we were drainpp? */
541 1.3 pk drainpp = NULL;
542 1.23 thorpej simple_unlock(&pool_head_slock);
543 1.3 pk
544 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) != 0)
545 1.3 pk free(pp->pr_log, M_TEMP);
546 1.2 pk
547 1.20 thorpej if (pp->pr_roflags & PR_FREEHEADER)
548 1.3 pk free(pp, M_POOL);
549 1.1 pk }
550 1.1 pk
551 1.1 pk
552 1.1 pk /*
553 1.3 pk * Grab an item from the pool; must be called at appropriate spl level
554 1.1 pk */
555 1.3 pk void *
556 1.42 thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
557 1.1 pk {
558 1.1 pk void *v;
559 1.1 pk struct pool_item *pi;
560 1.3 pk struct pool_item_header *ph;
561 1.1 pk
562 1.2 pk #ifdef DIAGNOSTIC
563 1.34 thorpej if (__predict_false((pp->pr_roflags & PR_STATIC) &&
564 1.34 thorpej (flags & PR_MALLOCOK))) {
565 1.25 thorpej pr_printlog(pp, NULL, printf);
566 1.2 pk panic("pool_get: static");
567 1.3 pk }
568 1.2 pk #endif
569 1.2 pk
570 1.37 sommerfe if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
571 1.37 sommerfe (flags & PR_WAITOK) != 0))
572 1.3 pk panic("pool_get: must have NOWAIT");
573 1.1 pk
574 1.21 thorpej simple_lock(&pp->pr_slock);
575 1.25 thorpej pr_enter(pp, file, line);
576 1.20 thorpej
577 1.20 thorpej startover:
578 1.20 thorpej /*
579 1.20 thorpej * Check to see if we've reached the hard limit. If we have,
580 1.20 thorpej * and we can wait, then wait until an item has been returned to
581 1.20 thorpej * the pool.
582 1.20 thorpej */
583 1.20 thorpej #ifdef DIAGNOSTIC
584 1.34 thorpej if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
585 1.25 thorpej pr_leave(pp);
586 1.21 thorpej simple_unlock(&pp->pr_slock);
587 1.20 thorpej panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
588 1.20 thorpej }
589 1.20 thorpej #endif
590 1.34 thorpej if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
591 1.29 sommerfe if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
592 1.20 thorpej /*
593 1.20 thorpej * XXX: A warning isn't logged in this case. Should
594 1.20 thorpej * it be?
595 1.20 thorpej */
596 1.20 thorpej pp->pr_flags |= PR_WANTED;
597 1.25 thorpej pr_leave(pp);
598 1.40 sommerfe ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
599 1.25 thorpej pr_enter(pp, file, line);
600 1.20 thorpej goto startover;
601 1.20 thorpej }
602 1.31 thorpej
603 1.31 thorpej /*
604 1.31 thorpej * Log a message that the hard limit has been hit.
605 1.31 thorpej */
606 1.31 thorpej if (pp->pr_hardlimit_warning != NULL &&
607 1.31 thorpej ratecheck(&pp->pr_hardlimit_warning_last,
608 1.31 thorpej &pp->pr_hardlimit_ratecap))
609 1.31 thorpej log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
610 1.21 thorpej
611 1.21 thorpej if (flags & PR_URGENT)
612 1.21 thorpej panic("pool_get: urgent");
613 1.21 thorpej
614 1.21 thorpej pp->pr_nfail++;
615 1.21 thorpej
616 1.25 thorpej pr_leave(pp);
617 1.21 thorpej simple_unlock(&pp->pr_slock);
618 1.20 thorpej return (NULL);
619 1.20 thorpej }
620 1.20 thorpej
621 1.3 pk /*
622 1.3 pk * The convention we use is that if `curpage' is not NULL, then
623 1.3 pk * it points at a non-empty bucket. In particular, `curpage'
624 1.3 pk * never points at a page header which has PR_PHINPAGE set and
625 1.3 pk * has no items in its bucket.
626 1.3 pk */
627 1.20 thorpej if ((ph = pp->pr_curpage) == NULL) {
628 1.15 pk void *v;
629 1.15 pk
630 1.20 thorpej #ifdef DIAGNOSTIC
631 1.20 thorpej if (pp->pr_nitems != 0) {
632 1.21 thorpej simple_unlock(&pp->pr_slock);
633 1.20 thorpej printf("pool_get: %s: curpage NULL, nitems %u\n",
634 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
635 1.20 thorpej panic("pool_get: nitems inconsistent\n");
636 1.20 thorpej }
637 1.20 thorpej #endif
638 1.20 thorpej
639 1.21 thorpej /*
640 1.21 thorpej * Call the back-end page allocator for more memory.
641 1.21 thorpej * Release the pool lock, as the back-end page allocator
642 1.21 thorpej * may block.
643 1.21 thorpej */
644 1.25 thorpej pr_leave(pp);
645 1.21 thorpej simple_unlock(&pp->pr_slock);
646 1.21 thorpej v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
647 1.21 thorpej simple_lock(&pp->pr_slock);
648 1.25 thorpej pr_enter(pp, file, line);
649 1.15 pk
650 1.21 thorpej if (v == NULL) {
651 1.21 thorpej /*
652 1.21 thorpej * We were unable to allocate a page, but
653 1.21 thorpej * we released the lock during allocation,
654 1.21 thorpej * so perhaps items were freed back to the
655 1.21 thorpej * pool. Check for this case.
656 1.21 thorpej */
657 1.21 thorpej if (pp->pr_curpage != NULL)
658 1.21 thorpej goto startover;
659 1.15 pk
660 1.3 pk if (flags & PR_URGENT)
661 1.3 pk panic("pool_get: urgent");
662 1.21 thorpej
663 1.3 pk if ((flags & PR_WAITOK) == 0) {
664 1.3 pk pp->pr_nfail++;
665 1.25 thorpej pr_leave(pp);
666 1.21 thorpej simple_unlock(&pp->pr_slock);
667 1.1 pk return (NULL);
668 1.3 pk }
669 1.3 pk
670 1.15 pk /*
671 1.15 pk * Wait for items to be returned to this pool.
672 1.21 thorpej *
673 1.15 pk * XXX: we actually want to wait just until
674 1.15 pk * the page allocator has memory again. Depending
675 1.15 pk * on this pool's usage, we might get stuck here
676 1.15 pk * for a long time.
677 1.20 thorpej *
678 1.20 thorpej * XXX: maybe we should wake up once a second and
679 1.20 thorpej * try again?
680 1.15 pk */
681 1.1 pk pp->pr_flags |= PR_WANTED;
682 1.25 thorpej pr_leave(pp);
683 1.40 sommerfe ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
684 1.25 thorpej pr_enter(pp, file, line);
685 1.20 thorpej goto startover;
686 1.1 pk }
687 1.3 pk
688 1.15 pk /* We have more memory; add it to the pool */
689 1.50 enami if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) {
690 1.50 enami /*
691 1.50 enami 			 * Probably, we weren't allowed to wait and
692 1.50 enami * couldn't allocate a page header.
693 1.50 enami */
694 1.50 enami (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
695 1.50 enami pp->pr_nfail++;
696 1.50 enami pr_leave(pp);
697 1.50 enami simple_unlock(&pp->pr_slock);
698 1.50 enami return (NULL);
699 1.50 enami }
700 1.15 pk pp->pr_npagealloc++;
701 1.15 pk
702 1.20 thorpej /* Start the allocation process over. */
703 1.20 thorpej goto startover;
704 1.3 pk }
705 1.3 pk
706 1.34 thorpej if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
707 1.25 thorpej pr_leave(pp);
708 1.21 thorpej simple_unlock(&pp->pr_slock);
709 1.3 pk panic("pool_get: %s: page empty", pp->pr_wchan);
710 1.21 thorpej }
711 1.20 thorpej #ifdef DIAGNOSTIC
712 1.34 thorpej if (__predict_false(pp->pr_nitems == 0)) {
713 1.25 thorpej pr_leave(pp);
714 1.21 thorpej simple_unlock(&pp->pr_slock);
715 1.20 thorpej printf("pool_get: %s: items on itemlist, nitems %u\n",
716 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
717 1.20 thorpej panic("pool_get: nitems inconsistent\n");
718 1.20 thorpej }
719 1.20 thorpej #endif
720 1.3 pk pr_log(pp, v, PRLOG_GET, file, line);
721 1.3 pk
722 1.3 pk #ifdef DIAGNOSTIC
723 1.34 thorpej if (__predict_false(pi->pi_magic != PI_MAGIC)) {
724 1.25 thorpej pr_printlog(pp, pi, printf);
725 1.3 pk panic("pool_get(%s): free list modified: magic=%x; page %p;"
726 1.3 pk " item addr %p\n",
727 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
728 1.3 pk }
729 1.3 pk #endif
730 1.3 pk
731 1.3 pk /*
732 1.3 pk * Remove from item list.
733 1.3 pk */
734 1.3 pk TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
735 1.20 thorpej pp->pr_nitems--;
736 1.20 thorpej pp->pr_nout++;
737 1.6 thorpej if (ph->ph_nmissing == 0) {
738 1.6 thorpej #ifdef DIAGNOSTIC
739 1.34 thorpej if (__predict_false(pp->pr_nidle == 0))
740 1.6 thorpej panic("pool_get: nidle inconsistent");
741 1.6 thorpej #endif
742 1.6 thorpej pp->pr_nidle--;
743 1.6 thorpej }
744 1.3 pk ph->ph_nmissing++;
745 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
746 1.21 thorpej #ifdef DIAGNOSTIC
747 1.34 thorpej if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
748 1.25 thorpej pr_leave(pp);
749 1.21 thorpej simple_unlock(&pp->pr_slock);
750 1.21 thorpej panic("pool_get: %s: nmissing inconsistent",
751 1.21 thorpej pp->pr_wchan);
752 1.21 thorpej }
753 1.21 thorpej #endif
754 1.3 pk /*
755 1.3 pk * Find a new non-empty page header, if any.
756 1.3 pk * Start search from the page head, to increase
757 1.3 pk * the chance for "high water" pages to be freed.
758 1.3 pk *
759 1.21 thorpej * Migrate empty pages to the end of the list. This
760 1.21 thorpej * will speed the update of curpage as pages become
761 1.21 thorpej * idle. Empty pages intermingled with idle pages
762 1.21 thorpej * is no big deal. As soon as a page becomes un-empty,
763 1.21 thorpej * it will move back to the head of the list.
764 1.3 pk */
765 1.3 pk TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
766 1.21 thorpej TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
767 1.21 thorpej for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
768 1.21 thorpej ph = TAILQ_NEXT(ph, ph_pagelist))
769 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
770 1.3 pk break;
771 1.3 pk
772 1.3 pk pp->pr_curpage = ph;
773 1.1 pk }
774 1.3 pk
775 1.3 pk pp->pr_nget++;
776 1.20 thorpej
777 1.20 thorpej /*
778 1.20 thorpej * If we have a low water mark and we are now below that low
779 1.20 thorpej * water mark, add more items to the pool.
780 1.20 thorpej */
781 1.53 thorpej if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
782 1.20 thorpej /*
783 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
784 1.20 thorpej * to try again in a second or so? The latter could break
785 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
786 1.20 thorpej */
787 1.20 thorpej }
788 1.20 thorpej
789 1.25 thorpej pr_leave(pp);
790 1.21 thorpej simple_unlock(&pp->pr_slock);
791 1.1 pk return (v);
792 1.1 pk }
793 1.1 pk
794 1.1 pk /*
795 1.43 thorpej * Internal version of pool_put(). Pool is already locked/entered.
796 1.1 pk */
797 1.43 thorpej static void
798 1.43 thorpej pool_do_put(struct pool *pp, void *v, const char *file, long line)
799 1.1 pk {
800 1.1 pk struct pool_item *pi = v;
801 1.3 pk struct pool_item_header *ph;
802 1.3 pk caddr_t page;
803 1.21 thorpej int s;
804 1.3 pk
805 1.3 pk page = (caddr_t)((u_long)v & pp->pr_pagemask);
806 1.1 pk
807 1.30 thorpej #ifdef DIAGNOSTIC
808 1.34 thorpej if (__predict_false(pp->pr_nout == 0)) {
809 1.30 thorpej printf("pool %s: putting with none out\n",
810 1.30 thorpej pp->pr_wchan);
811 1.30 thorpej panic("pool_put");
812 1.30 thorpej }
813 1.30 thorpej #endif
814 1.3 pk
815 1.3 pk pr_log(pp, v, PRLOG_PUT, file, line);
816 1.3 pk
817 1.34 thorpej if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
818 1.25 thorpej pr_printlog(pp, NULL, printf);
819 1.3 pk panic("pool_put: %s: page header missing", pp->pr_wchan);
820 1.3 pk }
821 1.28 thorpej
822 1.28 thorpej #ifdef LOCKDEBUG
823 1.28 thorpej /*
824 1.28 thorpej * Check if we're freeing a locked simple lock.
825 1.28 thorpej */
826 1.28 thorpej simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
827 1.28 thorpej #endif
828 1.3 pk
829 1.3 pk /*
830 1.3 pk * Return to item list.
831 1.3 pk */
832 1.2 pk #ifdef DIAGNOSTIC
833 1.3 pk pi->pi_magic = PI_MAGIC;
834 1.3 pk #endif
835 1.32 chs #ifdef DEBUG
836 1.32 chs {
837 1.32 chs int i, *ip = v;
838 1.32 chs
839 1.32 chs for (i = 0; i < pp->pr_size / sizeof(int); i++) {
840 1.32 chs *ip++ = PI_MAGIC;
841 1.32 chs }
842 1.32 chs }
843 1.32 chs #endif
844 1.32 chs
845 1.3 pk TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
846 1.3 pk ph->ph_nmissing--;
847 1.3 pk pp->pr_nput++;
848 1.20 thorpej pp->pr_nitems++;
849 1.20 thorpej pp->pr_nout--;
850 1.3 pk
851 1.3 pk /* Cancel "pool empty" condition if it exists */
852 1.3 pk if (pp->pr_curpage == NULL)
853 1.3 pk pp->pr_curpage = ph;
854 1.3 pk
855 1.3 pk if (pp->pr_flags & PR_WANTED) {
856 1.3 pk pp->pr_flags &= ~PR_WANTED;
857 1.15 pk if (ph->ph_nmissing == 0)
858 1.15 pk pp->pr_nidle++;
859 1.3 pk wakeup((caddr_t)pp);
860 1.3 pk return;
861 1.3 pk }
862 1.3 pk
863 1.3 pk /*
864 1.21 thorpej * If this page is now complete, do one of two things:
865 1.21 thorpej *
866 1.21 thorpej * (1) If we have more pages than the page high water
867 1.21 thorpej * mark, free the page back to the system.
868 1.21 thorpej *
869 1.21 thorpej * (2) Move it to the end of the page list, so that
870 1.21 thorpej * we minimize our chances of fragmenting the
871 1.21 thorpej * pool. Idle pages migrate to the end (along with
872 1.21 thorpej * completely empty pages, so that we find un-empty
873 1.21 thorpej * pages more quickly when we update curpage) of the
874 1.21 thorpej * list so they can be more easily swept up by
875 1.21 thorpej * the pagedaemon when pages are scarce.
876 1.3 pk */
877 1.3 pk if (ph->ph_nmissing == 0) {
878 1.6 thorpej pp->pr_nidle++;
879 1.3 pk if (pp->pr_npages > pp->pr_maxpages) {
880 1.3 pk pr_rmpage(pp, ph);
881 1.3 pk } else {
882 1.3 pk TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
883 1.3 pk TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
884 1.3 pk
885 1.21 thorpej /*
886 1.21 thorpej * Update the timestamp on the page. A page must
887 1.21 thorpej * be idle for some period of time before it can
888 1.21 thorpej * be reclaimed by the pagedaemon. This minimizes
889 1.21 thorpej * ping-pong'ing for memory.
890 1.21 thorpej */
891 1.21 thorpej s = splclock();
892 1.21 thorpej ph->ph_time = mono_time;
893 1.21 thorpej splx(s);
894 1.21 thorpej
895 1.21 thorpej /*
896 1.21 thorpej * Update the current page pointer. Just look for
897 1.21 thorpej * the first page with any free items.
898 1.21 thorpej *
899 1.21 thorpej * XXX: Maybe we want an option to look for the
900 1.21 thorpej * page with the fewest available items, to minimize
901 1.21 thorpej * fragmentation?
902 1.21 thorpej */
903 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
904 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist))
905 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
906 1.3 pk break;
907 1.1 pk
908 1.3 pk pp->pr_curpage = ph;
909 1.1 pk }
910 1.1 pk }
911 1.21 thorpej /*
912 1.21 thorpej * If the page has just become un-empty, move it to the head of
913 1.21 thorpej * the list, and make it the current page. The next allocation
914 1.21 thorpej * will get the item from this page, instead of further fragmenting
915 1.21 thorpej * the pool.
916 1.21 thorpej */
917 1.21 thorpej else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
918 1.21 thorpej TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
919 1.21 thorpej TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
920 1.21 thorpej pp->pr_curpage = ph;
921 1.21 thorpej }
922 1.43 thorpej }
923 1.43 thorpej
924 1.43 thorpej /*
925 1.43 thorpej * Return resource to the pool; must be called at appropriate spl level
926 1.43 thorpej */
927 1.43 thorpej void
928 1.43 thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
929 1.43 thorpej {
930 1.43 thorpej
931 1.43 thorpej simple_lock(&pp->pr_slock);
932 1.43 thorpej pr_enter(pp, file, line);
933 1.43 thorpej
934 1.43 thorpej pool_do_put(pp, v, file, line);
935 1.21 thorpej
936 1.25 thorpej pr_leave(pp);
937 1.21 thorpej simple_unlock(&pp->pr_slock);
938 1.1 pk }
939 1.1 pk
940 1.1 pk /*
941 1.3 pk * Add a page worth of items to the pool.
942 1.21 thorpej *
943 1.21 thorpej * Note, we must be called with the pool descriptor LOCKED.
944 1.3 pk */
945 1.50 enami static int
946 1.50 enami pool_prime_page(struct pool *pp, caddr_t storage, int flags)
947 1.3 pk {
948 1.3 pk struct pool_item *pi;
949 1.3 pk struct pool_item_header *ph;
950 1.3 pk caddr_t cp = storage;
951 1.3 pk unsigned int align = pp->pr_align;
952 1.3 pk unsigned int ioff = pp->pr_itemoffset;
953 1.27 pk int s, n;
954 1.36 pk
955 1.36 pk if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
956 1.36 pk panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
957 1.3 pk
958 1.20 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
959 1.3 pk ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
960 1.3 pk } else {
961 1.27 pk s = splhigh();
962 1.50 enami ph = pool_get(&phpool, flags);
963 1.27 pk splx(s);
964 1.50 enami if (ph == NULL)
965 1.50 enami return (ENOMEM);
966 1.3 pk LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
967 1.3 pk ph, ph_hashlist);
968 1.3 pk }
969 1.3 pk
970 1.3 pk /*
971 1.3 pk * Insert page header.
972 1.3 pk */
973 1.3 pk TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
974 1.3 pk TAILQ_INIT(&ph->ph_itemlist);
975 1.3 pk ph->ph_page = storage;
976 1.3 pk ph->ph_nmissing = 0;
977 1.21 thorpej memset(&ph->ph_time, 0, sizeof(ph->ph_time));
978 1.3 pk
979 1.6 thorpej pp->pr_nidle++;
980 1.6 thorpej
981 1.3 pk /*
982 1.3 pk * Color this page.
983 1.3 pk */
984 1.3 pk cp = (caddr_t)(cp + pp->pr_curcolor);
985 1.3 pk if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
986 1.3 pk pp->pr_curcolor = 0;
987 1.3 pk
988 1.3 pk /*
989 1.3 pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
990 1.3 pk */
991 1.3 pk if (ioff != 0)
992 1.3 pk cp = (caddr_t)(cp + (align - ioff));
993 1.3 pk
994 1.3 pk /*
995 1.3 pk * Insert remaining chunks on the bucket list.
996 1.3 pk */
997 1.3 pk n = pp->pr_itemsperpage;
998 1.20 thorpej pp->pr_nitems += n;
999 1.3 pk
1000 1.3 pk while (n--) {
1001 1.3 pk pi = (struct pool_item *)cp;
1002 1.3 pk
1003 1.3 pk /* Insert on page list */
1004 1.3 pk TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1005 1.3 pk #ifdef DIAGNOSTIC
1006 1.3 pk pi->pi_magic = PI_MAGIC;
1007 1.3 pk #endif
1008 1.3 pk cp = (caddr_t)(cp + pp->pr_size);
1009 1.3 pk }
1010 1.3 pk
1011 1.3 pk /*
1012 1.3 pk * If the pool was depleted, point at the new page.
1013 1.3 pk */
1014 1.3 pk if (pp->pr_curpage == NULL)
1015 1.3 pk pp->pr_curpage = ph;
1016 1.3 pk
1017 1.3 pk if (++pp->pr_npages > pp->pr_hiwat)
1018 1.3 pk pp->pr_hiwat = pp->pr_npages;
1019 1.50 enami
1020 1.50 enami return (0);
1021 1.3 pk }
1022 1.3 pk
1023 1.20 thorpej /*
1024 1.52 thorpej * Used by pool_get() when nitems drops below the low water mark. This
1025 1.52 thorpej  * is used to catch nitems up to the low water mark.
1026 1.20 thorpej *
1027 1.21 thorpej * Note 1, we never wait for memory here, we let the caller decide what to do.
1028 1.20 thorpej *
1029 1.20 thorpej * Note 2, this doesn't work with static pools.
1030 1.20 thorpej *
1031 1.20 thorpej * Note 3, we must be called with the pool already locked, and we return
1032 1.20 thorpej * with it locked.
1033 1.20 thorpej */
1034 1.20 thorpej static int
1035 1.42 thorpej pool_catchup(struct pool *pp)
1036 1.20 thorpej {
1037 1.20 thorpej caddr_t cp;
1038 1.20 thorpej int error = 0;
1039 1.20 thorpej
1040 1.20 thorpej if (pp->pr_roflags & PR_STATIC) {
1041 1.20 thorpej /*
1042 1.20 thorpej * We dropped below the low water mark, and this is not a
1043 1.20 thorpej * good thing. Log a warning.
1044 1.21 thorpej *
1045 1.21 thorpej * XXX: rate-limit this?
1046 1.20 thorpej */
1047 1.20 thorpej printf("WARNING: static pool `%s' dropped below low water "
1048 1.20 thorpej "mark\n", pp->pr_wchan);
1049 1.20 thorpej return (0);
1050 1.20 thorpej }
1051 1.20 thorpej
1052 1.21 thorpej while (pp->pr_nitems < pp->pr_minitems) {
1053 1.20 thorpej /*
1054 1.21 thorpej * Call the page back-end allocator for more memory.
1055 1.21 thorpej *
1056 1.21 thorpej * XXX: We never wait, so should we bother unlocking
1057 1.21 thorpej * the pool descriptor?
1058 1.20 thorpej */
1059 1.21 thorpej simple_unlock(&pp->pr_slock);
1060 1.20 thorpej cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1061 1.21 thorpej simple_lock(&pp->pr_slock);
1062 1.34 thorpej if (__predict_false(cp == NULL)) {
1063 1.20 thorpej error = ENOMEM;
1064 1.20 thorpej break;
1065 1.20 thorpej }
1066 1.50 enami if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
1067 1.50 enami (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
1068 1.50 enami break;
1069 1.50 enami }
1070 1.26 thorpej pp->pr_npagealloc++;
1071 1.20 thorpej }
1072 1.20 thorpej
1073 1.20 thorpej return (error);
1074 1.20 thorpej }
1075 1.20 thorpej
1076 1.3 pk void
1077 1.42 thorpej pool_setlowat(struct pool *pp, int n)
1078 1.3 pk {
1079 1.20 thorpej int error;
1080 1.15 pk
1081 1.21 thorpej simple_lock(&pp->pr_slock);
1082 1.21 thorpej
1083 1.3 pk pp->pr_minitems = n;
1084 1.15 pk pp->pr_minpages = (n == 0)
1085 1.15 pk ? 0
1086 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1087 1.20 thorpej
1088 1.20 thorpej /* Make sure we're caught up with the newly-set low water mark. */
1089 1.53 thorpej 	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1090 1.20 thorpej /*
1091 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1092 1.20 thorpej * to try again in a second or so? The latter could break
1093 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1094 1.20 thorpej */
1095 1.20 thorpej }
1096 1.21 thorpej
1097 1.21 thorpej simple_unlock(&pp->pr_slock);
1098 1.3 pk }
1099 1.3 pk
1100 1.3 pk void
1101 1.42 thorpej pool_sethiwat(struct pool *pp, int n)
1102 1.3 pk {
1103 1.15 pk
1104 1.21 thorpej simple_lock(&pp->pr_slock);
1105 1.21 thorpej
1106 1.15 pk pp->pr_maxpages = (n == 0)
1107 1.15 pk ? 0
1108 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1109 1.21 thorpej
1110 1.21 thorpej simple_unlock(&pp->pr_slock);
1111 1.3 pk }
1112 1.3 pk
1113 1.20 thorpej void
1114 1.42 thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1115 1.20 thorpej {
1116 1.20 thorpej
1117 1.21 thorpej simple_lock(&pp->pr_slock);
1118 1.20 thorpej
1119 1.20 thorpej pp->pr_hardlimit = n;
1120 1.20 thorpej pp->pr_hardlimit_warning = warnmess;
1121 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1122 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
1123 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
1124 1.20 thorpej
1125 1.20 thorpej /*
1126 1.21 thorpej * In-line version of pool_sethiwat(), because we don't want to
1127 1.21 thorpej * release the lock.
1128 1.20 thorpej */
1129 1.20 thorpej pp->pr_maxpages = (n == 0)
1130 1.20 thorpej ? 0
1131 1.20 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1132 1.21 thorpej
1133 1.21 thorpej simple_unlock(&pp->pr_slock);
1134 1.20 thorpej }
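
/*
 * Illustrative use of pool_sethardlimit() (hypothetical pool, limit and
 * message): cap a pool at 1000 outstanding items and rate-limit the
 * warning to once every 60 seconds:
 *
 *	pool_sethardlimit(&frobpool, 1000,
 *	    "WARNING: frobpool limit reached", 60);
 */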
1135 1.3 pk
1136 1.3 pk /*
1137 1.3 pk * Default page allocator.
1138 1.3 pk */
1139 1.3 pk static void *
1140 1.42 thorpej pool_page_alloc(unsigned long sz, int flags, int mtype)
1141 1.3 pk {
1142 1.11 thorpej boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1143 1.3 pk
1144 1.11 thorpej return ((void *)uvm_km_alloc_poolpage(waitok));
1145 1.3 pk }
1146 1.3 pk
1147 1.3 pk static void
1148 1.42 thorpej pool_page_free(void *v, unsigned long sz, int mtype)
1149 1.3 pk {
1150 1.3 pk
1151 1.10 eeh uvm_km_free_poolpage((vaddr_t)v);
1152 1.3 pk }
1153 1.12 thorpej
1154 1.12 thorpej /*
1155 1.12 thorpej * Alternate pool page allocator for pools that know they will
1156 1.12 thorpej * never be accessed in interrupt context.
1157 1.12 thorpej */
1158 1.12 thorpej void *
1159 1.42 thorpej pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
1160 1.12 thorpej {
1161 1.12 thorpej boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1162 1.12 thorpej
1163 1.12 thorpej return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1164 1.12 thorpej waitok));
1165 1.12 thorpej }
1166 1.12 thorpej
1167 1.12 thorpej void
1168 1.42 thorpej pool_page_free_nointr(void *v, unsigned long sz, int mtype)
1169 1.12 thorpej {
1170 1.12 thorpej
1171 1.12 thorpej uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1172 1.12 thorpej }
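
/*
 * Illustrative sketch (hypothetical pool): a pool that is never used from
 * interrupt context can be given these allocators when it is initialized:
 *
 *	pool_init(&frobpool, sizeof(struct frob), 0, 0, 0, "frobpl", 0,
 *	    pool_page_alloc_nointr, pool_page_free_nointr, M_DEVBUF);
 */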
1173 1.12 thorpej
1174 1.3 pk
1175 1.3 pk /*
1176 1.3 pk * Release all complete pages that have not been used recently.
1177 1.3 pk */
1178 1.3 pk void
1179 1.42 thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
1180 1.3 pk {
1181 1.3 pk struct pool_item_header *ph, *phnext;
1182 1.43 thorpej struct pool_cache *pc;
1183 1.21 thorpej struct timeval curtime;
1184 1.21 thorpej int s;
1185 1.3 pk
1186 1.20 thorpej if (pp->pr_roflags & PR_STATIC)
1187 1.3 pk return;
1188 1.3 pk
1189 1.21 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1190 1.3 pk return;
1191 1.25 thorpej pr_enter(pp, file, line);
1192 1.3 pk
1193 1.43 thorpej /*
1194 1.43 thorpej * Reclaim items from the pool's caches.
1195 1.43 thorpej */
1196 1.43 thorpej for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1197 1.43 thorpej pc = TAILQ_NEXT(pc, pc_poollist))
1198 1.43 thorpej pool_cache_reclaim(pc);
1199 1.43 thorpej
1200 1.21 thorpej s = splclock();
1201 1.21 thorpej curtime = mono_time;
1202 1.21 thorpej splx(s);
1203 1.21 thorpej
1204 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1205 1.3 pk phnext = TAILQ_NEXT(ph, ph_pagelist);
1206 1.3 pk
1207 1.3 pk /* Check our minimum page claim */
1208 1.3 pk if (pp->pr_npages <= pp->pr_minpages)
1209 1.3 pk break;
1210 1.3 pk
1211 1.3 pk if (ph->ph_nmissing == 0) {
1212 1.3 pk struct timeval diff;
1213 1.3 pk timersub(&curtime, &ph->ph_time, &diff);
1214 1.3 pk if (diff.tv_sec < pool_inactive_time)
1215 1.3 pk continue;
1216 1.21 thorpej
1217 1.21 thorpej /*
1218 1.21 thorpej * If freeing this page would put us below
1219 1.21 thorpej * the low water mark, stop now.
1220 1.21 thorpej */
1221 1.21 thorpej if ((pp->pr_nitems - pp->pr_itemsperpage) <
1222 1.21 thorpej pp->pr_minitems)
1223 1.21 thorpej break;
1224 1.21 thorpej
1225 1.3 pk pr_rmpage(pp, ph);
1226 1.3 pk }
1227 1.3 pk }
1228 1.3 pk
1229 1.25 thorpej pr_leave(pp);
1230 1.21 thorpej simple_unlock(&pp->pr_slock);
1231 1.3 pk }
1232 1.3 pk
1233 1.3 pk
1234 1.3 pk /*
1235 1.3 pk * Drain pools, one at a time.
1236 1.21 thorpej *
1237 1.21 thorpej * Note, we must never be called from an interrupt context.
1238 1.3 pk */
1239 1.3 pk void
1240 1.42 thorpej pool_drain(void *arg)
1241 1.3 pk {
1242 1.3 pk struct pool *pp;
1243 1.23 thorpej int s;
1244 1.3 pk
1245 1.49 thorpej s = splvm();
1246 1.23 thorpej simple_lock(&pool_head_slock);
1247 1.23 thorpej
1248 1.23 thorpej if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
1249 1.23 thorpej goto out;
1250 1.3 pk
1251 1.3 pk pp = drainpp;
1252 1.3 pk drainpp = TAILQ_NEXT(pp, pr_poollist);
1253 1.3 pk
1254 1.3 pk pool_reclaim(pp);
1255 1.23 thorpej
1256 1.23 thorpej out:
1257 1.23 thorpej simple_unlock(&pool_head_slock);
1258 1.3 pk splx(s);
1259 1.3 pk }
1260 1.3 pk
1261 1.3 pk
1262 1.3 pk /*
1263 1.3 pk * Diagnostic helpers.
1264 1.3 pk */
1265 1.3 pk void
1266 1.42 thorpej pool_print(struct pool *pp, const char *modif)
1267 1.21 thorpej {
1268 1.21 thorpej int s;
1269 1.21 thorpej
1270 1.49 thorpej s = splvm();
1271 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0) {
1272 1.25 thorpej printf("pool %s is locked; try again later\n",
1273 1.25 thorpej pp->pr_wchan);
1274 1.25 thorpej splx(s);
1275 1.25 thorpej return;
1276 1.25 thorpej }
1277 1.25 thorpej pool_print1(pp, modif, printf);
1278 1.21 thorpej simple_unlock(&pp->pr_slock);
1279 1.21 thorpej splx(s);
1280 1.21 thorpej }
1281 1.21 thorpej
1282 1.25 thorpej void
1283 1.42 thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1284 1.25 thorpej {
1285 1.25 thorpej int didlock = 0;
1286 1.25 thorpej
1287 1.25 thorpej if (pp == NULL) {
1288 1.25 thorpej (*pr)("Must specify a pool to print.\n");
1289 1.25 thorpej return;
1290 1.25 thorpej }
1291 1.25 thorpej
1292 1.25 thorpej /*
1293 1.25 thorpej * Called from DDB; interrupts should be blocked, and all
1294 1.25 thorpej * other processors should be paused. We can skip locking
1295 1.25 thorpej * the pool in this case.
1296 1.25 thorpej *
1297 1.25 thorpej * We do a simple_lock_try() just to print the lock
1298 1.25 thorpej * status, however.
1299 1.25 thorpej */
1300 1.25 thorpej
1301 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1302 1.25 thorpej (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1303 1.25 thorpej else
1304 1.25 thorpej didlock = 1;
1305 1.25 thorpej
1306 1.25 thorpej pool_print1(pp, modif, pr);
1307 1.25 thorpej
1308 1.25 thorpej if (didlock)
1309 1.25 thorpej simple_unlock(&pp->pr_slock);
1310 1.25 thorpej }
1311 1.25 thorpej
1312 1.21 thorpej static void
1313 1.42 thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1314 1.3 pk {
1315 1.25 thorpej struct pool_item_header *ph;
1316 1.44 thorpej struct pool_cache *pc;
1317 1.44 thorpej struct pool_cache_group *pcg;
1318 1.25 thorpej #ifdef DIAGNOSTIC
1319 1.25 thorpej struct pool_item *pi;
1320 1.25 thorpej #endif
1321 1.44 thorpej int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1322 1.25 thorpej char c;
1323 1.25 thorpej
1324 1.25 thorpej while ((c = *modif++) != '\0') {
1325 1.25 thorpej if (c == 'l')
1326 1.25 thorpej print_log = 1;
1327 1.25 thorpej if (c == 'p')
1328 1.25 thorpej print_pagelist = 1;
1329 1.44 thorpej if (c == 'c')
1330 1.44 thorpej print_cache = 1;
1332 1.25 thorpej }
1333 1.25 thorpej
1334 1.25 thorpej (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1335 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1336 1.25 thorpej pp->pr_roflags);
1337 1.25 thorpej (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1338 1.25 thorpej (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1339 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1340 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1341 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1342 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1343 1.25 thorpej
1344 1.25 thorpej (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1345 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1346 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1347 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1348 1.25 thorpej
1349 1.25 thorpej if (print_pagelist == 0)
1350 1.25 thorpej goto skip_pagelist;
1351 1.25 thorpej
1352 1.25 thorpej if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1353 1.25 thorpej (*pr)("\n\tpage list:\n");
1354 1.25 thorpej for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1355 1.25 thorpej (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1356 1.25 thorpej ph->ph_page, ph->ph_nmissing,
1357 1.25 thorpej (u_long)ph->ph_time.tv_sec,
1358 1.25 thorpej (u_long)ph->ph_time.tv_usec);
1359 1.25 thorpej #ifdef DIAGNOSTIC
1360 1.25 thorpej for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1361 1.25 thorpej pi = TAILQ_NEXT(pi, pi_list)) {
1362 1.25 thorpej if (pi->pi_magic != PI_MAGIC) {
1363 1.25 thorpej (*pr)("\t\t\titem %p, magic 0x%x\n",
1364 1.25 thorpej pi, pi->pi_magic);
1365 1.25 thorpej }
1366 1.25 thorpej }
1367 1.25 thorpej #endif
1368 1.25 thorpej }
1369 1.25 thorpej if (pp->pr_curpage == NULL)
1370 1.25 thorpej (*pr)("\tno current page\n");
1371 1.25 thorpej else
1372 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1373 1.25 thorpej
1374 1.25 thorpej skip_pagelist:
1375 1.25 thorpej
1376 1.25 thorpej if (print_log == 0)
1377 1.25 thorpej goto skip_log;
1378 1.25 thorpej
1379 1.25 thorpej (*pr)("\n");
1380 1.25 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
1381 1.25 thorpej (*pr)("\tno log\n");
1382 1.25 thorpej else
1383 1.25 thorpej pr_printlog(pp, NULL, pr);
1384 1.3 pk
1385 1.25 thorpej skip_log:
1386 1.44 thorpej
1387 1.44 thorpej if (print_cache == 0)
1388 1.44 thorpej goto skip_cache;
1389 1.44 thorpej
1390 1.44 thorpej for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1391 1.44 thorpej pc = TAILQ_NEXT(pc, pc_poollist)) {
1392 1.44 thorpej (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1393 1.44 thorpej pc->pc_allocfrom, pc->pc_freeto);
1394 1.48 thorpej (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1395 1.48 thorpej pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1396 1.44 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1397 1.44 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1398 1.44 thorpej (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1399 1.44 thorpej for (i = 0; i < PCG_NOBJECTS; i++)
1400 1.44 thorpej (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1401 1.44 thorpej }
1402 1.44 thorpej }
1403 1.44 thorpej
1404 1.44 thorpej skip_cache:
1405 1.3 pk
1406 1.25 thorpej pr_enter_check(pp, pr);
1407 1.3 pk }
1408 1.3 pk
1409 1.3 pk int
1410 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1411 1.3 pk {
1412 1.3 pk struct pool_item_header *ph;
1413 1.3 pk int r = 0;
1414 1.3 pk
1415 1.21 thorpej simple_lock(&pp->pr_slock);
1416 1.3 pk
1417 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1418 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist)) {
1419 1.3 pk
1420 1.3 pk struct pool_item *pi;
1421 1.3 pk int n;
1422 1.3 pk caddr_t page;
1423 1.3 pk
1424 1.3 pk page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1425 1.20 thorpej if (page != ph->ph_page &&
1426 1.20 thorpej (pp->pr_roflags & PR_PHINPAGE) != 0) {
1427 1.3 pk if (label != NULL)
1428 1.3 pk printf("%s: ", label);
1429 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1430 1.16 briggs " at page head addr %p (p %p)\n", pp,
1431 1.3 pk pp->pr_wchan, ph->ph_page,
1432 1.3 pk ph, page);
1433 1.3 pk r++;
1434 1.3 pk goto out;
1435 1.3 pk }
1436 1.3 pk
1437 1.3 pk for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1438 1.3 pk pi != NULL;
1439 1.3 pk pi = TAILQ_NEXT(pi,pi_list), n++) {
1440 1.3 pk
1441 1.3 pk #ifdef DIAGNOSTIC
1442 1.3 pk if (pi->pi_magic != PI_MAGIC) {
1443 1.3 pk if (label != NULL)
1444 1.3 pk printf("%s: ", label);
1445 1.3 pk printf("pool(%s): free list modified: magic=%x;"
1446 1.3 pk " page %p; item ordinal %d;"
1447 1.3 pk " addr %p (p %p)\n",
1448 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page,
1449 1.3 pk n, pi, page);
1450 1.3 pk panic("pool");
1451 1.3 pk }
1452 1.3 pk #endif
1453 1.3 pk page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1454 1.3 pk if (page == ph->ph_page)
1455 1.3 pk continue;
1456 1.3 pk
1457 1.3 pk if (label != NULL)
1458 1.3 pk printf("%s: ", label);
1459 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1460 1.16 briggs " item ordinal %d; addr %p (p %p)\n", pp,
1461 1.3 pk pp->pr_wchan, ph->ph_page,
1462 1.3 pk n, pi, page);
1463 1.3 pk r++;
1464 1.3 pk goto out;
1465 1.3 pk }
1466 1.3 pk }
1467 1.3 pk out:
1468 1.21 thorpej simple_unlock(&pp->pr_slock);
1469 1.3 pk return (r);
1470 1.43 thorpej }
1471 1.43 thorpej
1472 1.43 thorpej /*
1473 1.43 thorpej * pool_cache_init:
1474 1.43 thorpej *
1475 1.43 thorpej * Initialize a pool cache.
1476 1.43 thorpej *
1477 1.43 thorpej * NOTE: If the pool must be protected from interrupts, we expect
1478 1.43 thorpej * to be called at the appropriate interrupt priority level.
1479 1.43 thorpej */
1480 1.43 thorpej void
1481 1.43 thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
1482 1.43 thorpej int (*ctor)(void *, void *, int),
1483 1.43 thorpej void (*dtor)(void *, void *),
1484 1.43 thorpej void *arg)
1485 1.43 thorpej {
1486 1.43 thorpej
1487 1.43 thorpej TAILQ_INIT(&pc->pc_grouplist);
1488 1.43 thorpej simple_lock_init(&pc->pc_slock);
1489 1.43 thorpej
1490 1.43 thorpej pc->pc_allocfrom = NULL;
1491 1.43 thorpej pc->pc_freeto = NULL;
1492 1.43 thorpej pc->pc_pool = pp;
1493 1.43 thorpej
1494 1.43 thorpej pc->pc_ctor = ctor;
1495 1.43 thorpej pc->pc_dtor = dtor;
1496 1.43 thorpej pc->pc_arg = arg;
1497 1.43 thorpej
1498 1.48 thorpej pc->pc_hits = 0;
1499 1.48 thorpej pc->pc_misses = 0;
1500 1.48 thorpej
1501 1.48 thorpej pc->pc_ngroups = 0;
1502 1.48 thorpej
1503 1.48 thorpej pc->pc_nitems = 0;
1504 1.48 thorpej
1505 1.43 thorpej simple_lock(&pp->pr_slock);
1506 1.43 thorpej TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1507 1.43 thorpej simple_unlock(&pp->pr_slock);
1508 1.43 thorpej }
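/*
 * Illustrative usage sketch (editorial addition; the foo_* names are
 * hypothetical, and the pool_init() argument order is assumed to match
 * this era's pool_init(9) interface):
 *
 *	int  foo_ctor(void *, void *, int);
 *	void foo_dtor(void *, void *);
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 * The constructor/destructor may be NULL if objects need no
 * construction; the final argument is the opaque `arg' passed back to
 * them on each call.
 */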
1509 1.43 thorpej
1510 1.43 thorpej /*
1511 1.43 thorpej * pool_cache_destroy:
1512 1.43 thorpej *
1513 1.43 thorpej * Destroy a pool cache.
1514 1.43 thorpej */
1515 1.43 thorpej void
1516 1.43 thorpej pool_cache_destroy(struct pool_cache *pc)
1517 1.43 thorpej {
1518 1.43 thorpej struct pool *pp = pc->pc_pool;
1519 1.43 thorpej
1520 1.43 thorpej /* First, invalidate the entire cache. */
1521 1.43 thorpej pool_cache_invalidate(pc);
1522 1.43 thorpej
1523 1.43 thorpej /* ...and remove it from the pool's cache list. */
1524 1.43 thorpej simple_lock(&pp->pr_slock);
1525 1.43 thorpej TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1526 1.43 thorpej simple_unlock(&pp->pr_slock);
1527 1.43 thorpej }
1528 1.43 thorpej
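/*
 * pcg_get/pcg_put:
 *
 *	A cache group holds up to PCG_NOBJECTS constructed objects and is
 *	used as a small LIFO stack: pcg_avail counts the occupied slots,
 *	pcg_get() pops the most recently cached object, and pcg_put()
 *	pushes one.  The caller must hold pc_slock and must guarantee that
 *	the group is non-empty (pcg_get) or non-full (pcg_put); the
 *	KASSERTs below enforce this.
 */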
1529 1.43 thorpej static __inline void *
1530 1.43 thorpej pcg_get(struct pool_cache_group *pcg)
1531 1.43 thorpej {
1532 1.43 thorpej void *object;
1533 1.43 thorpej u_int idx;
1534 1.43 thorpej
1535 1.43 thorpej KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1536 1.45 thorpej KASSERT(pcg->pcg_avail != 0);
1537 1.43 thorpej idx = --pcg->pcg_avail;
1538 1.43 thorpej
1539 1.43 thorpej KASSERT(pcg->pcg_objects[idx] != NULL);
1540 1.43 thorpej object = pcg->pcg_objects[idx];
1541 1.43 thorpej pcg->pcg_objects[idx] = NULL;
1542 1.43 thorpej
1543 1.43 thorpej return (object);
1544 1.43 thorpej }
1545 1.43 thorpej
1546 1.43 thorpej static __inline void
1547 1.43 thorpej pcg_put(struct pool_cache_group *pcg, void *object)
1548 1.43 thorpej {
1549 1.43 thorpej u_int idx;
1550 1.43 thorpej
1551 1.43 thorpej KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1552 1.43 thorpej idx = pcg->pcg_avail++;
1553 1.43 thorpej
1554 1.43 thorpej KASSERT(pcg->pcg_objects[idx] == NULL);
1555 1.43 thorpej pcg->pcg_objects[idx] = object;
1556 1.43 thorpej }
1557 1.43 thorpej
1558 1.43 thorpej /*
1559 1.43 thorpej * pool_cache_get:
1560 1.43 thorpej *
1561 1.43 thorpej * Get an object from a pool cache.
1562 1.43 thorpej */
1563 1.43 thorpej void *
1564 1.43 thorpej pool_cache_get(struct pool_cache *pc, int flags)
1565 1.43 thorpej {
1566 1.43 thorpej struct pool_cache_group *pcg;
1567 1.43 thorpej void *object;
1568 1.43 thorpej
1569 1.43 thorpej simple_lock(&pc->pc_slock);
1570 1.43 thorpej
1571 1.43 thorpej if ((pcg = pc->pc_allocfrom) == NULL) {
1572 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1573 1.43 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1574 1.43 thorpej if (pcg->pcg_avail != 0) {
1575 1.43 thorpej pc->pc_allocfrom = pcg;
1576 1.43 thorpej goto have_group;
1577 1.43 thorpej }
1578 1.43 thorpej }
1579 1.43 thorpej
1580 1.43 thorpej /*
1581 1.43 thorpej * No groups with any available objects. Allocate
1582 1.43 thorpej * a new object, construct it, and return it to
1583 1.43 thorpej * the caller. We will allocate a group, if necessary,
1584 1.43 thorpej * when the object is freed back to the cache.
1585 1.43 thorpej */
1586 1.48 thorpej pc->pc_misses++;
1587 1.43 thorpej simple_unlock(&pc->pc_slock);
1588 1.43 thorpej object = pool_get(pc->pc_pool, flags);
1589 1.43 thorpej if (object != NULL && pc->pc_ctor != NULL) {
1590 1.43 thorpej if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1591 1.43 thorpej pool_put(pc->pc_pool, object);
1592 1.43 thorpej return (NULL);
1593 1.43 thorpej }
1594 1.43 thorpej }
1595 1.43 thorpej return (object);
1596 1.43 thorpej }
1597 1.43 thorpej
1598 1.43 thorpej have_group:
1599 1.48 thorpej pc->pc_hits++;
1600 1.48 thorpej pc->pc_nitems--;
1601 1.43 thorpej object = pcg_get(pcg);
1602 1.43 thorpej
1603 1.43 thorpej if (pcg->pcg_avail == 0)
1604 1.43 thorpej pc->pc_allocfrom = NULL;
1605 1.45 thorpej
1606 1.43 thorpej simple_unlock(&pc->pc_slock);
1607 1.43 thorpej
1608 1.43 thorpej return (object);
1609 1.43 thorpej }
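/*
 * Illustrative allocation sketch (editorial; `foo_cache' and `struct
 * foo' are hypothetical, continuing the example above):
 *
 *	struct foo *f;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return (ENOMEM);
 *	... use the already-constructed object ...
 *	pool_cache_put(&foo_cache, f);
 *
 * Note that pool_cache_get() can return NULL even with PR_WAITOK when a
 * constructor is registered and that constructor fails.
 */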
1610 1.43 thorpej
1611 1.43 thorpej /*
1612 1.43 thorpej * pool_cache_put:
1613 1.43 thorpej *
1614 1.43 thorpej * Put an object back to the pool cache.
1615 1.43 thorpej */
1616 1.43 thorpej void
1617 1.43 thorpej pool_cache_put(struct pool_cache *pc, void *object)
1618 1.43 thorpej {
1619 1.43 thorpej struct pool_cache_group *pcg;
1620 1.43 thorpej
1621 1.43 thorpej simple_lock(&pc->pc_slock);
1622 1.43 thorpej
1623 1.43 thorpej if ((pcg = pc->pc_freeto) == NULL) {
1624 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1625 1.43 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1626 1.43 thorpej if (pcg->pcg_avail != PCG_NOBJECTS) {
1627 1.43 thorpej pc->pc_freeto = pcg;
1628 1.43 thorpej goto have_group;
1629 1.43 thorpej }
1630 1.43 thorpej }
1631 1.43 thorpej
1632 1.43 thorpej /*
1633 1.43 thorpej 		 * No group has a free slot to take the object.  Attempt to
1634 1.47 thorpej 		 * allocate a new group.
1635 1.43 thorpej */
1636 1.47 thorpej simple_unlock(&pc->pc_slock);
1637 1.43 thorpej pcg = pool_get(&pcgpool, PR_NOWAIT);
1638 1.43 thorpej if (pcg != NULL) {
1639 1.43 thorpej memset(pcg, 0, sizeof(*pcg));
1640 1.47 thorpej simple_lock(&pc->pc_slock);
1641 1.48 thorpej pc->pc_ngroups++;
1642 1.43 thorpej TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1643 1.47 thorpej if (pc->pc_freeto == NULL)
1644 1.47 thorpej pc->pc_freeto = pcg;
1645 1.43 thorpej goto have_group;
1646 1.43 thorpej }
1647 1.43 thorpej
1648 1.43 thorpej /*
1649 1.43 thorpej * Unable to allocate a cache group; destruct the object
1650 1.43 thorpej * and free it back to the pool.
1651 1.43 thorpej */
1652 1.51 thorpej pool_cache_destruct_object(pc, object);
1653 1.43 thorpej return;
1654 1.43 thorpej }
1655 1.43 thorpej
1656 1.43 thorpej have_group:
1657 1.48 thorpej pc->pc_nitems++;
1658 1.43 thorpej pcg_put(pcg, object);
1659 1.43 thorpej
1660 1.43 thorpej if (pcg->pcg_avail == PCG_NOBJECTS)
1661 1.43 thorpej pc->pc_freeto = NULL;
1662 1.43 thorpej
1663 1.43 thorpej simple_unlock(&pc->pc_slock);
1664 1.51 thorpej }
1665 1.51 thorpej
1666 1.51 thorpej /*
1667 1.51 thorpej * pool_cache_destruct_object:
1668 1.51 thorpej *
1669 1.51 thorpej * Force destruction of an object and its release back into
1670 1.51 thorpej * the pool.
1671 1.51 thorpej */
1672 1.51 thorpej void
1673 1.51 thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
1674 1.51 thorpej {
1675 1.51 thorpej
1676 1.51 thorpej if (pc->pc_dtor != NULL)
1677 1.51 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1678 1.51 thorpej pool_put(pc->pc_pool, object);
1679 1.43 thorpej }
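/*
 * pool_cache_put() above falls back to pool_cache_destruct_object()
 * when no cache group slot can be obtained; callers may also use it
 * directly to discard an object whose constructed state should not be
 * cached (a hedged characterization, not an exhaustive list of
 * callers).
 */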
1680 1.43 thorpej
1681 1.43 thorpej /*
1682 1.43 thorpej * pool_cache_do_invalidate:
1683 1.43 thorpej *
1684 1.43 thorpej * This internal function implements pool_cache_invalidate() and
1685 1.43 thorpej * pool_cache_reclaim().
1686 1.43 thorpej */
1687 1.43 thorpej static void
1688 1.43 thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1689 1.43 thorpej void (*putit)(struct pool *, void *, const char *, long))
1690 1.43 thorpej {
1691 1.43 thorpej struct pool_cache_group *pcg, *npcg;
1692 1.43 thorpej void *object;
1693 1.43 thorpej
1694 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1695 1.43 thorpej pcg = npcg) {
1696 1.43 thorpej npcg = TAILQ_NEXT(pcg, pcg_list);
1697 1.43 thorpej while (pcg->pcg_avail != 0) {
1698 1.48 thorpej pc->pc_nitems--;
1699 1.43 thorpej object = pcg_get(pcg);
1700 1.45 thorpej if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1701 1.45 thorpej pc->pc_allocfrom = NULL;
1702 1.43 thorpej if (pc->pc_dtor != NULL)
1703 1.43 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1704 1.43 thorpej (*putit)(pc->pc_pool, object, __FILE__, __LINE__);
1705 1.43 thorpej }
1706 1.43 thorpej if (free_groups) {
1707 1.48 thorpej pc->pc_ngroups--;
1708 1.43 thorpej TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1709 1.46 thorpej if (pc->pc_freeto == pcg)
1710 1.46 thorpej pc->pc_freeto = NULL;
1711 1.43 thorpej pool_put(&pcgpool, pcg);
1712 1.43 thorpej }
1713 1.43 thorpej }
1714 1.43 thorpej }
1715 1.43 thorpej
1716 1.43 thorpej /*
1717 1.43 thorpej * pool_cache_invalidate:
1718 1.43 thorpej *
1719 1.43 thorpej * Invalidate a pool cache (destruct and release all of the
1720 1.43 thorpej * cached objects).
1721 1.43 thorpej */
1722 1.43 thorpej void
1723 1.43 thorpej pool_cache_invalidate(struct pool_cache *pc)
1724 1.43 thorpej {
1725 1.43 thorpej
1726 1.43 thorpej simple_lock(&pc->pc_slock);
1727 1.43 thorpej pool_cache_do_invalidate(pc, 0, _pool_put);
1728 1.43 thorpej simple_unlock(&pc->pc_slock);
1729 1.43 thorpej }
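/*
 * Note the division of labour: pool_cache_invalidate() above releases
 * the cached objects but leaves the (now empty) groups on pc_grouplist
 * for reuse, while pool_cache_reclaim() below also returns the group
 * headers themselves to `pcgpool'.
 */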
1730 1.43 thorpej
1731 1.43 thorpej /*
1732 1.43 thorpej * pool_cache_reclaim:
1733 1.43 thorpej *
1734 1.43 thorpej * Reclaim a pool cache for pool_reclaim().
1735 1.43 thorpej */
1736 1.43 thorpej static void
1737 1.43 thorpej pool_cache_reclaim(struct pool_cache *pc)
1738 1.43 thorpej {
1739 1.43 thorpej
1740 1.47 thorpej simple_lock(&pc->pc_slock);
1741 1.43 thorpej pool_cache_do_invalidate(pc, 1, pool_do_put);
1742 1.43 thorpej simple_unlock(&pc->pc_slock);
1743 1.3 pk }