1 1.52 thorpej /* $NetBSD: subr_pool.c,v 1.52 2001/05/09 23:46:03 thorpej Exp $ */
2 1.1 pk
3 1.1 pk /*-
4 1.43 thorpej * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 1.1 pk * All rights reserved.
6 1.1 pk *
7 1.1 pk * This code is derived from software contributed to The NetBSD Foundation
8 1.20 thorpej * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 1.20 thorpej * Simulation Facility, NASA Ames Research Center.
10 1.1 pk *
11 1.1 pk * Redistribution and use in source and binary forms, with or without
12 1.1 pk * modification, are permitted provided that the following conditions
13 1.1 pk * are met:
14 1.1 pk * 1. Redistributions of source code must retain the above copyright
15 1.1 pk * notice, this list of conditions and the following disclaimer.
16 1.1 pk * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 pk * notice, this list of conditions and the following disclaimer in the
18 1.1 pk * documentation and/or other materials provided with the distribution.
19 1.1 pk * 3. All advertising materials mentioning features or use of this software
20 1.1 pk * must display the following acknowledgement:
21 1.13 christos * This product includes software developed by the NetBSD
22 1.13 christos * Foundation, Inc. and its contributors.
23 1.1 pk * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.1 pk * contributors may be used to endorse or promote products derived
25 1.1 pk * from this software without specific prior written permission.
26 1.1 pk *
27 1.1 pk * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.1 pk * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.1 pk * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.1 pk * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.1 pk * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.1 pk * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.1 pk * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.1 pk * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.1 pk * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.1 pk * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.1 pk * POSSIBILITY OF SUCH DAMAGE.
38 1.1 pk */
39 1.24 scottr
40 1.25 thorpej #include "opt_pool.h"
41 1.24 scottr #include "opt_poollog.h"
42 1.28 thorpej #include "opt_lockdebug.h"
43 1.1 pk
44 1.1 pk #include <sys/param.h>
45 1.1 pk #include <sys/systm.h>
46 1.1 pk #include <sys/proc.h>
47 1.1 pk #include <sys/errno.h>
48 1.1 pk #include <sys/kernel.h>
49 1.1 pk #include <sys/malloc.h>
50 1.1 pk #include <sys/lock.h>
51 1.1 pk #include <sys/pool.h>
52 1.20 thorpej #include <sys/syslog.h>
53 1.3 pk
54 1.3 pk #include <uvm/uvm.h>
55 1.3 pk
56 1.1 pk /*
57 1.1 pk * Pool resource management utility.
58 1.3 pk *
59 1.3 pk * Memory is allocated in pages which are split into pieces according
60 1.3 pk * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
61 1.3 pk * in the pool structure and the individual pool items are on a linked list
62 1.3 pk * headed by `ph_itemlist' in each page header. The memory for building
63 1.3 pk * the page list is either taken from the allocated pages themselves (for
64 1.3 pk * small pool items) or taken from an internal pool of page headers (`phpool').
65 1.1 pk */
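/*
 * Illustrative sketch (not part of the original source): typical use of
 * the pool interface declared in <sys/pool.h>.  The item type `struct foo',
 * the wait-channel name "foopl" and the malloc type M_DEVBUF are
 * hypothetical placeholders chosen for the example.
 *
 *	static struct pool foo_pool;
 *	struct foo *fp;
 *
 *	// NULL alloc/release selects the default page allocator; pagesz 0
 *	// rounds to PAGE_SIZE.
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *
 *	fp = pool_get(&foo_pool, PR_WAITOK);	// may sleep for memory
 *	...
 *	pool_put(&foo_pool, fp);		// return the item to the pool
 */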
66 1.1 pk
67 1.3 pk /* List of all pools */
68 1.5 thorpej TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
69 1.3 pk
70 1.3 pk /* Private pool for page header structures */
71 1.3 pk static struct pool phpool;
72 1.3 pk
73 1.3 pk /* # of seconds to retain page after last use */
74 1.3 pk int pool_inactive_time = 10;
75 1.3 pk
76 1.3 pk /* Next candidate for drainage (see pool_drain()) */
77 1.23 thorpej static struct pool *drainpp;
78 1.23 thorpej
79 1.23 thorpej /* This spin lock protects both pool_head and drainpp. */
80 1.23 thorpej struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
81 1.3 pk
82 1.3 pk struct pool_item_header {
83 1.3 pk /* Page headers */
84 1.3 pk TAILQ_ENTRY(pool_item_header)
85 1.3 pk ph_pagelist; /* pool page list */
86 1.3 pk TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
87 1.3 pk LIST_ENTRY(pool_item_header)
88 1.3 pk ph_hashlist; /* Off-page page headers */
89 1.3 pk int ph_nmissing; /* # of chunks in use */
90 1.3 pk caddr_t ph_page; /* this page's address */
91 1.3 pk struct timeval ph_time; /* last referenced */
92 1.3 pk };
93 1.3 pk
94 1.1 pk struct pool_item {
95 1.3 pk #ifdef DIAGNOSTIC
96 1.3 pk int pi_magic;
97 1.33 chs #endif
98 1.25 thorpej #define PI_MAGIC 0xdeadbeef
99 1.3 pk /* Other entries use only this list entry */
100 1.3 pk TAILQ_ENTRY(pool_item) pi_list;
101 1.3 pk };
102 1.3 pk
103 1.25 thorpej #define PR_HASH_INDEX(pp,addr) \
104 1.3 pk (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
105 1.3 pk
106 1.43 thorpej /*
107 1.43 thorpej * Pool cache management.
108 1.43 thorpej *
109 1.43 thorpej * Pool caches provide a way for constructed objects to be cached by the
110 1.43 thorpej * pool subsystem. This can lead to performance improvements by avoiding
111 1.43 thorpej * needless object construction/destruction; it is deferred until absolutely
112 1.43 thorpej * necessary.
113 1.43 thorpej *
114 1.43 thorpej * Caches are grouped into cache groups. Each cache group references
115 1.43 thorpej * up to 16 constructed objects. When a cache allocates an object
116 1.43 thorpej * from the pool, it calls the object's constructor and places it into
117 1.43 thorpej * a cache group. When a cache group frees an object back to the pool,
118 1.43 thorpej * it first calls the object's destructor. This allows the object to
119 1.43 thorpej * persist in constructed form while freed to the cache.
120 1.43 thorpej *
121 1.43 thorpej * Multiple caches may exist for each pool. This allows a single
122 1.43 thorpej * object type to have multiple constructed forms. The pool references
123 1.43 thorpej * each cache, so that when a pool is drained by the pagedaemon, it can
124 1.43 thorpej * drain each individual cache as well. Each time a cache is drained,
125 1.43 thorpej * the most idle cache group is freed to the pool in its entirety.
126 1.43 thorpej *
 127 1.43 thorpej * Pool caches are laid on top of pools. By layering them, we can avoid
128 1.43 thorpej * the complexity of cache management for pools which would not benefit
129 1.43 thorpej * from it.
130 1.43 thorpej */
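/*
 * Illustrative sketch (not part of the original source): layering a
 * pool_cache on top of an existing pool.  The pool `foo_pool' and the
 * constructor/destructor pair foo_ctor()/foo_dtor() are hypothetical;
 * objects are assumed to be returned with pool_cache_put(), the expected
 * counterpart of pool_cache_get() declared in <sys/pool.h>.
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get(&foo_cache, PR_WAITOK);	// constructed object
 *	...
 *	pool_cache_put(&foo_cache, obj);	// kept constructed in the cache
 */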
131 1.43 thorpej
132 1.43 thorpej /* The cache group pool. */
133 1.43 thorpej static struct pool pcgpool;
134 1.43 thorpej
135 1.43 thorpej /* The pool cache group. */
136 1.43 thorpej #define PCG_NOBJECTS 16
137 1.43 thorpej struct pool_cache_group {
138 1.43 thorpej TAILQ_ENTRY(pool_cache_group)
139 1.43 thorpej pcg_list; /* link in the pool cache's group list */
140 1.43 thorpej u_int pcg_avail; /* # available objects */
141 1.43 thorpej /* pointers to the objects */
142 1.43 thorpej void *pcg_objects[PCG_NOBJECTS];
143 1.43 thorpej };
144 1.3 pk
145 1.43 thorpej static void pool_cache_reclaim(struct pool_cache *);
146 1.3 pk
147 1.42 thorpej static int pool_catchup(struct pool *);
148 1.50 enami static int pool_prime_page(struct pool *, caddr_t, int);
149 1.42 thorpej static void *pool_page_alloc(unsigned long, int, int);
150 1.42 thorpej static void pool_page_free(void *, unsigned long, int);
151 1.3 pk
152 1.42 thorpej static void pool_print1(struct pool *, const char *,
153 1.42 thorpej void (*)(const char *, ...));
154 1.3 pk
155 1.3 pk /*
156 1.52 thorpej * Pool log entry. An array of these is allocated in pool_init().
157 1.3 pk */
158 1.3 pk struct pool_log {
159 1.3 pk const char *pl_file;
160 1.3 pk long pl_line;
161 1.3 pk int pl_action;
162 1.25 thorpej #define PRLOG_GET 1
163 1.25 thorpej #define PRLOG_PUT 2
164 1.3 pk void *pl_addr;
165 1.1 pk };
166 1.1 pk
167 1.3 pk /* Number of entries in pool log buffers */
168 1.17 thorpej #ifndef POOL_LOGSIZE
169 1.17 thorpej #define POOL_LOGSIZE 10
170 1.17 thorpej #endif
171 1.17 thorpej
172 1.17 thorpej int pool_logsize = POOL_LOGSIZE;
173 1.1 pk
174 1.25 thorpej #ifdef DIAGNOSTIC
175 1.42 thorpej static __inline void
176 1.42 thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
177 1.3 pk {
178 1.3 pk int n = pp->pr_curlogentry;
179 1.3 pk struct pool_log *pl;
180 1.3 pk
181 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
182 1.3 pk return;
183 1.3 pk
184 1.3 pk /*
185 1.3 pk * Fill in the current entry. Wrap around and overwrite
186 1.3 pk * the oldest entry if necessary.
187 1.3 pk */
188 1.3 pk pl = &pp->pr_log[n];
189 1.3 pk pl->pl_file = file;
190 1.3 pk pl->pl_line = line;
191 1.3 pk pl->pl_action = action;
192 1.3 pk pl->pl_addr = v;
193 1.3 pk if (++n >= pp->pr_logsize)
194 1.3 pk n = 0;
195 1.3 pk pp->pr_curlogentry = n;
196 1.3 pk }
197 1.3 pk
198 1.3 pk static void
199 1.42 thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
200 1.42 thorpej void (*pr)(const char *, ...))
201 1.3 pk {
202 1.3 pk int i = pp->pr_logsize;
203 1.3 pk int n = pp->pr_curlogentry;
204 1.3 pk
205 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
206 1.3 pk return;
207 1.3 pk
208 1.3 pk /*
209 1.3 pk * Print all entries in this pool's log.
210 1.3 pk */
211 1.3 pk while (i-- > 0) {
212 1.3 pk struct pool_log *pl = &pp->pr_log[n];
213 1.3 pk if (pl->pl_action != 0) {
214 1.25 thorpej if (pi == NULL || pi == pl->pl_addr) {
215 1.25 thorpej (*pr)("\tlog entry %d:\n", i);
216 1.25 thorpej (*pr)("\t\taction = %s, addr = %p\n",
217 1.25 thorpej pl->pl_action == PRLOG_GET ? "get" : "put",
218 1.25 thorpej pl->pl_addr);
219 1.25 thorpej (*pr)("\t\tfile: %s at line %lu\n",
220 1.25 thorpej pl->pl_file, pl->pl_line);
221 1.25 thorpej }
222 1.3 pk }
223 1.3 pk if (++n >= pp->pr_logsize)
224 1.3 pk n = 0;
225 1.3 pk }
226 1.3 pk }
227 1.25 thorpej
228 1.42 thorpej static __inline void
229 1.42 thorpej pr_enter(struct pool *pp, const char *file, long line)
230 1.25 thorpej {
231 1.25 thorpej
232 1.34 thorpej if (__predict_false(pp->pr_entered_file != NULL)) {
233 1.25 thorpej printf("pool %s: reentrancy at file %s line %ld\n",
234 1.25 thorpej pp->pr_wchan, file, line);
235 1.25 thorpej printf(" previous entry at file %s line %ld\n",
236 1.25 thorpej pp->pr_entered_file, pp->pr_entered_line);
237 1.25 thorpej panic("pr_enter");
238 1.25 thorpej }
239 1.25 thorpej
240 1.25 thorpej pp->pr_entered_file = file;
241 1.25 thorpej pp->pr_entered_line = line;
242 1.25 thorpej }
243 1.25 thorpej
244 1.42 thorpej static __inline void
245 1.42 thorpej pr_leave(struct pool *pp)
246 1.25 thorpej {
247 1.25 thorpej
248 1.34 thorpej if (__predict_false(pp->pr_entered_file == NULL)) {
249 1.25 thorpej printf("pool %s not entered?\n", pp->pr_wchan);
250 1.25 thorpej panic("pr_leave");
251 1.25 thorpej }
252 1.25 thorpej
253 1.25 thorpej pp->pr_entered_file = NULL;
254 1.25 thorpej pp->pr_entered_line = 0;
255 1.25 thorpej }
256 1.25 thorpej
257 1.42 thorpej static __inline void
258 1.42 thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
259 1.25 thorpej {
260 1.25 thorpej
261 1.25 thorpej if (pp->pr_entered_file != NULL)
262 1.25 thorpej (*pr)("\n\tcurrently entered from file %s line %ld\n",
263 1.25 thorpej pp->pr_entered_file, pp->pr_entered_line);
264 1.25 thorpej }
265 1.3 pk #else
266 1.25 thorpej #define pr_log(pp, v, action, file, line)
267 1.25 thorpej #define pr_printlog(pp, pi, pr)
268 1.25 thorpej #define pr_enter(pp, file, line)
269 1.25 thorpej #define pr_leave(pp)
270 1.25 thorpej #define pr_enter_check(pp, pr)
271 1.25 thorpej #endif /* DIAGNOSTIC */
272 1.3 pk
273 1.3 pk /*
274 1.3 pk * Return the pool page header based on page address.
275 1.3 pk */
276 1.42 thorpej static __inline struct pool_item_header *
277 1.42 thorpej pr_find_pagehead(struct pool *pp, caddr_t page)
278 1.3 pk {
279 1.3 pk struct pool_item_header *ph;
280 1.3 pk
281 1.20 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0)
282 1.3 pk return ((struct pool_item_header *)(page + pp->pr_phoffset));
283 1.3 pk
284 1.3 pk for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
285 1.3 pk ph != NULL;
286 1.3 pk ph = LIST_NEXT(ph, ph_hashlist)) {
287 1.3 pk if (ph->ph_page == page)
288 1.3 pk return (ph);
289 1.3 pk }
290 1.3 pk return (NULL);
291 1.3 pk }
292 1.3 pk
293 1.3 pk /*
294 1.3 pk * Remove a page from the pool.
295 1.3 pk */
296 1.42 thorpej static __inline void
297 1.42 thorpej pr_rmpage(struct pool *pp, struct pool_item_header *ph)
298 1.3 pk {
299 1.3 pk
300 1.3 pk /*
301 1.7 thorpej * If the page was idle, decrement the idle page count.
302 1.3 pk */
303 1.6 thorpej if (ph->ph_nmissing == 0) {
304 1.6 thorpej #ifdef DIAGNOSTIC
305 1.6 thorpej if (pp->pr_nidle == 0)
306 1.6 thorpej panic("pr_rmpage: nidle inconsistent");
307 1.20 thorpej if (pp->pr_nitems < pp->pr_itemsperpage)
308 1.20 thorpej panic("pr_rmpage: nitems inconsistent");
309 1.6 thorpej #endif
310 1.6 thorpej pp->pr_nidle--;
311 1.6 thorpej }
312 1.7 thorpej
313 1.20 thorpej pp->pr_nitems -= pp->pr_itemsperpage;
314 1.20 thorpej
315 1.7 thorpej /*
316 1.7 thorpej * Unlink a page from the pool and release it.
317 1.7 thorpej */
318 1.7 thorpej TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
319 1.7 thorpej (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
320 1.7 thorpej pp->pr_npages--;
321 1.7 thorpej pp->pr_npagefree++;
322 1.6 thorpej
323 1.22 chs if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
324 1.27 pk int s;
325 1.22 chs LIST_REMOVE(ph, ph_hashlist);
326 1.27 pk s = splhigh();
327 1.22 chs pool_put(&phpool, ph);
328 1.27 pk splx(s);
329 1.22 chs }
330 1.22 chs
331 1.3 pk if (pp->pr_curpage == ph) {
332 1.3 pk /*
333 1.3 pk * Find a new non-empty page header, if any.
334 1.3 pk * Start search from the page head, to increase the
335 1.3 pk * chance for "high water" pages to be freed.
336 1.3 pk */
337 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
338 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist))
339 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
340 1.3 pk break;
341 1.3 pk
342 1.3 pk pp->pr_curpage = ph;
343 1.21 thorpej }
344 1.3 pk }
345 1.3 pk
346 1.3 pk /*
347 1.3 pk * Initialize the given pool resource structure.
348 1.3 pk *
349 1.3 pk * We export this routine to allow other kernel parts to declare
350 1.3 pk * static pools that must be initialized before malloc() is available.
351 1.3 pk */
352 1.3 pk void
353 1.42 thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
354 1.42 thorpej const char *wchan, size_t pagesz,
355 1.42 thorpej void *(*alloc)(unsigned long, int, int),
356 1.42 thorpej void (*release)(void *, unsigned long, int),
357 1.42 thorpej int mtype)
358 1.3 pk {
359 1.16 briggs int off, slack, i;
360 1.3 pk
361 1.25 thorpej #ifdef POOL_DIAGNOSTIC
362 1.25 thorpej /*
363 1.25 thorpej * Always log if POOL_DIAGNOSTIC is defined.
364 1.25 thorpej */
365 1.25 thorpej if (pool_logsize != 0)
366 1.25 thorpej flags |= PR_LOGGING;
367 1.25 thorpej #endif
368 1.25 thorpej
369 1.3 pk /*
370 1.3 pk * Check arguments and construct default values.
371 1.3 pk */
372 1.36 pk if (!powerof2(pagesz))
373 1.3 pk panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
374 1.3 pk
375 1.4 thorpej if (alloc == NULL && release == NULL) {
376 1.3 pk alloc = pool_page_alloc;
377 1.3 pk release = pool_page_free;
378 1.4 thorpej pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
379 1.4 thorpej } else if ((alloc != NULL && release != NULL) == 0) {
 380 1.4 thorpej /* If you specify one, you must specify both. */
381 1.4 thorpej panic("pool_init: must specify alloc and release together");
382 1.4 thorpej }
383 1.4 thorpej
384 1.3 pk if (pagesz == 0)
385 1.3 pk pagesz = PAGE_SIZE;
386 1.3 pk
387 1.3 pk if (align == 0)
388 1.3 pk align = ALIGN(1);
389 1.14 thorpej
390 1.14 thorpej if (size < sizeof(struct pool_item))
391 1.14 thorpej size = sizeof(struct pool_item);
392 1.3 pk
393 1.35 pk size = ALIGN(size);
394 1.43 thorpej if (size > pagesz)
395 1.35 pk panic("pool_init: pool item size (%lu) too large",
396 1.35 pk (u_long)size);
397 1.35 pk
398 1.3 pk /*
399 1.3 pk * Initialize the pool structure.
400 1.3 pk */
401 1.3 pk TAILQ_INIT(&pp->pr_pagelist);
402 1.43 thorpej TAILQ_INIT(&pp->pr_cachelist);
403 1.3 pk pp->pr_curpage = NULL;
404 1.3 pk pp->pr_npages = 0;
405 1.3 pk pp->pr_minitems = 0;
406 1.3 pk pp->pr_minpages = 0;
407 1.3 pk pp->pr_maxpages = UINT_MAX;
408 1.20 thorpej pp->pr_roflags = flags;
409 1.20 thorpej pp->pr_flags = 0;
410 1.35 pk pp->pr_size = size;
411 1.3 pk pp->pr_align = align;
412 1.3 pk pp->pr_wchan = wchan;
413 1.3 pk pp->pr_mtype = mtype;
414 1.3 pk pp->pr_alloc = alloc;
415 1.3 pk pp->pr_free = release;
416 1.3 pk pp->pr_pagesz = pagesz;
417 1.3 pk pp->pr_pagemask = ~(pagesz - 1);
418 1.3 pk pp->pr_pageshift = ffs(pagesz) - 1;
419 1.20 thorpej pp->pr_nitems = 0;
420 1.20 thorpej pp->pr_nout = 0;
421 1.20 thorpej pp->pr_hardlimit = UINT_MAX;
422 1.20 thorpej pp->pr_hardlimit_warning = NULL;
423 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = 0;
424 1.31 thorpej pp->pr_hardlimit_ratecap.tv_usec = 0;
425 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
426 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
427 1.3 pk
428 1.3 pk /*
429 1.3 pk * Decide whether to put the page header off page to avoid
430 1.3 pk * wasting too large a part of the page. Off-page page headers
431 1.3 pk * go on a hash table, so we can match a returned item
432 1.3 pk * with its header based on the page address.
433 1.3 pk * We use 1/16 of the page size as the threshold (XXX: tune)
434 1.3 pk */
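	/*
	 * For example (illustrative figures only): with 4096-byte pages the
	 * threshold is 256 bytes, so items smaller than 256 bytes keep their
	 * page header at the end of the page itself, while larger items get
	 * an off-page header allocated from `phpool'.
	 */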
435 1.3 pk if (pp->pr_size < pagesz/16) {
436 1.3 pk /* Use the end of the page for the page header */
437 1.20 thorpej pp->pr_roflags |= PR_PHINPAGE;
438 1.3 pk pp->pr_phoffset = off =
439 1.3 pk pagesz - ALIGN(sizeof(struct pool_item_header));
440 1.2 pk } else {
441 1.3 pk /* The page header will be taken from our page header pool */
442 1.3 pk pp->pr_phoffset = 0;
443 1.3 pk off = pagesz;
444 1.16 briggs for (i = 0; i < PR_HASHTABSIZE; i++) {
445 1.16 briggs LIST_INIT(&pp->pr_hashtab[i]);
446 1.16 briggs }
447 1.2 pk }
448 1.1 pk
449 1.3 pk /*
450 1.3 pk * Alignment is to take place at `ioff' within the item. This means
451 1.3 pk * we must reserve up to `align - 1' bytes on the page to allow
452 1.3 pk * appropriate positioning of each item.
453 1.3 pk *
454 1.3 pk * Silently enforce `0 <= ioff < align'.
455 1.3 pk */
456 1.3 pk pp->pr_itemoffset = ioff = ioff % align;
457 1.3 pk pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
458 1.43 thorpej KASSERT(pp->pr_itemsperpage != 0);
459 1.3 pk
460 1.3 pk /*
461 1.3 pk * Use the slack between the chunks and the page header
462 1.3 pk * for "cache coloring".
463 1.3 pk */
464 1.3 pk slack = off - pp->pr_itemsperpage * pp->pr_size;
465 1.3 pk pp->pr_maxcolor = (slack / align) * align;
466 1.3 pk pp->pr_curcolor = 0;
467 1.3 pk
468 1.3 pk pp->pr_nget = 0;
469 1.3 pk pp->pr_nfail = 0;
470 1.3 pk pp->pr_nput = 0;
471 1.3 pk pp->pr_npagealloc = 0;
472 1.3 pk pp->pr_npagefree = 0;
473 1.1 pk pp->pr_hiwat = 0;
474 1.8 thorpej pp->pr_nidle = 0;
475 1.3 pk
476 1.25 thorpej if (flags & PR_LOGGING) {
477 1.25 thorpej if (kmem_map == NULL ||
478 1.25 thorpej (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
479 1.25 thorpej M_TEMP, M_NOWAIT)) == NULL)
480 1.20 thorpej pp->pr_roflags &= ~PR_LOGGING;
481 1.3 pk pp->pr_curlogentry = 0;
482 1.3 pk pp->pr_logsize = pool_logsize;
483 1.3 pk }
484 1.25 thorpej
485 1.25 thorpej pp->pr_entered_file = NULL;
486 1.25 thorpej pp->pr_entered_line = 0;
487 1.3 pk
488 1.21 thorpej simple_lock_init(&pp->pr_slock);
489 1.1 pk
490 1.3 pk /*
491 1.43 thorpej * Initialize private page header pool and cache magazine pool if we
492 1.43 thorpej * haven't done so yet.
493 1.23 thorpej * XXX LOCKING.
494 1.3 pk */
495 1.3 pk if (phpool.pr_size == 0) {
496 1.3 pk pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
497 1.43 thorpej 0, "phpool", 0, 0, 0, 0);
498 1.43 thorpej pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
499 1.43 thorpej 0, "pcgpool", 0, 0, 0, 0);
500 1.1 pk }
501 1.1 pk
502 1.23 thorpej /* Insert into the list of all pools. */
503 1.23 thorpej simple_lock(&pool_head_slock);
504 1.23 thorpej TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
505 1.23 thorpej simple_unlock(&pool_head_slock);
506 1.1 pk }
507 1.1 pk
508 1.1 pk /*
 509 1.1 pk * De-commission a pool resource.
510 1.1 pk */
511 1.1 pk void
512 1.42 thorpej pool_destroy(struct pool *pp)
513 1.1 pk {
514 1.3 pk struct pool_item_header *ph;
515 1.43 thorpej struct pool_cache *pc;
516 1.43 thorpej
517 1.43 thorpej /* Destroy all caches for this pool. */
518 1.43 thorpej while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
519 1.43 thorpej pool_cache_destroy(pc);
520 1.3 pk
521 1.3 pk #ifdef DIAGNOSTIC
522 1.20 thorpej if (pp->pr_nout != 0) {
523 1.25 thorpej pr_printlog(pp, NULL, printf);
524 1.20 thorpej panic("pool_destroy: pool busy: still out: %u\n",
525 1.20 thorpej pp->pr_nout);
526 1.3 pk }
527 1.3 pk #endif
528 1.1 pk
529 1.3 pk /* Remove all pages */
530 1.20 thorpej if ((pp->pr_roflags & PR_STATIC) == 0)
531 1.3 pk while ((ph = pp->pr_pagelist.tqh_first) != NULL)
532 1.3 pk pr_rmpage(pp, ph);
533 1.3 pk
534 1.3 pk /* Remove from global pool list */
535 1.23 thorpej simple_lock(&pool_head_slock);
536 1.3 pk TAILQ_REMOVE(&pool_head, pp, pr_poollist);
537 1.23 thorpej /* XXX Only clear this if we were drainpp? */
538 1.3 pk drainpp = NULL;
539 1.23 thorpej simple_unlock(&pool_head_slock);
540 1.3 pk
541 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) != 0)
542 1.3 pk free(pp->pr_log, M_TEMP);
543 1.2 pk
544 1.20 thorpej if (pp->pr_roflags & PR_FREEHEADER)
545 1.3 pk free(pp, M_POOL);
546 1.1 pk }
547 1.1 pk
548 1.1 pk
549 1.1 pk /*
550 1.3 pk * Grab an item from the pool; must be called at appropriate spl level
551 1.1 pk */
552 1.3 pk void *
553 1.42 thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
554 1.1 pk {
555 1.1 pk void *v;
556 1.1 pk struct pool_item *pi;
557 1.3 pk struct pool_item_header *ph;
558 1.1 pk
559 1.2 pk #ifdef DIAGNOSTIC
560 1.34 thorpej if (__predict_false((pp->pr_roflags & PR_STATIC) &&
561 1.34 thorpej (flags & PR_MALLOCOK))) {
562 1.25 thorpej pr_printlog(pp, NULL, printf);
563 1.2 pk panic("pool_get: static");
564 1.3 pk }
565 1.2 pk #endif
566 1.2 pk
567 1.37 sommerfe if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
568 1.37 sommerfe (flags & PR_WAITOK) != 0))
569 1.3 pk panic("pool_get: must have NOWAIT");
570 1.1 pk
571 1.21 thorpej simple_lock(&pp->pr_slock);
572 1.25 thorpej pr_enter(pp, file, line);
573 1.20 thorpej
574 1.20 thorpej startover:
575 1.20 thorpej /*
576 1.20 thorpej * Check to see if we've reached the hard limit. If we have,
577 1.20 thorpej * and we can wait, then wait until an item has been returned to
578 1.20 thorpej * the pool.
579 1.20 thorpej */
580 1.20 thorpej #ifdef DIAGNOSTIC
581 1.34 thorpej if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
582 1.25 thorpej pr_leave(pp);
583 1.21 thorpej simple_unlock(&pp->pr_slock);
584 1.20 thorpej panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
585 1.20 thorpej }
586 1.20 thorpej #endif
587 1.34 thorpej if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
588 1.29 sommerfe if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
589 1.20 thorpej /*
590 1.20 thorpej * XXX: A warning isn't logged in this case. Should
591 1.20 thorpej * it be?
592 1.20 thorpej */
593 1.20 thorpej pp->pr_flags |= PR_WANTED;
594 1.25 thorpej pr_leave(pp);
595 1.40 sommerfe ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
596 1.25 thorpej pr_enter(pp, file, line);
597 1.20 thorpej goto startover;
598 1.20 thorpej }
599 1.31 thorpej
600 1.31 thorpej /*
601 1.31 thorpej * Log a message that the hard limit has been hit.
602 1.31 thorpej */
603 1.31 thorpej if (pp->pr_hardlimit_warning != NULL &&
604 1.31 thorpej ratecheck(&pp->pr_hardlimit_warning_last,
605 1.31 thorpej &pp->pr_hardlimit_ratecap))
606 1.31 thorpej log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
607 1.21 thorpej
608 1.21 thorpej if (flags & PR_URGENT)
609 1.21 thorpej panic("pool_get: urgent");
610 1.21 thorpej
611 1.21 thorpej pp->pr_nfail++;
612 1.21 thorpej
613 1.25 thorpej pr_leave(pp);
614 1.21 thorpej simple_unlock(&pp->pr_slock);
615 1.20 thorpej return (NULL);
616 1.20 thorpej }
617 1.20 thorpej
618 1.3 pk /*
619 1.3 pk * The convention we use is that if `curpage' is not NULL, then
620 1.3 pk * it points at a non-empty bucket. In particular, `curpage'
621 1.3 pk * never points at a page header which has PR_PHINPAGE set and
622 1.3 pk * has no items in its bucket.
623 1.3 pk */
624 1.20 thorpej if ((ph = pp->pr_curpage) == NULL) {
625 1.15 pk void *v;
626 1.15 pk
627 1.20 thorpej #ifdef DIAGNOSTIC
628 1.20 thorpej if (pp->pr_nitems != 0) {
629 1.21 thorpej simple_unlock(&pp->pr_slock);
630 1.20 thorpej printf("pool_get: %s: curpage NULL, nitems %u\n",
631 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
632 1.20 thorpej panic("pool_get: nitems inconsistent\n");
633 1.20 thorpej }
634 1.20 thorpej #endif
635 1.20 thorpej
636 1.21 thorpej /*
637 1.21 thorpej * Call the back-end page allocator for more memory.
638 1.21 thorpej * Release the pool lock, as the back-end page allocator
639 1.21 thorpej * may block.
640 1.21 thorpej */
641 1.25 thorpej pr_leave(pp);
642 1.21 thorpej simple_unlock(&pp->pr_slock);
643 1.21 thorpej v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
644 1.21 thorpej simple_lock(&pp->pr_slock);
645 1.25 thorpej pr_enter(pp, file, line);
646 1.15 pk
647 1.21 thorpej if (v == NULL) {
648 1.21 thorpej /*
649 1.21 thorpej * We were unable to allocate a page, but
650 1.21 thorpej * we released the lock during allocation,
651 1.21 thorpej * so perhaps items were freed back to the
652 1.21 thorpej * pool. Check for this case.
653 1.21 thorpej */
654 1.21 thorpej if (pp->pr_curpage != NULL)
655 1.21 thorpej goto startover;
656 1.15 pk
657 1.3 pk if (flags & PR_URGENT)
658 1.3 pk panic("pool_get: urgent");
659 1.21 thorpej
660 1.3 pk if ((flags & PR_WAITOK) == 0) {
661 1.3 pk pp->pr_nfail++;
662 1.25 thorpej pr_leave(pp);
663 1.21 thorpej simple_unlock(&pp->pr_slock);
664 1.1 pk return (NULL);
665 1.3 pk }
666 1.3 pk
667 1.15 pk /*
668 1.15 pk * Wait for items to be returned to this pool.
669 1.21 thorpej *
670 1.15 pk * XXX: we actually want to wait just until
671 1.15 pk * the page allocator has memory again. Depending
672 1.15 pk * on this pool's usage, we might get stuck here
673 1.15 pk * for a long time.
674 1.20 thorpej *
675 1.20 thorpej * XXX: maybe we should wake up once a second and
676 1.20 thorpej * try again?
677 1.15 pk */
678 1.1 pk pp->pr_flags |= PR_WANTED;
679 1.25 thorpej pr_leave(pp);
680 1.40 sommerfe ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
681 1.25 thorpej pr_enter(pp, file, line);
682 1.20 thorpej goto startover;
683 1.1 pk }
684 1.3 pk
685 1.15 pk /* We have more memory; add it to the pool */
686 1.50 enami if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) {
687 1.50 enami /*
 688 1.50 enami * We were probably not allowed to wait and
 689 1.50 enami * therefore couldn't allocate a page header.
690 1.50 enami */
691 1.50 enami (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
692 1.50 enami pp->pr_nfail++;
693 1.50 enami pr_leave(pp);
694 1.50 enami simple_unlock(&pp->pr_slock);
695 1.50 enami return (NULL);
696 1.50 enami }
697 1.15 pk pp->pr_npagealloc++;
698 1.15 pk
699 1.20 thorpej /* Start the allocation process over. */
700 1.20 thorpej goto startover;
701 1.3 pk }
702 1.3 pk
703 1.34 thorpej if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
704 1.25 thorpej pr_leave(pp);
705 1.21 thorpej simple_unlock(&pp->pr_slock);
706 1.3 pk panic("pool_get: %s: page empty", pp->pr_wchan);
707 1.21 thorpej }
708 1.20 thorpej #ifdef DIAGNOSTIC
709 1.34 thorpej if (__predict_false(pp->pr_nitems == 0)) {
710 1.25 thorpej pr_leave(pp);
711 1.21 thorpej simple_unlock(&pp->pr_slock);
712 1.20 thorpej printf("pool_get: %s: items on itemlist, nitems %u\n",
713 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
714 1.20 thorpej panic("pool_get: nitems inconsistent\n");
715 1.20 thorpej }
716 1.20 thorpej #endif
717 1.3 pk pr_log(pp, v, PRLOG_GET, file, line);
718 1.3 pk
719 1.3 pk #ifdef DIAGNOSTIC
720 1.34 thorpej if (__predict_false(pi->pi_magic != PI_MAGIC)) {
721 1.25 thorpej pr_printlog(pp, pi, printf);
722 1.3 pk panic("pool_get(%s): free list modified: magic=%x; page %p;"
723 1.3 pk " item addr %p\n",
724 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
725 1.3 pk }
726 1.3 pk #endif
727 1.3 pk
728 1.3 pk /*
729 1.3 pk * Remove from item list.
730 1.3 pk */
731 1.3 pk TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
732 1.20 thorpej pp->pr_nitems--;
733 1.20 thorpej pp->pr_nout++;
734 1.6 thorpej if (ph->ph_nmissing == 0) {
735 1.6 thorpej #ifdef DIAGNOSTIC
736 1.34 thorpej if (__predict_false(pp->pr_nidle == 0))
737 1.6 thorpej panic("pool_get: nidle inconsistent");
738 1.6 thorpej #endif
739 1.6 thorpej pp->pr_nidle--;
740 1.6 thorpej }
741 1.3 pk ph->ph_nmissing++;
742 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
743 1.21 thorpej #ifdef DIAGNOSTIC
744 1.34 thorpej if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
745 1.25 thorpej pr_leave(pp);
746 1.21 thorpej simple_unlock(&pp->pr_slock);
747 1.21 thorpej panic("pool_get: %s: nmissing inconsistent",
748 1.21 thorpej pp->pr_wchan);
749 1.21 thorpej }
750 1.21 thorpej #endif
751 1.3 pk /*
752 1.3 pk * Find a new non-empty page header, if any.
753 1.3 pk * Start search from the page head, to increase
754 1.3 pk * the chance for "high water" pages to be freed.
755 1.3 pk *
756 1.21 thorpej * Migrate empty pages to the end of the list. This
757 1.21 thorpej * will speed the update of curpage as pages become
758 1.21 thorpej * idle. Empty pages intermingled with idle pages
759 1.21 thorpej * is no big deal. As soon as a page becomes un-empty,
760 1.21 thorpej * it will move back to the head of the list.
761 1.3 pk */
762 1.3 pk TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
763 1.21 thorpej TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
764 1.21 thorpej for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
765 1.21 thorpej ph = TAILQ_NEXT(ph, ph_pagelist))
766 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
767 1.3 pk break;
768 1.3 pk
769 1.3 pk pp->pr_curpage = ph;
770 1.1 pk }
771 1.3 pk
772 1.3 pk pp->pr_nget++;
773 1.20 thorpej
774 1.20 thorpej /*
775 1.20 thorpej * If we have a low water mark and we are now below that low
776 1.20 thorpej * water mark, add more items to the pool.
777 1.20 thorpej */
778 1.20 thorpej if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
779 1.20 thorpej /*
780 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
781 1.20 thorpej * to try again in a second or so? The latter could break
782 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
783 1.20 thorpej */
784 1.20 thorpej }
785 1.20 thorpej
786 1.25 thorpej pr_leave(pp);
787 1.21 thorpej simple_unlock(&pp->pr_slock);
788 1.1 pk return (v);
789 1.1 pk }
790 1.1 pk
791 1.1 pk /*
792 1.43 thorpej * Internal version of pool_put(). Pool is already locked/entered.
793 1.1 pk */
794 1.43 thorpej static void
795 1.43 thorpej pool_do_put(struct pool *pp, void *v, const char *file, long line)
796 1.1 pk {
797 1.1 pk struct pool_item *pi = v;
798 1.3 pk struct pool_item_header *ph;
799 1.3 pk caddr_t page;
800 1.21 thorpej int s;
801 1.3 pk
802 1.3 pk page = (caddr_t)((u_long)v & pp->pr_pagemask);
803 1.1 pk
804 1.30 thorpej #ifdef DIAGNOSTIC
805 1.34 thorpej if (__predict_false(pp->pr_nout == 0)) {
806 1.30 thorpej printf("pool %s: putting with none out\n",
807 1.30 thorpej pp->pr_wchan);
808 1.30 thorpej panic("pool_put");
809 1.30 thorpej }
810 1.30 thorpej #endif
811 1.3 pk
812 1.3 pk pr_log(pp, v, PRLOG_PUT, file, line);
813 1.3 pk
814 1.34 thorpej if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
815 1.25 thorpej pr_printlog(pp, NULL, printf);
816 1.3 pk panic("pool_put: %s: page header missing", pp->pr_wchan);
817 1.3 pk }
818 1.28 thorpej
819 1.28 thorpej #ifdef LOCKDEBUG
820 1.28 thorpej /*
821 1.28 thorpej * Check if we're freeing a locked simple lock.
822 1.28 thorpej */
823 1.28 thorpej simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
824 1.28 thorpej #endif
825 1.3 pk
826 1.3 pk /*
827 1.3 pk * Return to item list.
828 1.3 pk */
829 1.2 pk #ifdef DIAGNOSTIC
830 1.3 pk pi->pi_magic = PI_MAGIC;
831 1.3 pk #endif
832 1.32 chs #ifdef DEBUG
833 1.32 chs {
834 1.32 chs int i, *ip = v;
835 1.32 chs
836 1.32 chs for (i = 0; i < pp->pr_size / sizeof(int); i++) {
837 1.32 chs *ip++ = PI_MAGIC;
838 1.32 chs }
839 1.32 chs }
840 1.32 chs #endif
841 1.32 chs
842 1.3 pk TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
843 1.3 pk ph->ph_nmissing--;
844 1.3 pk pp->pr_nput++;
845 1.20 thorpej pp->pr_nitems++;
846 1.20 thorpej pp->pr_nout--;
847 1.3 pk
848 1.3 pk /* Cancel "pool empty" condition if it exists */
849 1.3 pk if (pp->pr_curpage == NULL)
850 1.3 pk pp->pr_curpage = ph;
851 1.3 pk
852 1.3 pk if (pp->pr_flags & PR_WANTED) {
853 1.3 pk pp->pr_flags &= ~PR_WANTED;
854 1.15 pk if (ph->ph_nmissing == 0)
855 1.15 pk pp->pr_nidle++;
856 1.3 pk wakeup((caddr_t)pp);
857 1.3 pk return;
858 1.3 pk }
859 1.3 pk
860 1.3 pk /*
861 1.21 thorpej * If this page is now complete, do one of two things:
862 1.21 thorpej *
863 1.21 thorpej * (1) If we have more pages than the page high water
864 1.21 thorpej * mark, free the page back to the system.
865 1.21 thorpej *
866 1.21 thorpej * (2) Move it to the end of the page list, so that
867 1.21 thorpej * we minimize our chances of fragmenting the
868 1.21 thorpej * pool. Idle pages migrate to the end (along with
869 1.21 thorpej * completely empty pages, so that we find un-empty
870 1.21 thorpej * pages more quickly when we update curpage) of the
871 1.21 thorpej * list so they can be more easily swept up by
872 1.21 thorpej * the pagedaemon when pages are scarce.
873 1.3 pk */
874 1.3 pk if (ph->ph_nmissing == 0) {
875 1.6 thorpej pp->pr_nidle++;
876 1.3 pk if (pp->pr_npages > pp->pr_maxpages) {
877 1.3 pk pr_rmpage(pp, ph);
878 1.3 pk } else {
879 1.3 pk TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
880 1.3 pk TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
881 1.3 pk
882 1.21 thorpej /*
883 1.21 thorpej * Update the timestamp on the page. A page must
884 1.21 thorpej * be idle for some period of time before it can
885 1.21 thorpej * be reclaimed by the pagedaemon. This minimizes
886 1.21 thorpej * ping-pong'ing for memory.
887 1.21 thorpej */
888 1.21 thorpej s = splclock();
889 1.21 thorpej ph->ph_time = mono_time;
890 1.21 thorpej splx(s);
891 1.21 thorpej
892 1.21 thorpej /*
893 1.21 thorpej * Update the current page pointer. Just look for
894 1.21 thorpej * the first page with any free items.
895 1.21 thorpej *
896 1.21 thorpej * XXX: Maybe we want an option to look for the
897 1.21 thorpej * page with the fewest available items, to minimize
898 1.21 thorpej * fragmentation?
899 1.21 thorpej */
900 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
901 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist))
902 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
903 1.3 pk break;
904 1.1 pk
905 1.3 pk pp->pr_curpage = ph;
906 1.1 pk }
907 1.1 pk }
908 1.21 thorpej /*
909 1.21 thorpej * If the page has just become un-empty, move it to the head of
910 1.21 thorpej * the list, and make it the current page. The next allocation
911 1.21 thorpej * will get the item from this page, instead of further fragmenting
912 1.21 thorpej * the pool.
913 1.21 thorpej */
914 1.21 thorpej else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
915 1.21 thorpej TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
916 1.21 thorpej TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
917 1.21 thorpej pp->pr_curpage = ph;
918 1.21 thorpej }
919 1.43 thorpej }
920 1.43 thorpej
921 1.43 thorpej /*
922 1.43 thorpej * Return resource to the pool; must be called at appropriate spl level
923 1.43 thorpej */
924 1.43 thorpej void
925 1.43 thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
926 1.43 thorpej {
927 1.43 thorpej
928 1.43 thorpej simple_lock(&pp->pr_slock);
929 1.43 thorpej pr_enter(pp, file, line);
930 1.43 thorpej
931 1.43 thorpej pool_do_put(pp, v, file, line);
932 1.21 thorpej
933 1.25 thorpej pr_leave(pp);
934 1.21 thorpej simple_unlock(&pp->pr_slock);
935 1.1 pk }
936 1.1 pk
937 1.1 pk /*
938 1.3 pk * Add a page worth of items to the pool.
939 1.21 thorpej *
940 1.21 thorpej * Note, we must be called with the pool descriptor LOCKED.
941 1.3 pk */
942 1.50 enami static int
943 1.50 enami pool_prime_page(struct pool *pp, caddr_t storage, int flags)
944 1.3 pk {
945 1.3 pk struct pool_item *pi;
946 1.3 pk struct pool_item_header *ph;
947 1.3 pk caddr_t cp = storage;
948 1.3 pk unsigned int align = pp->pr_align;
949 1.3 pk unsigned int ioff = pp->pr_itemoffset;
950 1.27 pk int s, n;
951 1.36 pk
952 1.36 pk if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
953 1.36 pk panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
954 1.3 pk
955 1.20 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
956 1.3 pk ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
957 1.3 pk } else {
958 1.27 pk s = splhigh();
959 1.50 enami ph = pool_get(&phpool, flags);
960 1.27 pk splx(s);
961 1.50 enami if (ph == NULL)
962 1.50 enami return (ENOMEM);
963 1.3 pk LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
964 1.3 pk ph, ph_hashlist);
965 1.3 pk }
966 1.3 pk
967 1.3 pk /*
968 1.3 pk * Insert page header.
969 1.3 pk */
970 1.3 pk TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
971 1.3 pk TAILQ_INIT(&ph->ph_itemlist);
972 1.3 pk ph->ph_page = storage;
973 1.3 pk ph->ph_nmissing = 0;
974 1.21 thorpej memset(&ph->ph_time, 0, sizeof(ph->ph_time));
975 1.3 pk
976 1.6 thorpej pp->pr_nidle++;
977 1.6 thorpej
978 1.3 pk /*
979 1.3 pk * Color this page.
980 1.3 pk */
981 1.3 pk cp = (caddr_t)(cp + pp->pr_curcolor);
982 1.3 pk if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
983 1.3 pk pp->pr_curcolor = 0;
984 1.3 pk
985 1.3 pk /*
 986 1.3 pk * Adjust storage to apply alignment to `pr_itemoffset' in each item.
987 1.3 pk */
988 1.3 pk if (ioff != 0)
989 1.3 pk cp = (caddr_t)(cp + (align - ioff));
990 1.3 pk
991 1.3 pk /*
992 1.3 pk * Insert remaining chunks on the bucket list.
993 1.3 pk */
994 1.3 pk n = pp->pr_itemsperpage;
995 1.20 thorpej pp->pr_nitems += n;
996 1.3 pk
997 1.3 pk while (n--) {
998 1.3 pk pi = (struct pool_item *)cp;
999 1.3 pk
1000 1.3 pk /* Insert on page list */
1001 1.3 pk TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1002 1.3 pk #ifdef DIAGNOSTIC
1003 1.3 pk pi->pi_magic = PI_MAGIC;
1004 1.3 pk #endif
1005 1.3 pk cp = (caddr_t)(cp + pp->pr_size);
1006 1.3 pk }
1007 1.3 pk
1008 1.3 pk /*
1009 1.3 pk * If the pool was depleted, point at the new page.
1010 1.3 pk */
1011 1.3 pk if (pp->pr_curpage == NULL)
1012 1.3 pk pp->pr_curpage = ph;
1013 1.3 pk
1014 1.3 pk if (++pp->pr_npages > pp->pr_hiwat)
1015 1.3 pk pp->pr_hiwat = pp->pr_npages;
1016 1.50 enami
1017 1.50 enami return (0);
1018 1.3 pk }
1019 1.3 pk
1020 1.20 thorpej /*
1021 1.52 thorpej * Used by pool_get() when nitems drops below the low water mark. This
 1022 1.52 thorpej * is used to catch nitems up with the low water mark.
1023 1.20 thorpej *
1024 1.21 thorpej * Note 1, we never wait for memory here, we let the caller decide what to do.
1025 1.20 thorpej *
1026 1.20 thorpej * Note 2, this doesn't work with static pools.
1027 1.20 thorpej *
1028 1.20 thorpej * Note 3, we must be called with the pool already locked, and we return
1029 1.20 thorpej * with it locked.
1030 1.20 thorpej */
1031 1.20 thorpej static int
1032 1.42 thorpej pool_catchup(struct pool *pp)
1033 1.20 thorpej {
1034 1.20 thorpej caddr_t cp;
1035 1.20 thorpej int error = 0;
1036 1.20 thorpej
1037 1.20 thorpej if (pp->pr_roflags & PR_STATIC) {
1038 1.20 thorpej /*
1039 1.20 thorpej * We dropped below the low water mark, and this is not a
1040 1.20 thorpej * good thing. Log a warning.
1041 1.21 thorpej *
1042 1.21 thorpej * XXX: rate-limit this?
1043 1.20 thorpej */
1044 1.20 thorpej printf("WARNING: static pool `%s' dropped below low water "
1045 1.20 thorpej "mark\n", pp->pr_wchan);
1046 1.20 thorpej return (0);
1047 1.20 thorpej }
1048 1.20 thorpej
1049 1.21 thorpej while (pp->pr_nitems < pp->pr_minitems) {
1050 1.20 thorpej /*
1051 1.21 thorpej * Call the page back-end allocator for more memory.
1052 1.21 thorpej *
1053 1.21 thorpej * XXX: We never wait, so should we bother unlocking
1054 1.21 thorpej * the pool descriptor?
1055 1.20 thorpej */
1056 1.21 thorpej simple_unlock(&pp->pr_slock);
1057 1.20 thorpej cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1058 1.21 thorpej simple_lock(&pp->pr_slock);
1059 1.34 thorpej if (__predict_false(cp == NULL)) {
1060 1.20 thorpej error = ENOMEM;
1061 1.20 thorpej break;
1062 1.20 thorpej }
1063 1.50 enami if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
1064 1.50 enami (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
1065 1.50 enami break;
1066 1.50 enami }
1067 1.26 thorpej pp->pr_npagealloc++;
1068 1.20 thorpej }
1069 1.20 thorpej
1070 1.20 thorpej return (error);
1071 1.20 thorpej }
1072 1.20 thorpej
1073 1.3 pk void
1074 1.42 thorpej pool_setlowat(struct pool *pp, int n)
1075 1.3 pk {
1076 1.20 thorpej int error;
1077 1.15 pk
1078 1.21 thorpej simple_lock(&pp->pr_slock);
1079 1.21 thorpej
1080 1.3 pk pp->pr_minitems = n;
1081 1.15 pk pp->pr_minpages = (n == 0)
1082 1.15 pk ? 0
1083 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1084 1.20 thorpej
1085 1.20 thorpej /* Make sure we're caught up with the newly-set low water mark. */
1086 1.41 sommerfe if ((pp->pr_nitems < pp->pr_minitems) &&
1087 1.41 sommerfe (error = pool_catchup(pp)) != 0) {
1088 1.20 thorpej /*
1089 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1090 1.20 thorpej * to try again in a second or so? The latter could break
1091 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1092 1.20 thorpej */
1093 1.20 thorpej }
1094 1.21 thorpej
1095 1.21 thorpej simple_unlock(&pp->pr_slock);
1096 1.3 pk }
1097 1.3 pk
1098 1.3 pk void
1099 1.42 thorpej pool_sethiwat(struct pool *pp, int n)
1100 1.3 pk {
1101 1.15 pk
1102 1.21 thorpej simple_lock(&pp->pr_slock);
1103 1.21 thorpej
1104 1.15 pk pp->pr_maxpages = (n == 0)
1105 1.15 pk ? 0
1106 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1107 1.21 thorpej
1108 1.21 thorpej simple_unlock(&pp->pr_slock);
1109 1.3 pk }
1110 1.3 pk
1111 1.20 thorpej void
1112 1.42 thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1113 1.20 thorpej {
1114 1.20 thorpej
1115 1.21 thorpej simple_lock(&pp->pr_slock);
1116 1.20 thorpej
1117 1.20 thorpej pp->pr_hardlimit = n;
1118 1.20 thorpej pp->pr_hardlimit_warning = warnmess;
1119 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1120 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
1121 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
1122 1.20 thorpej
1123 1.20 thorpej /*
1124 1.21 thorpej * In-line version of pool_sethiwat(), because we don't want to
1125 1.21 thorpej * release the lock.
1126 1.20 thorpej */
1127 1.20 thorpej pp->pr_maxpages = (n == 0)
1128 1.20 thorpej ? 0
1129 1.20 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1130 1.21 thorpej
1131 1.21 thorpej simple_unlock(&pp->pr_slock);
1132 1.20 thorpej }
1133 1.3 pk
1134 1.3 pk /*
1135 1.3 pk * Default page allocator.
1136 1.3 pk */
1137 1.3 pk static void *
1138 1.42 thorpej pool_page_alloc(unsigned long sz, int flags, int mtype)
1139 1.3 pk {
1140 1.11 thorpej boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1141 1.3 pk
1142 1.11 thorpej return ((void *)uvm_km_alloc_poolpage(waitok));
1143 1.3 pk }
1144 1.3 pk
1145 1.3 pk static void
1146 1.42 thorpej pool_page_free(void *v, unsigned long sz, int mtype)
1147 1.3 pk {
1148 1.3 pk
1149 1.10 eeh uvm_km_free_poolpage((vaddr_t)v);
1150 1.3 pk }
1151 1.12 thorpej
1152 1.12 thorpej /*
1153 1.12 thorpej * Alternate pool page allocator for pools that know they will
1154 1.12 thorpej * never be accessed in interrupt context.
1155 1.12 thorpej */
1156 1.12 thorpej void *
1157 1.42 thorpej pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
1158 1.12 thorpej {
1159 1.12 thorpej boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1160 1.12 thorpej
1161 1.12 thorpej return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1162 1.12 thorpej waitok));
1163 1.12 thorpej }
1164 1.12 thorpej
1165 1.12 thorpej void
1166 1.42 thorpej pool_page_free_nointr(void *v, unsigned long sz, int mtype)
1167 1.12 thorpej {
1168 1.12 thorpej
1169 1.12 thorpej uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1170 1.12 thorpej }
1171 1.12 thorpej
1172 1.3 pk
1173 1.3 pk /*
1174 1.3 pk * Release all complete pages that have not been used recently.
1175 1.3 pk */
1176 1.3 pk void
1177 1.42 thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
1178 1.3 pk {
1179 1.3 pk struct pool_item_header *ph, *phnext;
1180 1.43 thorpej struct pool_cache *pc;
1181 1.21 thorpej struct timeval curtime;
1182 1.21 thorpej int s;
1183 1.3 pk
1184 1.20 thorpej if (pp->pr_roflags & PR_STATIC)
1185 1.3 pk return;
1186 1.3 pk
1187 1.21 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1188 1.3 pk return;
1189 1.25 thorpej pr_enter(pp, file, line);
1190 1.3 pk
1191 1.43 thorpej /*
1192 1.43 thorpej * Reclaim items from the pool's caches.
1193 1.43 thorpej */
1194 1.43 thorpej for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1195 1.43 thorpej pc = TAILQ_NEXT(pc, pc_poollist))
1196 1.43 thorpej pool_cache_reclaim(pc);
1197 1.43 thorpej
1198 1.21 thorpej s = splclock();
1199 1.21 thorpej curtime = mono_time;
1200 1.21 thorpej splx(s);
1201 1.21 thorpej
1202 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1203 1.3 pk phnext = TAILQ_NEXT(ph, ph_pagelist);
1204 1.3 pk
1205 1.3 pk /* Check our minimum page claim */
1206 1.3 pk if (pp->pr_npages <= pp->pr_minpages)
1207 1.3 pk break;
1208 1.3 pk
1209 1.3 pk if (ph->ph_nmissing == 0) {
1210 1.3 pk struct timeval diff;
1211 1.3 pk timersub(&curtime, &ph->ph_time, &diff);
1212 1.3 pk if (diff.tv_sec < pool_inactive_time)
1213 1.3 pk continue;
1214 1.21 thorpej
1215 1.21 thorpej /*
1216 1.21 thorpej * If freeing this page would put us below
1217 1.21 thorpej * the low water mark, stop now.
1218 1.21 thorpej */
1219 1.21 thorpej if ((pp->pr_nitems - pp->pr_itemsperpage) <
1220 1.21 thorpej pp->pr_minitems)
1221 1.21 thorpej break;
1222 1.21 thorpej
1223 1.3 pk pr_rmpage(pp, ph);
1224 1.3 pk }
1225 1.3 pk }
1226 1.3 pk
1227 1.25 thorpej pr_leave(pp);
1228 1.21 thorpej simple_unlock(&pp->pr_slock);
1229 1.3 pk }
1230 1.3 pk
1231 1.3 pk
1232 1.3 pk /*
1233 1.3 pk * Drain pools, one at a time.
1234 1.21 thorpej *
1235 1.21 thorpej * Note, we must never be called from an interrupt context.
1236 1.3 pk */
1237 1.3 pk void
1238 1.42 thorpej pool_drain(void *arg)
1239 1.3 pk {
1240 1.3 pk struct pool *pp;
1241 1.23 thorpej int s;
1242 1.3 pk
1243 1.49 thorpej s = splvm();
1244 1.23 thorpej simple_lock(&pool_head_slock);
1245 1.23 thorpej
1246 1.23 thorpej if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
1247 1.23 thorpej goto out;
1248 1.3 pk
1249 1.3 pk pp = drainpp;
1250 1.3 pk drainpp = TAILQ_NEXT(pp, pr_poollist);
1251 1.3 pk
1252 1.3 pk pool_reclaim(pp);
1253 1.23 thorpej
1254 1.23 thorpej out:
1255 1.23 thorpej simple_unlock(&pool_head_slock);
1256 1.3 pk splx(s);
1257 1.3 pk }
1258 1.3 pk
1259 1.3 pk
1260 1.3 pk /*
1261 1.3 pk * Diagnostic helpers.
1262 1.3 pk */
1263 1.3 pk void
1264 1.42 thorpej pool_print(struct pool *pp, const char *modif)
1265 1.21 thorpej {
1266 1.21 thorpej int s;
1267 1.21 thorpej
1268 1.49 thorpej s = splvm();
1269 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0) {
1270 1.25 thorpej printf("pool %s is locked; try again later\n",
1271 1.25 thorpej pp->pr_wchan);
1272 1.25 thorpej splx(s);
1273 1.25 thorpej return;
1274 1.25 thorpej }
1275 1.25 thorpej pool_print1(pp, modif, printf);
1276 1.21 thorpej simple_unlock(&pp->pr_slock);
1277 1.21 thorpej splx(s);
1278 1.21 thorpej }
1279 1.21 thorpej
1280 1.25 thorpej void
1281 1.42 thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1282 1.25 thorpej {
1283 1.25 thorpej int didlock = 0;
1284 1.25 thorpej
1285 1.25 thorpej if (pp == NULL) {
1286 1.25 thorpej (*pr)("Must specify a pool to print.\n");
1287 1.25 thorpej return;
1288 1.25 thorpej }
1289 1.25 thorpej
1290 1.25 thorpej /*
1291 1.25 thorpej * Called from DDB; interrupts should be blocked, and all
1292 1.25 thorpej * other processors should be paused. We can skip locking
1293 1.25 thorpej * the pool in this case.
1294 1.25 thorpej *
1295 1.25 thorpej * We do a simple_lock_try() just to print the lock
1296 1.25 thorpej * status, however.
1297 1.25 thorpej */
1298 1.25 thorpej
1299 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1300 1.25 thorpej (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1301 1.25 thorpej else
1302 1.25 thorpej didlock = 1;
1303 1.25 thorpej
1304 1.25 thorpej pool_print1(pp, modif, pr);
1305 1.25 thorpej
1306 1.25 thorpej if (didlock)
1307 1.25 thorpej simple_unlock(&pp->pr_slock);
1308 1.25 thorpej }
1309 1.25 thorpej
1310 1.21 thorpej static void
1311 1.42 thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1312 1.3 pk {
1313 1.25 thorpej struct pool_item_header *ph;
1314 1.44 thorpej struct pool_cache *pc;
1315 1.44 thorpej struct pool_cache_group *pcg;
1316 1.25 thorpej #ifdef DIAGNOSTIC
1317 1.25 thorpej struct pool_item *pi;
1318 1.25 thorpej #endif
1319 1.44 thorpej int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1320 1.25 thorpej char c;
1321 1.25 thorpej
1322 1.25 thorpej while ((c = *modif++) != '\0') {
1323 1.25 thorpej if (c == 'l')
1324 1.25 thorpej print_log = 1;
1325 1.25 thorpej if (c == 'p')
1326 1.25 thorpej print_pagelist = 1;
1327 1.44 thorpej if (c == 'c')
1328 1.44 thorpej print_cache = 1;
1329 1.25 thorpej modif++;
1330 1.25 thorpej }
1331 1.25 thorpej
1332 1.25 thorpej (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1333 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1334 1.25 thorpej pp->pr_roflags);
1335 1.25 thorpej (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1336 1.25 thorpej (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1337 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1338 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1339 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1340 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1341 1.25 thorpej
1342 1.25 thorpej (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1343 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1344 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1345 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1346 1.25 thorpej
1347 1.25 thorpej if (print_pagelist == 0)
1348 1.25 thorpej goto skip_pagelist;
1349 1.25 thorpej
1350 1.25 thorpej if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1351 1.25 thorpej (*pr)("\n\tpage list:\n");
1352 1.25 thorpej for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1353 1.25 thorpej (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1354 1.25 thorpej ph->ph_page, ph->ph_nmissing,
1355 1.25 thorpej (u_long)ph->ph_time.tv_sec,
1356 1.25 thorpej (u_long)ph->ph_time.tv_usec);
1357 1.25 thorpej #ifdef DIAGNOSTIC
1358 1.25 thorpej for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1359 1.25 thorpej pi = TAILQ_NEXT(pi, pi_list)) {
1360 1.25 thorpej if (pi->pi_magic != PI_MAGIC) {
1361 1.25 thorpej (*pr)("\t\t\titem %p, magic 0x%x\n",
1362 1.25 thorpej pi, pi->pi_magic);
1363 1.25 thorpej }
1364 1.25 thorpej }
1365 1.25 thorpej #endif
1366 1.25 thorpej }
1367 1.25 thorpej if (pp->pr_curpage == NULL)
1368 1.25 thorpej (*pr)("\tno current page\n");
1369 1.25 thorpej else
1370 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1371 1.25 thorpej
1372 1.25 thorpej skip_pagelist:
1373 1.25 thorpej
1374 1.25 thorpej if (print_log == 0)
1375 1.25 thorpej goto skip_log;
1376 1.25 thorpej
1377 1.25 thorpej (*pr)("\n");
1378 1.25 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
1379 1.25 thorpej (*pr)("\tno log\n");
1380 1.25 thorpej else
1381 1.25 thorpej pr_printlog(pp, NULL, pr);
1382 1.3 pk
1383 1.25 thorpej skip_log:
1384 1.44 thorpej
1385 1.44 thorpej if (print_cache == 0)
1386 1.44 thorpej goto skip_cache;
1387 1.44 thorpej
1388 1.44 thorpej for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1389 1.44 thorpej pc = TAILQ_NEXT(pc, pc_poollist)) {
1390 1.44 thorpej (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1391 1.44 thorpej pc->pc_allocfrom, pc->pc_freeto);
1392 1.48 thorpej (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1393 1.48 thorpej pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1394 1.44 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1395 1.44 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1396 1.44 thorpej (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1397 1.44 thorpej for (i = 0; i < PCG_NOBJECTS; i++)
1398 1.44 thorpej (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1399 1.44 thorpej }
1400 1.44 thorpej }
1401 1.44 thorpej
1402 1.44 thorpej skip_cache:
1403 1.3 pk
1404 1.25 thorpej pr_enter_check(pp, pr);
1405 1.3 pk }
1406 1.3 pk
1407 1.3 pk int
1408 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1409 1.3 pk {
1410 1.3 pk struct pool_item_header *ph;
1411 1.3 pk int r = 0;
1412 1.3 pk
1413 1.21 thorpej simple_lock(&pp->pr_slock);
1414 1.3 pk
1415 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1416 1.3 pk ph = TAILQ_NEXT(ph, ph_pagelist)) {
1417 1.3 pk
1418 1.3 pk struct pool_item *pi;
1419 1.3 pk int n;
1420 1.3 pk caddr_t page;
1421 1.3 pk
1422 1.3 pk page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1423 1.20 thorpej if (page != ph->ph_page &&
1424 1.20 thorpej (pp->pr_roflags & PR_PHINPAGE) != 0) {
1425 1.3 pk if (label != NULL)
1426 1.3 pk printf("%s: ", label);
1427 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1428 1.16 briggs " at page head addr %p (p %p)\n", pp,
1429 1.3 pk pp->pr_wchan, ph->ph_page,
1430 1.3 pk ph, page);
1431 1.3 pk r++;
1432 1.3 pk goto out;
1433 1.3 pk }
1434 1.3 pk
1435 1.3 pk for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1436 1.3 pk pi != NULL;
1437 1.3 pk pi = TAILQ_NEXT(pi,pi_list), n++) {
1438 1.3 pk
1439 1.3 pk #ifdef DIAGNOSTIC
1440 1.3 pk if (pi->pi_magic != PI_MAGIC) {
1441 1.3 pk if (label != NULL)
1442 1.3 pk printf("%s: ", label);
1443 1.3 pk printf("pool(%s): free list modified: magic=%x;"
1444 1.3 pk " page %p; item ordinal %d;"
1445 1.3 pk " addr %p (p %p)\n",
1446 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page,
1447 1.3 pk n, pi, page);
1448 1.3 pk panic("pool");
1449 1.3 pk }
1450 1.3 pk #endif
1451 1.3 pk page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1452 1.3 pk if (page == ph->ph_page)
1453 1.3 pk continue;
1454 1.3 pk
1455 1.3 pk if (label != NULL)
1456 1.3 pk printf("%s: ", label);
1457 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1458 1.16 briggs " item ordinal %d; addr %p (p %p)\n", pp,
1459 1.3 pk pp->pr_wchan, ph->ph_page,
1460 1.3 pk n, pi, page);
1461 1.3 pk r++;
1462 1.3 pk goto out;
1463 1.3 pk }
1464 1.3 pk }
1465 1.3 pk out:
1466 1.21 thorpej simple_unlock(&pp->pr_slock);
1467 1.3 pk return (r);
1468 1.43 thorpej }
1469 1.43 thorpej
1470 1.43 thorpej /*
1471 1.43 thorpej * pool_cache_init:
1472 1.43 thorpej *
1473 1.43 thorpej * Initialize a pool cache.
1474 1.43 thorpej *
1475 1.43 thorpej * NOTE: If the pool must be protected from interrupts, we expect
1476 1.43 thorpej * to be called at the appropriate interrupt priority level.
1477 1.43 thorpej */
1478 1.43 thorpej void
1479 1.43 thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
1480 1.43 thorpej int (*ctor)(void *, void *, int),
1481 1.43 thorpej void (*dtor)(void *, void *),
1482 1.43 thorpej void *arg)
1483 1.43 thorpej {
1484 1.43 thorpej
1485 1.43 thorpej TAILQ_INIT(&pc->pc_grouplist);
1486 1.43 thorpej simple_lock_init(&pc->pc_slock);
1487 1.43 thorpej
1488 1.43 thorpej pc->pc_allocfrom = NULL;
1489 1.43 thorpej pc->pc_freeto = NULL;
1490 1.43 thorpej pc->pc_pool = pp;
1491 1.43 thorpej
1492 1.43 thorpej pc->pc_ctor = ctor;
1493 1.43 thorpej pc->pc_dtor = dtor;
1494 1.43 thorpej pc->pc_arg = arg;
1495 1.43 thorpej
1496 1.48 thorpej pc->pc_hits = 0;
1497 1.48 thorpej pc->pc_misses = 0;
1498 1.48 thorpej
1499 1.48 thorpej pc->pc_ngroups = 0;
1500 1.48 thorpej
1501 1.48 thorpej pc->pc_nitems = 0;
1502 1.48 thorpej
1503 1.43 thorpej simple_lock(&pp->pr_slock);
1504 1.43 thorpej TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1505 1.43 thorpej simple_unlock(&pp->pr_slock);
1506 1.43 thorpej }
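
/*
 * Illustrative sketch (not part of this file): how a subsystem might
 * pair a pool with a pool cache.  The `struct foo', foo_ctor(),
 * foo_dtor(), foopool and foocache names are hypothetical
 * placeholders; the pool_init() call assumes the ten-argument
 * prototype in <sys/pool.h> at this revision (pool, size, align,
 * ioff, flags, wchan, pagesz, alloc, release, mtype).
 */
#if 0
struct foo {
	int	f_refcnt;
};

struct pool foopool;
struct pool_cache foocache;

static int
foo_ctor(void *arg, void *object, int flags)
{
	struct foo *f = object;

	/* Expensive, reusable initialization goes here. */
	f->f_refcnt = 0;
	return (0);		/* non-zero would fail the allocation */
}

static void
foo_dtor(void *arg, void *object)
{

	/* Undo whatever foo_ctor() set up. */
}

void
foo_init(void)
{

	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl",
	    0, NULL, NULL, M_TEMP);
	pool_cache_init(&foocache, &foopool, foo_ctor, foo_dtor, NULL);
}

void
foo_fini(void)
{

	/* Destroy the cache before the pool that backs it. */
	pool_cache_destroy(&foocache);
	pool_destroy(&foopool);
}
#endif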
1507 1.43 thorpej
1508 1.43 thorpej /*
1509 1.43 thorpej * pool_cache_destroy:
1510 1.43 thorpej *
1511 1.43 thorpej * Destroy a pool cache.
1512 1.43 thorpej */
1513 1.43 thorpej void
1514 1.43 thorpej pool_cache_destroy(struct pool_cache *pc)
1515 1.43 thorpej {
1516 1.43 thorpej struct pool *pp = pc->pc_pool;
1517 1.43 thorpej
1518 1.43 thorpej /* First, invalidate the entire cache. */
1519 1.43 thorpej pool_cache_invalidate(pc);
1520 1.43 thorpej
1521 1.43 thorpej /* ...and remove it from the pool's cache list. */
1522 1.43 thorpej simple_lock(&pp->pr_slock);
1523 1.43 thorpej TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1524 1.43 thorpej simple_unlock(&pp->pr_slock);
1525 1.43 thorpej }
1526 1.43 thorpej
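/*
 * pcg_get:
 *
 *	Remove and return the most recently added object from a cache
 *	group.  The group must have at least one object available.
 */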
1527 1.43 thorpej static __inline void *
1528 1.43 thorpej pcg_get(struct pool_cache_group *pcg)
1529 1.43 thorpej {
1530 1.43 thorpej void *object;
1531 1.43 thorpej u_int idx;
1532 1.43 thorpej
1533 1.43 thorpej KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1534 1.45 thorpej KASSERT(pcg->pcg_avail != 0);
1535 1.43 thorpej idx = --pcg->pcg_avail;
1536 1.43 thorpej
1537 1.43 thorpej KASSERT(pcg->pcg_objects[idx] != NULL);
1538 1.43 thorpej object = pcg->pcg_objects[idx];
1539 1.43 thorpej pcg->pcg_objects[idx] = NULL;
1540 1.43 thorpej
1541 1.43 thorpej return (object);
1542 1.43 thorpej }
1543 1.43 thorpej
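/*
 * pcg_put:
 *
 *	Stash an object in a cache group.  The group must have a free
 *	slot available.
 */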
1544 1.43 thorpej static __inline void
1545 1.43 thorpej pcg_put(struct pool_cache_group *pcg, void *object)
1546 1.43 thorpej {
1547 1.43 thorpej u_int idx;
1548 1.43 thorpej
1549 1.43 thorpej KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1550 1.43 thorpej idx = pcg->pcg_avail++;
1551 1.43 thorpej
1552 1.43 thorpej KASSERT(pcg->pcg_objects[idx] == NULL);
1553 1.43 thorpej pcg->pcg_objects[idx] = object;
1554 1.43 thorpej }
1555 1.43 thorpej
1556 1.43 thorpej /*
1557 1.43 thorpej * pool_cache_get:
1558 1.43 thorpej *
1559 1.43 thorpej * Get an object from a pool cache.
1560 1.43 thorpej */
1561 1.43 thorpej void *
1562 1.43 thorpej pool_cache_get(struct pool_cache *pc, int flags)
1563 1.43 thorpej {
1564 1.43 thorpej struct pool_cache_group *pcg;
1565 1.43 thorpej void *object;
1566 1.43 thorpej
1567 1.43 thorpej simple_lock(&pc->pc_slock);
1568 1.43 thorpej
1569 1.43 thorpej if ((pcg = pc->pc_allocfrom) == NULL) {
1570 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1571 1.43 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1572 1.43 thorpej if (pcg->pcg_avail != 0) {
1573 1.43 thorpej pc->pc_allocfrom = pcg;
1574 1.43 thorpej goto have_group;
1575 1.43 thorpej }
1576 1.43 thorpej }
1577 1.43 thorpej
1578 1.43 thorpej /*
1579 1.43 thorpej * No groups with any available objects. Allocate
1580 1.43 thorpej * a new object, construct it, and return it to
1581 1.43 thorpej * the caller. We will allocate a group, if necessary,
1582 1.43 thorpej * when the object is freed back to the cache.
1583 1.43 thorpej */
1584 1.48 thorpej pc->pc_misses++;
1585 1.43 thorpej simple_unlock(&pc->pc_slock);
1586 1.43 thorpej object = pool_get(pc->pc_pool, flags);
1587 1.43 thorpej if (object != NULL && pc->pc_ctor != NULL) {
1588 1.43 thorpej if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1589 1.43 thorpej pool_put(pc->pc_pool, object);
1590 1.43 thorpej return (NULL);
1591 1.43 thorpej }
1592 1.43 thorpej }
1593 1.43 thorpej return (object);
1594 1.43 thorpej }
1595 1.43 thorpej
1596 1.43 thorpej have_group:
1597 1.48 thorpej pc->pc_hits++;
1598 1.48 thorpej pc->pc_nitems--;
1599 1.43 thorpej object = pcg_get(pcg);
1600 1.43 thorpej
1601 1.43 thorpej if (pcg->pcg_avail == 0)
1602 1.43 thorpej pc->pc_allocfrom = NULL;
1603 1.45 thorpej
1604 1.43 thorpej simple_unlock(&pc->pc_slock);
1605 1.43 thorpej
1606 1.43 thorpej return (object);
1607 1.43 thorpej }
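
/*
 * Illustrative sketch (not part of this file): allocating constructed
 * objects through a pool cache and returning them when done.  The
 * foocache name and foo_use() routine are hypothetical placeholders.
 */
#if 0
int
foo_use(void)
{
	struct foo *f;

	/* May sleep; use PR_NOWAIT in contexts that cannot. */
	f = pool_cache_get(&foocache, PR_WAITOK);
	if (f == NULL)
		return (ENOMEM);

	/* ... use the constructed object ... */

	/* Return it to the cache still constructed, ready for reuse. */
	pool_cache_put(&foocache, f);
	return (0);
}
#endif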
1608 1.43 thorpej
1609 1.43 thorpej /*
1610 1.43 thorpej * pool_cache_put:
1611 1.43 thorpej *
1612 1.43 thorpej * Put an object back to the pool cache.
1613 1.43 thorpej */
1614 1.43 thorpej void
1615 1.43 thorpej pool_cache_put(struct pool_cache *pc, void *object)
1616 1.43 thorpej {
1617 1.43 thorpej struct pool_cache_group *pcg;
1618 1.43 thorpej
1619 1.43 thorpej simple_lock(&pc->pc_slock);
1620 1.43 thorpej
1621 1.43 thorpej if ((pcg = pc->pc_freeto) == NULL) {
1622 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1623 1.43 thorpej pcg = TAILQ_NEXT(pcg, pcg_list)) {
1624 1.43 thorpej if (pcg->pcg_avail != PCG_NOBJECTS) {
1625 1.43 thorpej pc->pc_freeto = pcg;
1626 1.43 thorpej goto have_group;
1627 1.43 thorpej }
1628 1.43 thorpej }
1629 1.43 thorpej
1630 1.43 thorpej /*
1631 1.43 thorpej 		 * No group with a free slot to receive the object.
1632 1.47 thorpej 		 * Attempt to allocate one.
1633 1.43 thorpej */
1634 1.47 thorpej simple_unlock(&pc->pc_slock);
1635 1.43 thorpej pcg = pool_get(&pcgpool, PR_NOWAIT);
1636 1.43 thorpej if (pcg != NULL) {
1637 1.43 thorpej memset(pcg, 0, sizeof(*pcg));
1638 1.47 thorpej simple_lock(&pc->pc_slock);
1639 1.48 thorpej pc->pc_ngroups++;
1640 1.43 thorpej TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1641 1.47 thorpej if (pc->pc_freeto == NULL)
1642 1.47 thorpej pc->pc_freeto = pcg;
1643 1.43 thorpej goto have_group;
1644 1.43 thorpej }
1645 1.43 thorpej
1646 1.43 thorpej /*
1647 1.43 thorpej * Unable to allocate a cache group; destruct the object
1648 1.43 thorpej * and free it back to the pool.
1649 1.43 thorpej */
1650 1.51 thorpej pool_cache_destruct_object(pc, object);
1651 1.43 thorpej return;
1652 1.43 thorpej }
1653 1.43 thorpej
1654 1.43 thorpej have_group:
1655 1.48 thorpej pc->pc_nitems++;
1656 1.43 thorpej pcg_put(pcg, object);
1657 1.43 thorpej
1658 1.43 thorpej if (pcg->pcg_avail == PCG_NOBJECTS)
1659 1.43 thorpej pc->pc_freeto = NULL;
1660 1.43 thorpej
1661 1.43 thorpej simple_unlock(&pc->pc_slock);
1662 1.51 thorpej }
1663 1.51 thorpej
1664 1.51 thorpej /*
1665 1.51 thorpej * pool_cache_destruct_object:
1666 1.51 thorpej *
1667 1.51 thorpej * Force destruction of an object and its release back into
1668 1.51 thorpej * the pool.
1669 1.51 thorpej */
1670 1.51 thorpej void
1671 1.51 thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
1672 1.51 thorpej {
1673 1.51 thorpej
1674 1.51 thorpej if (pc->pc_dtor != NULL)
1675 1.51 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1676 1.51 thorpej pool_put(pc->pc_pool, object);
1677 1.43 thorpej }
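
/*
 * Illustrative sketch (not part of this file): a caller that decides
 * an object should not be recycled through the cache (e.g. it failed
 * a sanity check) can hand it to pool_cache_destruct_object() instead
 * of pool_cache_put(); the object is destructed and returned to the
 * backing pool.  foo_is_damaged(), `foocache' and `f' are hypothetical
 * placeholders.
 *
 *	if (foo_is_damaged(f))
 *		pool_cache_destruct_object(&foocache, f);
 *	else
 *		pool_cache_put(&foocache, f);
 */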
1678 1.43 thorpej
1679 1.43 thorpej /*
1680 1.43 thorpej * pool_cache_do_invalidate:
1681 1.43 thorpej *
1682 1.43 thorpej * This internal function implements pool_cache_invalidate() and
1683 1.43 thorpej * pool_cache_reclaim().
1684 1.43 thorpej */
1685 1.43 thorpej static void
1686 1.43 thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1687 1.43 thorpej void (*putit)(struct pool *, void *, const char *, long))
1688 1.43 thorpej {
1689 1.43 thorpej struct pool_cache_group *pcg, *npcg;
1690 1.43 thorpej void *object;
1691 1.43 thorpej
1692 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1693 1.43 thorpej pcg = npcg) {
1694 1.43 thorpej npcg = TAILQ_NEXT(pcg, pcg_list);
1695 1.43 thorpej while (pcg->pcg_avail != 0) {
1696 1.48 thorpej pc->pc_nitems--;
1697 1.43 thorpej object = pcg_get(pcg);
1698 1.45 thorpej if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1699 1.45 thorpej pc->pc_allocfrom = NULL;
1700 1.43 thorpej if (pc->pc_dtor != NULL)
1701 1.43 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1702 1.43 thorpej (*putit)(pc->pc_pool, object, __FILE__, __LINE__);
1703 1.43 thorpej }
1704 1.43 thorpej if (free_groups) {
1705 1.48 thorpej pc->pc_ngroups--;
1706 1.43 thorpej TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1707 1.46 thorpej if (pc->pc_freeto == pcg)
1708 1.46 thorpej pc->pc_freeto = NULL;
1709 1.43 thorpej pool_put(&pcgpool, pcg);
1710 1.43 thorpej }
1711 1.43 thorpej }
1712 1.43 thorpej }
1713 1.43 thorpej
1714 1.43 thorpej /*
1715 1.43 thorpej * pool_cache_invalidate:
1716 1.43 thorpej *
1717 1.43 thorpej * Invalidate a pool cache (destruct and release all of the
1718 1.43 thorpej * cached objects).
1719 1.43 thorpej */
1720 1.43 thorpej void
1721 1.43 thorpej pool_cache_invalidate(struct pool_cache *pc)
1722 1.43 thorpej {
1723 1.43 thorpej
1724 1.43 thorpej simple_lock(&pc->pc_slock);
1725 1.43 thorpej pool_cache_do_invalidate(pc, 0, _pool_put);
1726 1.43 thorpej simple_unlock(&pc->pc_slock);
1727 1.43 thorpej }
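
/*
 * Illustrative sketch (not part of this file): pool_cache_invalidate()
 * is useful when every cached (constructed) object must be destructed
 * and released, for instance before tearing down state that the
 * constructor captured.  `foocache' is a hypothetical placeholder.
 *
 *	pool_cache_invalidate(&foocache);
 */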
1728 1.43 thorpej
1729 1.43 thorpej /*
1730 1.43 thorpej * pool_cache_reclaim:
1731 1.43 thorpej *
1732 1.43 thorpej * Reclaim a pool cache for pool_reclaim().
1733 1.43 thorpej */
1734 1.43 thorpej static void
1735 1.43 thorpej pool_cache_reclaim(struct pool_cache *pc)
1736 1.43 thorpej {
1737 1.43 thorpej
1738 1.47 thorpej simple_lock(&pc->pc_slock);
1739 1.43 thorpej pool_cache_do_invalidate(pc, 1, pool_do_put);
1740 1.43 thorpej simple_unlock(&pc->pc_slock);
1741 1.3 pk }
1742