1 /* $NetBSD: subr_pool.c,v 1.52 2001/05/09 23:46:03 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include "opt_pool.h"
41 #include "opt_poollog.h"
42 #include "opt_lockdebug.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/errno.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/pool.h>
52 #include <sys/syslog.h>
53
54 #include <uvm/uvm.h>
55
56 /*
57 * Pool resource management utility.
58 *
59 * Memory is allocated in pages which are split into pieces according
60 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
61 * in the pool structure and the individual pool items are on a linked list
62 * headed by `ph_itemlist' in each page header. The memory for building
63 * the page list is either taken from the allocated pages themselves (for
64 * small pool items) or taken from an internal pool of page headers (`phpool').
65 */
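/*
 * Illustrative sketch (not part of the original source): typical use of
 * the pool API declared in <sys/pool.h>.  The structure, pool variable,
 * and malloc type below are hypothetical; passing NULL for both the
 * alloc and release hooks selects the default page allocator.
 *
 *	struct foo { int f_x; };
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...use f...
 *	pool_put(&foo_pool, f);
 */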
66
67 /* List of all pools */
68 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
69
70 /* Private pool for page header structures */
71 static struct pool phpool;
72
73 /* # of seconds to retain page after last use */
74 int pool_inactive_time = 10;
75
76 /* Next candidate for drainage (see pool_drain()) */
77 static struct pool *drainpp;
78
79 /* This spin lock protects both pool_head and drainpp. */
80 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
81
82 struct pool_item_header {
83 /* Page headers */
84 TAILQ_ENTRY(pool_item_header)
85 ph_pagelist; /* pool page list */
86 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
87 LIST_ENTRY(pool_item_header)
88 ph_hashlist; /* Off-page page headers */
89 int ph_nmissing; /* # of chunks in use */
90 caddr_t ph_page; /* this page's address */
91 struct timeval ph_time; /* last referenced */
92 };
93
94 struct pool_item {
95 #ifdef DIAGNOSTIC
96 int pi_magic;
97 #endif
98 #define PI_MAGIC 0xdeadbeef
99 /* Other entries use only this list entry */
100 TAILQ_ENTRY(pool_item) pi_list;
101 };
102
103 #define PR_HASH_INDEX(pp,addr) \
104 (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
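/*
 * For example, with a 4 KB page size (pr_pageshift == 12), all items
 * allocated from one page share the page's virtual page number, so
 * PR_HASH_INDEX() maps every address on that page to the same bucket
 * and pr_find_pagehead() below needs only one short chain walk.
 */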
105
106 /*
107 * Pool cache management.
108 *
109 * Pool caches provide a way for constructed objects to be cached by the
110 * pool subsystem. This can lead to performance improvements by avoiding
111 * needless object construction/destruction; destruction is deferred until
112 * absolutely necessary.
113 *
114 * Caches are grouped into cache groups. Each cache group references
115 * up to 16 constructed objects. When a cache allocates an object
116 * from the pool, it calls the object's constructor and places it into
117 * a cache group. When a cache group frees an object back to the pool,
118 * it first calls the object's destructor. This allows the object to
119 * persist in constructed form while freed to the cache.
120 *
121 * Multiple caches may exist for each pool. This allows a single
122 * object type to have multiple constructed forms. The pool references
123 * each cache, so that when a pool is drained by the pagedaemon, it can
124 * drain each individual cache as well. Each time a cache is drained,
125 * the most idle cache group is freed to the pool in its entirety.
126 *
127 * Pool caches are laid on top of pools. By layering them, we can avoid
128 * the complexity of cache management for pools which would not benefit
129 * from it.
130 */
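/*
 * Illustrative sketch (hypothetical names): layering a cache over the
 * pool from the example above.  The constructor must return 0 on
 * success; the destructor undoes whatever the constructor did.
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...use f...
 *	pool_cache_put(&foo_cache, f);
 */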
131
132 /* The cache group pool. */
133 static struct pool pcgpool;
134
135 /* The pool cache group. */
136 #define PCG_NOBJECTS 16
137 struct pool_cache_group {
138 TAILQ_ENTRY(pool_cache_group)
139 pcg_list; /* link in the pool cache's group list */
140 u_int pcg_avail; /* # available objects */
141 /* pointers to the objects */
142 void *pcg_objects[PCG_NOBJECTS];
143 };
144
145 static void pool_cache_reclaim(struct pool_cache *);
146
147 static int pool_catchup(struct pool *);
148 static int pool_prime_page(struct pool *, caddr_t, int);
149 static void *pool_page_alloc(unsigned long, int, int);
150 static void pool_page_free(void *, unsigned long, int);
151
152 static void pool_print1(struct pool *, const char *,
153 void (*)(const char *, ...));
154
155 /*
156 * Pool log entry. An array of these is allocated in pool_init().
157 */
158 struct pool_log {
159 const char *pl_file;
160 long pl_line;
161 int pl_action;
162 #define PRLOG_GET 1
163 #define PRLOG_PUT 2
164 void *pl_addr;
165 };
166
167 /* Number of entries in pool log buffers */
168 #ifndef POOL_LOGSIZE
169 #define POOL_LOGSIZE 10
170 #endif
171
172 int pool_logsize = POOL_LOGSIZE;
173
174 #ifdef DIAGNOSTIC
175 static __inline void
176 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
177 {
178 int n = pp->pr_curlogentry;
179 struct pool_log *pl;
180
181 if ((pp->pr_roflags & PR_LOGGING) == 0)
182 return;
183
184 /*
185 * Fill in the current entry. Wrap around and overwrite
186 * the oldest entry if necessary.
187 */
188 pl = &pp->pr_log[n];
189 pl->pl_file = file;
190 pl->pl_line = line;
191 pl->pl_action = action;
192 pl->pl_addr = v;
193 if (++n >= pp->pr_logsize)
194 n = 0;
195 pp->pr_curlogentry = n;
196 }
197
198 static void
199 pr_printlog(struct pool *pp, struct pool_item *pi,
200 void (*pr)(const char *, ...))
201 {
202 int i = pp->pr_logsize;
203 int n = pp->pr_curlogentry;
204
205 if ((pp->pr_roflags & PR_LOGGING) == 0)
206 return;
207
208 /*
209 * Print all entries in this pool's log.
210 */
211 while (i-- > 0) {
212 struct pool_log *pl = &pp->pr_log[n];
213 if (pl->pl_action != 0) {
214 if (pi == NULL || pi == pl->pl_addr) {
215 (*pr)("\tlog entry %d:\n", i);
216 (*pr)("\t\taction = %s, addr = %p\n",
217 pl->pl_action == PRLOG_GET ? "get" : "put",
218 pl->pl_addr);
219 (*pr)("\t\tfile: %s at line %lu\n",
220 pl->pl_file, pl->pl_line);
221 }
222 }
223 if (++n >= pp->pr_logsize)
224 n = 0;
225 }
226 }
227
228 static __inline void
229 pr_enter(struct pool *pp, const char *file, long line)
230 {
231
232 if (__predict_false(pp->pr_entered_file != NULL)) {
233 printf("pool %s: reentrancy at file %s line %ld\n",
234 pp->pr_wchan, file, line);
235 printf(" previous entry at file %s line %ld\n",
236 pp->pr_entered_file, pp->pr_entered_line);
237 panic("pr_enter");
238 }
239
240 pp->pr_entered_file = file;
241 pp->pr_entered_line = line;
242 }
243
244 static __inline void
245 pr_leave(struct pool *pp)
246 {
247
248 if (__predict_false(pp->pr_entered_file == NULL)) {
249 printf("pool %s not entered?\n", pp->pr_wchan);
250 panic("pr_leave");
251 }
252
253 pp->pr_entered_file = NULL;
254 pp->pr_entered_line = 0;
255 }
256
257 static __inline void
258 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
259 {
260
261 if (pp->pr_entered_file != NULL)
262 (*pr)("\n\tcurrently entered from file %s line %ld\n",
263 pp->pr_entered_file, pp->pr_entered_line);
264 }
265 #else
266 #define pr_log(pp, v, action, file, line)
267 #define pr_printlog(pp, pi, pr)
268 #define pr_enter(pp, file, line)
269 #define pr_leave(pp)
270 #define pr_enter_check(pp, pr)
271 #endif /* DIAGNOSTIC */
272
273 /*
274 * Return the pool page header based on page address.
275 */
276 static __inline struct pool_item_header *
277 pr_find_pagehead(struct pool *pp, caddr_t page)
278 {
279 struct pool_item_header *ph;
280
281 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
282 return ((struct pool_item_header *)(page + pp->pr_phoffset));
283
284 for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
285 ph != NULL;
286 ph = LIST_NEXT(ph, ph_hashlist)) {
287 if (ph->ph_page == page)
288 return (ph);
289 }
290 return (NULL);
291 }
292
293 /*
294 * Remove a page from the pool.
295 */
296 static __inline void
297 pr_rmpage(struct pool *pp, struct pool_item_header *ph)
298 {
299
300 /*
301 * If the page was idle, decrement the idle page count.
302 */
303 if (ph->ph_nmissing == 0) {
304 #ifdef DIAGNOSTIC
305 if (pp->pr_nidle == 0)
306 panic("pr_rmpage: nidle inconsistent");
307 if (pp->pr_nitems < pp->pr_itemsperpage)
308 panic("pr_rmpage: nitems inconsistent");
309 #endif
310 pp->pr_nidle--;
311 }
312
313 pp->pr_nitems -= pp->pr_itemsperpage;
314
315 /*
316 * Unlink a page from the pool and release it.
317 */
318 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
319 (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
320 pp->pr_npages--;
321 pp->pr_npagefree++;
322
323 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
324 int s;
325 LIST_REMOVE(ph, ph_hashlist);
326 s = splhigh();
327 pool_put(&phpool, ph);
328 splx(s);
329 }
330
331 if (pp->pr_curpage == ph) {
332 /*
333 * Find a new non-empty page header, if any.
334 * Start search from the page head, to increase the
335 * chance for "high water" pages to be freed.
336 */
337 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
338 ph = TAILQ_NEXT(ph, ph_pagelist))
339 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
340 break;
341
342 pp->pr_curpage = ph;
343 }
344 }
345
346 /*
347 * Initialize the given pool resource structure.
348 *
349 * We export this routine to allow other kernel parts to declare
350 * static pools that must be initialized before malloc() is available.
351 */
352 void
353 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
354 const char *wchan, size_t pagesz,
355 void *(*alloc)(unsigned long, int, int),
356 void (*release)(void *, unsigned long, int),
357 int mtype)
358 {
359 int off, slack, i;
360
361 #ifdef POOL_DIAGNOSTIC
362 /*
363 * Always log if POOL_DIAGNOSTIC is defined.
364 */
365 if (pool_logsize != 0)
366 flags |= PR_LOGGING;
367 #endif
368
369 /*
370 * Check arguments and construct default values.
371 */
372 if (!powerof2(pagesz))
373 panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
374
375 if (alloc == NULL && release == NULL) {
376 alloc = pool_page_alloc;
377 release = pool_page_free;
378 pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */
379 } else if ((alloc != NULL && release != NULL) == 0) {
380 /* If you specify one, you must specify both. */
381 panic("pool_init: must specify alloc and release together");
382 }
383
384 if (pagesz == 0)
385 pagesz = PAGE_SIZE;
386
387 if (align == 0)
388 align = ALIGN(1);
389
390 if (size < sizeof(struct pool_item))
391 size = sizeof(struct pool_item);
392
393 size = ALIGN(size);
394 if (size > pagesz)
395 panic("pool_init: pool item size (%lu) too large",
396 (u_long)size);
397
398 /*
399 * Initialize the pool structure.
400 */
401 TAILQ_INIT(&pp->pr_pagelist);
402 TAILQ_INIT(&pp->pr_cachelist);
403 pp->pr_curpage = NULL;
404 pp->pr_npages = 0;
405 pp->pr_minitems = 0;
406 pp->pr_minpages = 0;
407 pp->pr_maxpages = UINT_MAX;
408 pp->pr_roflags = flags;
409 pp->pr_flags = 0;
410 pp->pr_size = size;
411 pp->pr_align = align;
412 pp->pr_wchan = wchan;
413 pp->pr_mtype = mtype;
414 pp->pr_alloc = alloc;
415 pp->pr_free = release;
416 pp->pr_pagesz = pagesz;
417 pp->pr_pagemask = ~(pagesz - 1);
418 pp->pr_pageshift = ffs(pagesz) - 1;
419 pp->pr_nitems = 0;
420 pp->pr_nout = 0;
421 pp->pr_hardlimit = UINT_MAX;
422 pp->pr_hardlimit_warning = NULL;
423 pp->pr_hardlimit_ratecap.tv_sec = 0;
424 pp->pr_hardlimit_ratecap.tv_usec = 0;
425 pp->pr_hardlimit_warning_last.tv_sec = 0;
426 pp->pr_hardlimit_warning_last.tv_usec = 0;
427
428 /*
429 * Decide whether to put the page header off page to avoid
430 * wasting too large a part of the page. Off-page page headers
431 * go on a hash table, so we can match a returned item
432 * with its header based on the page address.
433 * We use 1/16 of the page size as the threshold (XXX: tune)
434 */
435 if (pp->pr_size < pagesz/16) {
436 /* Use the end of the page for the page header */
437 pp->pr_roflags |= PR_PHINPAGE;
438 pp->pr_phoffset = off =
439 pagesz - ALIGN(sizeof(struct pool_item_header));
440 } else {
441 /* The page header will be taken from our page header pool */
442 pp->pr_phoffset = 0;
443 off = pagesz;
444 for (i = 0; i < PR_HASHTABSIZE; i++) {
445 LIST_INIT(&pp->pr_hashtab[i]);
446 }
447 }
448
449 /*
450 * Alignment is to take place at `ioff' within the item. This means
451 * we must reserve up to `align - 1' bytes on the page to allow
452 * appropriate positioning of each item.
453 *
454 * Silently enforce `0 <= ioff < align'.
455 */
456 pp->pr_itemoffset = ioff = ioff % align;
457 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
458 KASSERT(pp->pr_itemsperpage != 0);
459
460 /*
461 * Use the slack between the chunks and the page header
462 * for "cache coloring".
463 */
464 slack = off - pp->pr_itemsperpage * pp->pr_size;
465 pp->pr_maxcolor = (slack / align) * align;
466 pp->pr_curcolor = 0;
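/*
 * Worked example with illustrative figures: pagesz = 4096 and an
 * in-page header leaving off = 4056, with size = 64, align = 8 and
 * ioff = 0.  Then itemsperpage = 4056 / 64 = 63, slack =
 * 4056 - 63 * 64 = 24, and pr_maxcolor = 24, so successive pages
 * cycle through the start offsets 0, 8, 16 and 24.
 */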
467
468 pp->pr_nget = 0;
469 pp->pr_nfail = 0;
470 pp->pr_nput = 0;
471 pp->pr_npagealloc = 0;
472 pp->pr_npagefree = 0;
473 pp->pr_hiwat = 0;
474 pp->pr_nidle = 0;
475
476 if (flags & PR_LOGGING) {
477 if (kmem_map == NULL ||
478 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
479 M_TEMP, M_NOWAIT)) == NULL)
480 pp->pr_roflags &= ~PR_LOGGING;
481 pp->pr_curlogentry = 0;
482 pp->pr_logsize = pool_logsize;
483 }
484
485 pp->pr_entered_file = NULL;
486 pp->pr_entered_line = 0;
487
488 simple_lock_init(&pp->pr_slock);
489
490 /*
491 * Initialize private page header pool and cache magazine pool if we
492 * haven't done so yet.
493 * XXX LOCKING.
494 */
495 if (phpool.pr_size == 0) {
496 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
497 0, "phpool", 0, 0, 0, 0);
498 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
499 0, "pcgpool", 0, 0, 0, 0);
500 }
501
502 /* Insert into the list of all pools. */
503 simple_lock(&pool_head_slock);
504 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
505 simple_unlock(&pool_head_slock);
506 }
507
508 /*
509 * De-commission a pool resource.
510 */
511 void
512 pool_destroy(struct pool *pp)
513 {
514 struct pool_item_header *ph;
515 struct pool_cache *pc;
516
517 /* Destroy all caches for this pool. */
518 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
519 pool_cache_destroy(pc);
520
521 #ifdef DIAGNOSTIC
522 if (pp->pr_nout != 0) {
523 pr_printlog(pp, NULL, printf);
524 panic("pool_destroy: pool busy: still out: %u\n",
525 pp->pr_nout);
526 }
527 #endif
528
529 /* Remove all pages */
530 if ((pp->pr_roflags & PR_STATIC) == 0)
531 while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
532 pr_rmpage(pp, ph);
533
534 /* Remove from global pool list */
535 simple_lock(&pool_head_slock);
536 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
537 /* XXX Only clear this if we were drainpp? */
538 drainpp = NULL;
539 simple_unlock(&pool_head_slock);
540
541 if ((pp->pr_roflags & PR_LOGGING) != 0)
542 free(pp->pr_log, M_TEMP);
543
544 if (pp->pr_roflags & PR_FREEHEADER)
545 free(pp, M_POOL);
546 }
547
548
549 /*
550 * Grab an item from the pool; must be called at appropriate spl level
551 */
552 void *
553 _pool_get(struct pool *pp, int flags, const char *file, long line)
554 {
555 void *v;
556 struct pool_item *pi;
557 struct pool_item_header *ph;
558
559 #ifdef DIAGNOSTIC
560 if (__predict_false((pp->pr_roflags & PR_STATIC) &&
561 (flags & PR_MALLOCOK))) {
562 pr_printlog(pp, NULL, printf);
563 panic("pool_get: static");
564 }
565 #endif
566
567 if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
568 (flags & PR_WAITOK) != 0))
569 panic("pool_get: must have NOWAIT");
570
571 simple_lock(&pp->pr_slock);
572 pr_enter(pp, file, line);
573
574 startover:
575 /*
576 * Check to see if we've reached the hard limit. If we have,
577 * and we can wait, then wait until an item has been returned to
578 * the pool.
579 */
580 #ifdef DIAGNOSTIC
581 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
582 pr_leave(pp);
583 simple_unlock(&pp->pr_slock);
584 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
585 }
586 #endif
587 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
588 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
589 /*
590 * XXX: A warning isn't logged in this case. Should
591 * it be?
592 */
593 pp->pr_flags |= PR_WANTED;
594 pr_leave(pp);
595 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
596 pr_enter(pp, file, line);
597 goto startover;
598 }
599
600 /*
601 * Log a message that the hard limit has been hit.
602 */
603 if (pp->pr_hardlimit_warning != NULL &&
604 ratecheck(&pp->pr_hardlimit_warning_last,
605 &pp->pr_hardlimit_ratecap))
606 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
607
608 if (flags & PR_URGENT)
609 panic("pool_get: urgent");
610
611 pp->pr_nfail++;
612
613 pr_leave(pp);
614 simple_unlock(&pp->pr_slock);
615 return (NULL);
616 }
617
618 /*
619 * The convention we use is that if `curpage' is not NULL, then
620 * it points at a non-empty bucket. In particular, `curpage'
621 * never points at a page header which has PR_PHINPAGE set and
622 * has no items in its bucket.
623 */
624 if ((ph = pp->pr_curpage) == NULL) {
625 void *v;
626
627 #ifdef DIAGNOSTIC
628 if (pp->pr_nitems != 0) {
629 simple_unlock(&pp->pr_slock);
630 printf("pool_get: %s: curpage NULL, nitems %u\n",
631 pp->pr_wchan, pp->pr_nitems);
632 panic("pool_get: nitems inconsistent\n");
633 }
634 #endif
635
636 /*
637 * Call the back-end page allocator for more memory.
638 * Release the pool lock, as the back-end page allocator
639 * may block.
640 */
641 pr_leave(pp);
642 simple_unlock(&pp->pr_slock);
643 v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
644 simple_lock(&pp->pr_slock);
645 pr_enter(pp, file, line);
646
647 if (v == NULL) {
648 /*
649 * We were unable to allocate a page, but
650 * we released the lock during allocation,
651 * so perhaps items were freed back to the
652 * pool. Check for this case.
653 */
654 if (pp->pr_curpage != NULL)
655 goto startover;
656
657 if (flags & PR_URGENT)
658 panic("pool_get: urgent");
659
660 if ((flags & PR_WAITOK) == 0) {
661 pp->pr_nfail++;
662 pr_leave(pp);
663 simple_unlock(&pp->pr_slock);
664 return (NULL);
665 }
666
667 /*
668 * Wait for items to be returned to this pool.
669 *
670 * XXX: we actually want to wait just until
671 * the page allocator has memory again. Depending
672 * on this pool's usage, we might get stuck here
673 * for a long time.
674 *
675 * XXX: maybe we should wake up once a second and
676 * try again?
677 */
678 pp->pr_flags |= PR_WANTED;
679 pr_leave(pp);
680 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
681 pr_enter(pp, file, line);
682 goto startover;
683 }
684
685 /* We have more memory; add it to the pool */
686 if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) {
687 /*
688 * Most likely we weren't allowed to wait and
689 * therefore couldn't allocate a page header.
690 */
691 (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
692 pp->pr_nfail++;
693 pr_leave(pp);
694 simple_unlock(&pp->pr_slock);
695 return (NULL);
696 }
697 pp->pr_npagealloc++;
698
699 /* Start the allocation process over. */
700 goto startover;
701 }
702
703 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
704 pr_leave(pp);
705 simple_unlock(&pp->pr_slock);
706 panic("pool_get: %s: page empty", pp->pr_wchan);
707 }
708 #ifdef DIAGNOSTIC
709 if (__predict_false(pp->pr_nitems == 0)) {
710 pr_leave(pp);
711 simple_unlock(&pp->pr_slock);
712 printf("pool_get: %s: items on itemlist, nitems %u\n",
713 pp->pr_wchan, pp->pr_nitems);
714 panic("pool_get: nitems inconsistent\n");
715 }
716 #endif
717 pr_log(pp, v, PRLOG_GET, file, line);
718
719 #ifdef DIAGNOSTIC
720 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
721 pr_printlog(pp, pi, printf);
722 panic("pool_get(%s): free list modified: magic=%x; page %p;"
723 " item addr %p\n",
724 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
725 }
726 #endif
727
728 /*
729 * Remove from item list.
730 */
731 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
732 pp->pr_nitems--;
733 pp->pr_nout++;
734 if (ph->ph_nmissing == 0) {
735 #ifdef DIAGNOSTIC
736 if (__predict_false(pp->pr_nidle == 0))
737 panic("pool_get: nidle inconsistent");
738 #endif
739 pp->pr_nidle--;
740 }
741 ph->ph_nmissing++;
742 if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
743 #ifdef DIAGNOSTIC
744 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
745 pr_leave(pp);
746 simple_unlock(&pp->pr_slock);
747 panic("pool_get: %s: nmissing inconsistent",
748 pp->pr_wchan);
749 }
750 #endif
751 /*
752 * Find a new non-empty page header, if any.
753 * Start search from the page head, to increase
754 * the chance for "high water" pages to be freed.
755 *
756 * Migrate empty pages to the end of the list. This
757 * will speed the update of curpage as pages become
758 * idle. Empty pages intermingled with idle pages
759 * are no big deal. As soon as a page becomes un-empty,
760 * it will move back to the head of the list.
761 */
762 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
763 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
764 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
765 ph = TAILQ_NEXT(ph, ph_pagelist))
766 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
767 break;
768
769 pp->pr_curpage = ph;
770 }
771
772 pp->pr_nget++;
773
774 /*
775 * If we have a low water mark and we are now below that low
776 * water mark, add more items to the pool.
777 */
778 if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
779 /*
780 * XXX: Should we log a warning? Should we set up a timeout
781 * to try again in a second or so? The latter could break
782 * a caller's assumptions about interrupt protection, etc.
783 */
784 }
785
786 pr_leave(pp);
787 simple_unlock(&pp->pr_slock);
788 return (v);
789 }
790
791 /*
792 * Internal version of pool_put(). Pool is already locked/entered.
793 */
794 static void
795 pool_do_put(struct pool *pp, void *v, const char *file, long line)
796 {
797 struct pool_item *pi = v;
798 struct pool_item_header *ph;
799 caddr_t page;
800 int s;
801
802 page = (caddr_t)((u_long)v & pp->pr_pagemask);
803
804 #ifdef DIAGNOSTIC
805 if (__predict_false(pp->pr_nout == 0)) {
806 printf("pool %s: putting with none out\n",
807 pp->pr_wchan);
808 panic("pool_put");
809 }
810 #endif
811
812 pr_log(pp, v, PRLOG_PUT, file, line);
813
814 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
815 pr_printlog(pp, NULL, printf);
816 panic("pool_put: %s: page header missing", pp->pr_wchan);
817 }
818
819 #ifdef LOCKDEBUG
820 /*
821 * Check if we're freeing a locked simple lock.
822 */
823 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
824 #endif
825
826 /*
827 * Return to item list.
828 */
829 #ifdef DIAGNOSTIC
830 pi->pi_magic = PI_MAGIC;
831 #endif
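/*
 * In DEBUG kernels, additionally poison the entire item with
 * PI_MAGIC words, so a caller that dereferences a stale pointer
 * reads an obvious 0xdeadbeef pattern.
 */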
832 #ifdef DEBUG
833 {
834 int i, *ip = v;
835
836 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
837 *ip++ = PI_MAGIC;
838 }
839 }
840 #endif
841
842 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
843 ph->ph_nmissing--;
844 pp->pr_nput++;
845 pp->pr_nitems++;
846 pp->pr_nout--;
847
848 /* Cancel "pool empty" condition if it exists */
849 if (pp->pr_curpage == NULL)
850 pp->pr_curpage = ph;
851
852 if (pp->pr_flags & PR_WANTED) {
853 pp->pr_flags &= ~PR_WANTED;
854 if (ph->ph_nmissing == 0)
855 pp->pr_nidle++;
856 wakeup((caddr_t)pp);
857 return;
858 }
859
860 /*
861 * If this page is now complete, do one of two things:
862 *
863 * (1) If we have more pages than the page high water
864 * mark, free the page back to the system.
865 *
866 * (2) Move it to the end of the page list, so that
867 * we minimize our chances of fragmenting the
868 * pool. Idle pages (along with completely empty
869 * pages, so that we find un-empty pages more
870 * quickly when we update curpage) migrate to the
871 * end of the list, where the pagedaemon can more
872 * easily sweep them up when pages are scarce.
873 */
874 if (ph->ph_nmissing == 0) {
875 pp->pr_nidle++;
876 if (pp->pr_npages > pp->pr_maxpages) {
877 pr_rmpage(pp, ph);
878 } else {
879 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
880 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
881
882 /*
883 * Update the timestamp on the page. A page must
884 * be idle for some period of time before it can
885 * be reclaimed by the pagedaemon. This minimizes
886 * ping-pong'ing for memory.
887 */
888 s = splclock();
889 ph->ph_time = mono_time;
890 splx(s);
891
892 /*
893 * Update the current page pointer. Just look for
894 * the first page with any free items.
895 *
896 * XXX: Maybe we want an option to look for the
897 * page with the fewest available items, to minimize
898 * fragmentation?
899 */
900 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
901 ph = TAILQ_NEXT(ph, ph_pagelist))
902 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
903 break;
904
905 pp->pr_curpage = ph;
906 }
907 }
908 /*
909 * If the page has just become un-empty, move it to the head of
910 * the list, and make it the current page. The next allocation
911 * will get the item from this page, instead of further fragmenting
912 * the pool.
913 */
914 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
915 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
916 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
917 pp->pr_curpage = ph;
918 }
919 }
920
921 /*
922 * Return resource to the pool; must be called at appropriate spl level
923 */
924 void
925 _pool_put(struct pool *pp, void *v, const char *file, long line)
926 {
927
928 simple_lock(&pp->pr_slock);
929 pr_enter(pp, file, line);
930
931 pool_do_put(pp, v, file, line);
932
933 pr_leave(pp);
934 simple_unlock(&pp->pr_slock);
935 }
936
937 /*
938 * Add a page worth of items to the pool.
939 *
940 * Note, we must be called with the pool descriptor LOCKED.
941 */
942 static int
943 pool_prime_page(struct pool *pp, caddr_t storage, int flags)
944 {
945 struct pool_item *pi;
946 struct pool_item_header *ph;
947 caddr_t cp = storage;
948 unsigned int align = pp->pr_align;
949 unsigned int ioff = pp->pr_itemoffset;
950 int s, n;
951
952 if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
953 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
954
955 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
956 ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
957 } else {
958 s = splhigh();
959 ph = pool_get(&phpool, flags);
960 splx(s);
961 if (ph == NULL)
962 return (ENOMEM);
963 LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
964 ph, ph_hashlist);
965 }
966
967 /*
968 * Insert page header.
969 */
970 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
971 TAILQ_INIT(&ph->ph_itemlist);
972 ph->ph_page = storage;
973 ph->ph_nmissing = 0;
974 memset(&ph->ph_time, 0, sizeof(ph->ph_time));
975
976 pp->pr_nidle++;
977
978 /*
979 * Color this page.
980 */
981 cp = (caddr_t)(cp + pp->pr_curcolor);
982 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
983 pp->pr_curcolor = 0;
984
985 /*
986 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
987 */
988 if (ioff != 0)
989 cp = (caddr_t)(cp + (align - ioff));
990
991 /*
992 * Insert remaining chunks on the bucket list.
993 */
994 n = pp->pr_itemsperpage;
995 pp->pr_nitems += n;
996
997 while (n--) {
998 pi = (struct pool_item *)cp;
999
1000 /* Insert on page list */
1001 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1002 #ifdef DIAGNOSTIC
1003 pi->pi_magic = PI_MAGIC;
1004 #endif
1005 cp = (caddr_t)(cp + pp->pr_size);
1006 }
1007
1008 /*
1009 * If the pool was depleted, point at the new page.
1010 */
1011 if (pp->pr_curpage == NULL)
1012 pp->pr_curpage = ph;
1013
1014 if (++pp->pr_npages > pp->pr_hiwat)
1015 pp->pr_hiwat = pp->pr_npages;
1016
1017 return (0);
1018 }
1019
1020 /*
1021 * Used by pool_get() when nitems drops below the low water mark. This
1022 * is used to catch nitems up with the low water mark.
1023 *
1024 * Note 1: we never wait for memory here; we let the caller decide what to do.
1025 *
1026 * Note 2: this doesn't work with static pools.
1027 *
1028 * Note 3: we must be called with the pool already locked, and we return
1029 * with it locked.
1030 */
1031 static int
1032 pool_catchup(struct pool *pp)
1033 {
1034 caddr_t cp;
1035 int error = 0;
1036
1037 if (pp->pr_roflags & PR_STATIC) {
1038 /*
1039 * We dropped below the low water mark, and this is not a
1040 * good thing. Log a warning.
1041 *
1042 * XXX: rate-limit this?
1043 */
1044 printf("WARNING: static pool `%s' dropped below low water "
1045 "mark\n", pp->pr_wchan);
1046 return (0);
1047 }
1048
1049 while (pp->pr_nitems < pp->pr_minitems) {
1050 /*
1051 * Call the page back-end allocator for more memory.
1052 *
1053 * XXX: We never wait, so should we bother unlocking
1054 * the pool descriptor?
1055 */
1056 simple_unlock(&pp->pr_slock);
1057 cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
1058 simple_lock(&pp->pr_slock);
1059 if (__predict_false(cp == NULL)) {
1060 error = ENOMEM;
1061 break;
1062 }
1063 if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
1064 (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
1065 break;
1066 }
1067 pp->pr_npagealloc++;
1068 }
1069
1070 return (error);
1071 }
1072
1073 void
1074 pool_setlowat(struct pool *pp, int n)
1075 {
1076 int error;
1077
1078 simple_lock(&pp->pr_slock);
1079
1080 pp->pr_minitems = n;
1081 pp->pr_minpages = (n == 0)
1082 ? 0
1083 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1084
1085 /* Make sure we're caught up with the newly-set low water mark. */
1086 if ((pp->pr_nitems < pp->pr_minitems) &&
1087 (error = pool_catchup(pp)) != 0) {
1088 /*
1089 * XXX: Should we log a warning? Should we set up a timeout
1090 * to try again in a second or so? The latter could break
1091 * a caller's assumptions about interrupt protection, etc.
1092 */
1093 }
1094
1095 simple_unlock(&pp->pr_slock);
1096 }
1097
1098 void
1099 pool_sethiwat(struct pool *pp, int n)
1100 {
1101
1102 simple_lock(&pp->pr_slock);
1103
1104 pp->pr_maxpages = (n == 0)
1105 ? 0
1106 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1107
1108 simple_unlock(&pp->pr_slock);
1109 }
1110
1111 void
1112 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1113 {
1114
1115 simple_lock(&pp->pr_slock);
1116
1117 pp->pr_hardlimit = n;
1118 pp->pr_hardlimit_warning = warnmess;
1119 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1120 pp->pr_hardlimit_warning_last.tv_sec = 0;
1121 pp->pr_hardlimit_warning_last.tv_usec = 0;
1122
1123 /*
1124 * In-line version of pool_sethiwat(), because we don't want to
1125 * release the lock.
1126 */
1127 pp->pr_maxpages = (n == 0)
1128 ? 0
1129 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1130
1131 simple_unlock(&pp->pr_slock);
1132 }
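/*
 * Illustrative sketch (hypothetical figures): keep at least 16 items
 * primed, start releasing complete pages above roughly 64 items, and
 * refuse to exceed 128 outstanding items, warning at most once every
 * 30 seconds.
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 64);
 *	pool_sethardlimit(&foo_pool, 128,
 *	    "WARNING: foo_pool limit reached", 30);
 */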
1133
1134 /*
1135 * Default page allocator.
1136 */
1137 static void *
1138 pool_page_alloc(unsigned long sz, int flags, int mtype)
1139 {
1140 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1141
1142 return ((void *)uvm_km_alloc_poolpage(waitok));
1143 }
1144
1145 static void
1146 pool_page_free(void *v, unsigned long sz, int mtype)
1147 {
1148
1149 uvm_km_free_poolpage((vaddr_t)v);
1150 }
1151
1152 /*
1153 * Alternate pool page allocator for pools that know they will
1154 * never be accessed in interrupt context.
1155 */
1156 void *
1157 pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
1158 {
1159 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1160
1161 return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
1162 waitok));
1163 }
1164
1165 void
1166 pool_page_free_nointr(void *v, unsigned long sz, int mtype)
1167 {
1168
1169 uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
1170 }
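/*
 * Illustrative sketch: a pool known never to be used from interrupt
 * context can supply this allocator pair at creation time (the pool
 * and malloc type are hypothetical); note that alloc and release must
 * always be given together.
 *
 *	pool_init(&bar_pool, sizeof(struct bar), 0, 0, 0, "barpl",
 *	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_TEMP);
 */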
1171
1172
1173 /*
1174 * Release all complete pages that have not been used recently.
1175 */
1176 void
1177 _pool_reclaim(struct pool *pp, const char *file, long line)
1178 {
1179 struct pool_item_header *ph, *phnext;
1180 struct pool_cache *pc;
1181 struct timeval curtime;
1182 int s;
1183
1184 if (pp->pr_roflags & PR_STATIC)
1185 return;
1186
1187 if (simple_lock_try(&pp->pr_slock) == 0)
1188 return;
1189 pr_enter(pp, file, line);
1190
1191 /*
1192 * Reclaim items from the pool's caches.
1193 */
1194 for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1195 pc = TAILQ_NEXT(pc, pc_poollist))
1196 pool_cache_reclaim(pc);
1197
1198 s = splclock();
1199 curtime = mono_time;
1200 splx(s);
1201
1202 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1203 phnext = TAILQ_NEXT(ph, ph_pagelist);
1204
1205 /* Check our minimum page claim */
1206 if (pp->pr_npages <= pp->pr_minpages)
1207 break;
1208
1209 if (ph->ph_nmissing == 0) {
1210 struct timeval diff;
1211 timersub(&curtime, &ph->ph_time, &diff);
1212 if (diff.tv_sec < pool_inactive_time)
1213 continue;
1214
1215 /*
1216 * If freeing this page would put us below
1217 * the low water mark, stop now.
1218 */
1219 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1220 pp->pr_minitems)
1221 break;
1222
1223 pr_rmpage(pp, ph);
1224 }
1225 }
1226
1227 pr_leave(pp);
1228 simple_unlock(&pp->pr_slock);
1229 }
1230
1231
1232 /*
1233 * Drain pools, one at a time.
1234 *
1235 * Note, we must never be called from an interrupt context.
1236 */
1237 void
1238 pool_drain(void *arg)
1239 {
1240 struct pool *pp;
1241 int s;
1242
1243 s = splvm();
1244 simple_lock(&pool_head_slock);
1245
1246 if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
1247 goto out;
1248
1249 pp = drainpp;
1250 drainpp = TAILQ_NEXT(pp, pr_poollist);
1251
1252 pool_reclaim(pp);
1253
1254 out:
1255 simple_unlock(&pool_head_slock);
1256 splx(s);
1257 }
1258
1259
1260 /*
1261 * Diagnostic helpers.
1262 */
1263 void
1264 pool_print(struct pool *pp, const char *modif)
1265 {
1266 int s;
1267
1268 s = splvm();
1269 if (simple_lock_try(&pp->pr_slock) == 0) {
1270 printf("pool %s is locked; try again later\n",
1271 pp->pr_wchan);
1272 splx(s);
1273 return;
1274 }
1275 pool_print1(pp, modif, printf);
1276 simple_unlock(&pp->pr_slock);
1277 splx(s);
1278 }
1279
1280 void
1281 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1282 {
1283 int didlock = 0;
1284
1285 if (pp == NULL) {
1286 (*pr)("Must specify a pool to print.\n");
1287 return;
1288 }
1289
1290 /*
1291 * Called from DDB; interrupts should be blocked, and all
1292 * other processors should be paused. We can skip locking
1293 * the pool in this case.
1294 *
1295 * We do a simple_lock_try() just to print the lock
1296 * status, however.
1297 */
1298
1299 if (simple_lock_try(&pp->pr_slock) == 0)
1300 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1301 else
1302 didlock = 1;
1303
1304 pool_print1(pp, modif, pr);
1305
1306 if (didlock)
1307 simple_unlock(&pp->pr_slock);
1308 }
1309
1310 static void
1311 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1312 {
1313 struct pool_item_header *ph;
1314 struct pool_cache *pc;
1315 struct pool_cache_group *pcg;
1316 #ifdef DIAGNOSTIC
1317 struct pool_item *pi;
1318 #endif
1319 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1320 char c;
1321
1322 while ((c = *modif) != '\0') {
1323 if (c == 'l')
1324 print_log = 1;
1325 if (c == 'p')
1326 print_pagelist = 1;
1327 if (c == 'c')
1328 print_cache = 1;
1329 modif++;
1330 }
1331
1332 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1333 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1334 pp->pr_roflags);
1335 (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
1336 (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
1337 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1338 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1339 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1340 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1341
1342 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1343 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1344 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1345 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1346
1347 if (print_pagelist == 0)
1348 goto skip_pagelist;
1349
1350 if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1351 (*pr)("\n\tpage list:\n");
1352 for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1353 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1354 ph->ph_page, ph->ph_nmissing,
1355 (u_long)ph->ph_time.tv_sec,
1356 (u_long)ph->ph_time.tv_usec);
1357 #ifdef DIAGNOSTIC
1358 for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
1359 pi = TAILQ_NEXT(pi, pi_list)) {
1360 if (pi->pi_magic != PI_MAGIC) {
1361 (*pr)("\t\t\titem %p, magic 0x%x\n",
1362 pi, pi->pi_magic);
1363 }
1364 }
1365 #endif
1366 }
1367 if (pp->pr_curpage == NULL)
1368 (*pr)("\tno current page\n");
1369 else
1370 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1371
1372 skip_pagelist:
1373
1374 if (print_log == 0)
1375 goto skip_log;
1376
1377 (*pr)("\n");
1378 if ((pp->pr_roflags & PR_LOGGING) == 0)
1379 (*pr)("\tno log\n");
1380 else
1381 pr_printlog(pp, NULL, pr);
1382
1383 skip_log:
1384
1385 if (print_cache == 0)
1386 goto skip_cache;
1387
1388 for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
1389 pc = TAILQ_NEXT(pc, pc_poollist)) {
1390 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1391 pc->pc_allocfrom, pc->pc_freeto);
1392 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1393 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1394 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1395 pcg = TAILQ_NEXT(pcg, pcg_list)) {
1396 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1397 for (i = 0; i < PCG_NOBJECTS; i++)
1398 (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1399 }
1400 }
1401
1402 skip_cache:
1403
1404 pr_enter_check(pp, pr);
1405 }
1406
1407 int
1408 pool_chk(struct pool *pp, const char *label)
1409 {
1410 struct pool_item_header *ph;
1411 int r = 0;
1412
1413 simple_lock(&pp->pr_slock);
1414
1415 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
1416 ph = TAILQ_NEXT(ph, ph_pagelist)) {
1417
1418 struct pool_item *pi;
1419 int n;
1420 caddr_t page;
1421
1422 page = (caddr_t)((u_long)ph & pp->pr_pagemask);
1423 if (page != ph->ph_page &&
1424 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1425 if (label != NULL)
1426 printf("%s: ", label);
1427 printf("pool(%p:%s): page inconsistency: page %p;"
1428 " at page head addr %p (p %p)\n", pp,
1429 pp->pr_wchan, ph->ph_page,
1430 ph, page);
1431 r++;
1432 goto out;
1433 }
1434
1435 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1436 pi != NULL;
1437 pi = TAILQ_NEXT(pi,pi_list), n++) {
1438
1439 #ifdef DIAGNOSTIC
1440 if (pi->pi_magic != PI_MAGIC) {
1441 if (label != NULL)
1442 printf("%s: ", label);
1443 printf("pool(%s): free list modified: magic=%x;"
1444 " page %p; item ordinal %d;"
1445 " addr %p (p %p)\n",
1446 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1447 n, pi, page);
1448 panic("pool");
1449 }
1450 #endif
1451 page = (caddr_t)((u_long)pi & pp->pr_pagemask);
1452 if (page == ph->ph_page)
1453 continue;
1454
1455 if (label != NULL)
1456 printf("%s: ", label);
1457 printf("pool(%p:%s): page inconsistency: page %p;"
1458 " item ordinal %d; addr %p (p %p)\n", pp,
1459 pp->pr_wchan, ph->ph_page,
1460 n, pi, page);
1461 r++;
1462 goto out;
1463 }
1464 }
1465 out:
1466 simple_unlock(&pp->pr_slock);
1467 return (r);
1468 }
1469
1470 /*
1471 * pool_cache_init:
1472 *
1473 * Initialize a pool cache.
1474 *
1475 * NOTE: If the pool must be protected from interrupts, we expect
1476 * to be called at the appropriate interrupt priority level.
1477 */
1478 void
1479 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1480 int (*ctor)(void *, void *, int),
1481 void (*dtor)(void *, void *),
1482 void *arg)
1483 {
1484
1485 TAILQ_INIT(&pc->pc_grouplist);
1486 simple_lock_init(&pc->pc_slock);
1487
1488 pc->pc_allocfrom = NULL;
1489 pc->pc_freeto = NULL;
1490 pc->pc_pool = pp;
1491
1492 pc->pc_ctor = ctor;
1493 pc->pc_dtor = dtor;
1494 pc->pc_arg = arg;
1495
1496 pc->pc_hits = 0;
1497 pc->pc_misses = 0;
1498
1499 pc->pc_ngroups = 0;
1500
1501 pc->pc_nitems = 0;
1502
1503 simple_lock(&pp->pr_slock);
1504 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1505 simple_unlock(&pp->pr_slock);
1506 }
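/*
 * Illustrative constructor/destructor pair (hypothetical), matching
 * the signatures expected by pool_cache_init(): the constructor
 * returns 0 on success or an errno value on failure, and the
 * destructor releases whatever the constructor set up.
 *
 *	static int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		struct foo *f = object;
 *
 *		f->f_x = 0;
 *		return (0);
 *	}
 *
 *	static void
 *	foo_dtor(void *arg, void *object)
 *	{
 *	}
 */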
1507
1508 /*
1509 * pool_cache_destroy:
1510 *
1511 * Destroy a pool cache.
1512 */
1513 void
1514 pool_cache_destroy(struct pool_cache *pc)
1515 {
1516 struct pool *pp = pc->pc_pool;
1517
1518 /* First, invalidate the entire cache. */
1519 pool_cache_invalidate(pc);
1520
1521 /* ...and remove it from the pool's cache list. */
1522 simple_lock(&pp->pr_slock);
1523 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1524 simple_unlock(&pp->pr_slock);
1525 }
1526
1527 static __inline void *
1528 pcg_get(struct pool_cache_group *pcg)
1529 {
1530 void *object;
1531 u_int idx;
1532
1533 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1534 KASSERT(pcg->pcg_avail != 0);
1535 idx = --pcg->pcg_avail;
1536
1537 KASSERT(pcg->pcg_objects[idx] != NULL);
1538 object = pcg->pcg_objects[idx];
1539 pcg->pcg_objects[idx] = NULL;
1540
1541 return (object);
1542 }
1543
1544 static __inline void
1545 pcg_put(struct pool_cache_group *pcg, void *object)
1546 {
1547 u_int idx;
1548
1549 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1550 idx = pcg->pcg_avail++;
1551
1552 KASSERT(pcg->pcg_objects[idx] == NULL);
1553 pcg->pcg_objects[idx] = object;
1554 }
1555
1556 /*
1557 * pool_cache_get:
1558 *
1559 * Get an object from a pool cache.
1560 */
1561 void *
1562 pool_cache_get(struct pool_cache *pc, int flags)
1563 {
1564 struct pool_cache_group *pcg;
1565 void *object;
1566
1567 simple_lock(&pc->pc_slock);
1568
1569 if ((pcg = pc->pc_allocfrom) == NULL) {
1570 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1571 pcg = TAILQ_NEXT(pcg, pcg_list)) {
1572 if (pcg->pcg_avail != 0) {
1573 pc->pc_allocfrom = pcg;
1574 goto have_group;
1575 }
1576 }
1577
1578 /*
1579 * No groups with any available objects. Allocate
1580 * a new object, construct it, and return it to
1581 * the caller. We will allocate a group, if necessary,
1582 * when the object is freed back to the cache.
1583 */
1584 pc->pc_misses++;
1585 simple_unlock(&pc->pc_slock);
1586 object = pool_get(pc->pc_pool, flags);
1587 if (object != NULL && pc->pc_ctor != NULL) {
1588 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1589 pool_put(pc->pc_pool, object);
1590 return (NULL);
1591 }
1592 }
1593 return (object);
1594 }
1595
1596 have_group:
1597 pc->pc_hits++;
1598 pc->pc_nitems--;
1599 object = pcg_get(pcg);
1600
1601 if (pcg->pcg_avail == 0)
1602 pc->pc_allocfrom = NULL;
1603
1604 simple_unlock(&pc->pc_slock);
1605
1606 return (object);
1607 }
1608
1609 /*
1610 * pool_cache_put:
1611 *
1612 * Put an object back to the pool cache.
1613 */
1614 void
1615 pool_cache_put(struct pool_cache *pc, void *object)
1616 {
1617 struct pool_cache_group *pcg;
1618
1619 simple_lock(&pc->pc_slock);
1620
1621 if ((pcg = pc->pc_freeto) == NULL) {
1622 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1623 pcg = TAILQ_NEXT(pcg, pcg_list)) {
1624 if (pcg->pcg_avail != PCG_NOBJECTS) {
1625 pc->pc_freeto = pcg;
1626 goto have_group;
1627 }
1628 }
1629
1630 /*
1631 * No empty groups to free the object to. Attempt to
1632 * allocate one.
1633 */
1634 simple_unlock(&pc->pc_slock);
1635 pcg = pool_get(&pcgpool, PR_NOWAIT);
1636 if (pcg != NULL) {
1637 memset(pcg, 0, sizeof(*pcg));
1638 simple_lock(&pc->pc_slock);
1639 pc->pc_ngroups++;
1640 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1641 if (pc->pc_freeto == NULL)
1642 pc->pc_freeto = pcg;
1643 goto have_group;
1644 }
1645
1646 /*
1647 * Unable to allocate a cache group; destruct the object
1648 * and free it back to the pool.
1649 */
1650 pool_cache_destruct_object(pc, object);
1651 return;
1652 }
1653
1654 have_group:
1655 pc->pc_nitems++;
1656 pcg_put(pcg, object);
1657
1658 if (pcg->pcg_avail == PCG_NOBJECTS)
1659 pc->pc_freeto = NULL;
1660
1661 simple_unlock(&pc->pc_slock);
1662 }
1663
1664 /*
1665 * pool_cache_destruct_object:
1666 *
1667 * Force destruction of an object and its release back into
1668 * the pool.
1669 */
1670 void
1671 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1672 {
1673
1674 if (pc->pc_dtor != NULL)
1675 (*pc->pc_dtor)(pc->pc_arg, object);
1676 pool_put(pc->pc_pool, object);
1677 }
1678
1679 /*
1680 * pool_cache_do_invalidate:
1681 *
1682 * This internal function implements pool_cache_invalidate() and
1683 * pool_cache_reclaim().
1684 */
1685 static void
1686 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1687 void (*putit)(struct pool *, void *, const char *, long))
1688 {
1689 struct pool_cache_group *pcg, *npcg;
1690 void *object;
1691
1692 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1693 pcg = npcg) {
1694 npcg = TAILQ_NEXT(pcg, pcg_list);
1695 while (pcg->pcg_avail != 0) {
1696 pc->pc_nitems--;
1697 object = pcg_get(pcg);
1698 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1699 pc->pc_allocfrom = NULL;
1700 if (pc->pc_dtor != NULL)
1701 (*pc->pc_dtor)(pc->pc_arg, object);
1702 (*putit)(pc->pc_pool, object, __FILE__, __LINE__);
1703 }
1704 if (free_groups) {
1705 pc->pc_ngroups--;
1706 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1707 if (pc->pc_freeto == pcg)
1708 pc->pc_freeto = NULL;
1709 pool_put(&pcgpool, pcg);
1710 }
1711 }
1712 }
1713
1714 /*
1715 * pool_cache_invalidate:
1716 *
1717 * Invalidate a pool cache (destruct and release all of the
1718 * cached objects).
1719 */
1720 void
1721 pool_cache_invalidate(struct pool_cache *pc)
1722 {
1723
1724 simple_lock(&pc->pc_slock);
1725 pool_cache_do_invalidate(pc, 0, _pool_put);
1726 simple_unlock(&pc->pc_slock);
1727 }
1728
1729 /*
1730 * pool_cache_reclaim:
1731 *
1732 * Reclaim a pool cache for pool_reclaim().
1733 */
1734 static void
1735 pool_cache_reclaim(struct pool_cache *pc)
1736 {
1737
1738 simple_lock(&pc->pc_slock);
1739 pool_cache_do_invalidate(pc, 1, pool_do_put);
1740 simple_unlock(&pc->pc_slock);
1741 }
1742