1 /* $NetBSD: subr_pool.c,v 1.75 2002/03/13 08:12:58 simonb Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.75 2002/03/13 08:12:58 simonb Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according
63 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64 * in the pool structure and the individual pool items are on a linked list
65 * headed by `ph_itemlist' in each page header. The memory for building
66 * the page list is either taken from the allocated pages themselves (for
67 * small pool items) or taken from an internal pool of page headers (`phpool').
68 */
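/*
 * Illustrative usage sketch (an editor's addition, not part of the
 * original source): a subsystem managing fixed-size objects would
 * typically use a pool like this.  "struct foo", foo_pool, foo_init(),
 * foo_alloc() and foo_free() are hypothetical names; process-context
 * callers may pass PR_WAITOK, and the spl protection shown assumes the
 * pool is also used from interrupt context.
 *
 *	static struct pool foo_pool;
 *
 *	void
 *	foo_init(void)
 *	{
 *
 *		pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *		    "foopl", NULL);
 *	}
 *
 *	struct foo *
 *	foo_alloc(void)
 *	{
 *		struct foo *f;
 *		int s;
 *
 *		s = splvm();
 *		f = pool_get(&foo_pool, PR_WAITOK);
 *		splx(s);
 *		return (f);
 *	}
 *
 *	void
 *	foo_free(struct foo *f)
 *	{
 *		int s;
 *
 *		s = splvm();
 *		pool_put(&foo_pool, f);
 *		splx(s);
 *	}
 */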
69
70 /* List of all pools */
71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72
73 /* Private pool for page header structures */
74 static struct pool phpool;
75
76 #ifdef POOL_SUBPAGE
77 /* Pool of subpages for use by normal pools. */
78 static struct pool psppool;
79 #endif
80
81 /* # of seconds to retain page after last use */
82 int pool_inactive_time = 10;
83
84 /* Next candidate for drainage (see pool_drain()) */
85 static struct pool *drainpp;
86
87 /* This spin lock protects both pool_head and drainpp. */
88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89
90 struct pool_item_header {
91 /* Page headers */
92 TAILQ_ENTRY(pool_item_header)
93 ph_pagelist; /* pool page list */
94 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95 LIST_ENTRY(pool_item_header)
96 ph_hashlist; /* Off-page page headers */
97 int ph_nmissing; /* # of chunks in use */
98 caddr_t ph_page; /* this page's address */
99 struct timeval ph_time; /* last referenced */
100 };
101 TAILQ_HEAD(pool_pagelist,pool_item_header);
102
103 struct pool_item {
104 #ifdef DIAGNOSTIC
105 int pi_magic;
106 #endif
107 #define PI_MAGIC 0xdeadbeef
108 /* Other entries use only this list entry */
109 TAILQ_ENTRY(pool_item) pi_list;
110 };
111
112 #define PR_HASH_INDEX(pp,addr) \
113 (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114 (PR_HASHTABSIZE - 1))
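/*
 * Worked example (an editor's addition; assumes 4096-byte pool pages,
 * i.e. pa_pageshift == 12, and a PR_HASHTABSIZE of 8): a page at
 * kernel address 0xc1234000 hashes to (0xc1234000 >> 12) & 7 ==
 * 0xc1234 & 7 == 4, so its off-page header is kept on, and looked up
 * from, the pr_hashtab[4] chain.
 */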
115
116 #define POOL_NEEDS_CATCHUP(pp) \
117 ((pp)->pr_nitems < (pp)->pr_minitems)
118
119 /*
120 * Pool cache management.
121 *
122 * Pool caches provide a way for constructed objects to be cached by the
123 * pool subsystem. This can lead to performance improvements by avoiding
124 * needless object construction/destruction; it is deferred until absolutely
125 * necessary.
126 *
127 * Caches are grouped into cache groups. Each cache group references
128 * up to 16 constructed objects. When a cache allocates an object
129 * from the pool, it calls the object's constructor and places it into
130 * a cache group. When a cache group frees an object back to the pool,
131 * it first calls the object's destructor. This allows the object to
132 * persist in constructed form while freed to the cache.
133 *
134 * Multiple caches may exist for each pool. This allows a single
135 * object type to have multiple constructed forms. The pool references
136 * each cache, so that when a pool is drained by the pagedaemon, it can
137 * drain each individual cache as well. Each time a cache is drained,
138 * the most idle cache group is freed to the pool in its entirety.
139 *
140 * Pool caches are laid on top of pools. By layering them, we can avoid
141 * the complexity of cache management for pools which would not benefit
142 * from it.
143 */
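/*
 * Illustrative sketch (an editor's addition, not part of the original
 * source): layering a cache of constructed objects on top of a pool.
 * The names foo_pool, foo_cache, foo_ctor() and foo_dtor() are
 * hypothetical; the constructor/destructor signatures follow
 * pool_cache_init() below.
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	obj = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, obj);
 */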
144
145 /* The cache group pool. */
146 static struct pool pcgpool;
147
148 /* The pool cache group. */
149 #define PCG_NOBJECTS 16
150 struct pool_cache_group {
151 TAILQ_ENTRY(pool_cache_group)
152 pcg_list; /* link in the pool cache's group list */
153 u_int pcg_avail; /* # available objects */
154 /* pointers to the objects */
155 void *pcg_objects[PCG_NOBJECTS];
156 };
157
158 static void pool_cache_reclaim(struct pool_cache *);
159
160 static int pool_catchup(struct pool *);
161 static void pool_prime_page(struct pool *, caddr_t,
162 struct pool_item_header *);
163
164 void *pool_allocator_alloc(struct pool *, int);
165 void pool_allocator_free(struct pool *, void *);
166
167 static void pool_print1(struct pool *, const char *,
168 void (*)(const char *, ...));
169
170 /*
171 * Pool log entry. An array of these is allocated in pool_init().
172 */
173 struct pool_log {
174 const char *pl_file;
175 long pl_line;
176 int pl_action;
177 #define PRLOG_GET 1
178 #define PRLOG_PUT 2
179 void *pl_addr;
180 };
181
182 /* Number of entries in pool log buffers */
183 #ifndef POOL_LOGSIZE
184 #define POOL_LOGSIZE 10
185 #endif
186
187 int pool_logsize = POOL_LOGSIZE;
188
189 #ifdef POOL_DIAGNOSTIC
190 static __inline void
191 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
192 {
193 int n = pp->pr_curlogentry;
194 struct pool_log *pl;
195
196 if ((pp->pr_roflags & PR_LOGGING) == 0)
197 return;
198
199 /*
200 * Fill in the current entry. Wrap around and overwrite
201 * the oldest entry if necessary.
202 */
203 pl = &pp->pr_log[n];
204 pl->pl_file = file;
205 pl->pl_line = line;
206 pl->pl_action = action;
207 pl->pl_addr = v;
208 if (++n >= pp->pr_logsize)
209 n = 0;
210 pp->pr_curlogentry = n;
211 }
212
213 static void
214 pr_printlog(struct pool *pp, struct pool_item *pi,
215 void (*pr)(const char *, ...))
216 {
217 int i = pp->pr_logsize;
218 int n = pp->pr_curlogentry;
219
220 if ((pp->pr_roflags & PR_LOGGING) == 0)
221 return;
222
223 /*
224 * Print all entries in this pool's log.
225 */
226 while (i-- > 0) {
227 struct pool_log *pl = &pp->pr_log[n];
228 if (pl->pl_action != 0) {
229 if (pi == NULL || pi == pl->pl_addr) {
230 (*pr)("\tlog entry %d:\n", i);
231 (*pr)("\t\taction = %s, addr = %p\n",
232 pl->pl_action == PRLOG_GET ? "get" : "put",
233 pl->pl_addr);
234 (*pr)("\t\tfile: %s at line %lu\n",
235 pl->pl_file, pl->pl_line);
236 }
237 }
238 if (++n >= pp->pr_logsize)
239 n = 0;
240 }
241 }
242
243 static __inline void
244 pr_enter(struct pool *pp, const char *file, long line)
245 {
246
247 if (__predict_false(pp->pr_entered_file != NULL)) {
248 printf("pool %s: reentrancy at file %s line %ld\n",
249 pp->pr_wchan, file, line);
250 printf(" previous entry at file %s line %ld\n",
251 pp->pr_entered_file, pp->pr_entered_line);
252 panic("pr_enter");
253 }
254
255 pp->pr_entered_file = file;
256 pp->pr_entered_line = line;
257 }
258
259 static __inline void
260 pr_leave(struct pool *pp)
261 {
262
263 if (__predict_false(pp->pr_entered_file == NULL)) {
264 printf("pool %s not entered?\n", pp->pr_wchan);
265 panic("pr_leave");
266 }
267
268 pp->pr_entered_file = NULL;
269 pp->pr_entered_line = 0;
270 }
271
272 static __inline void
273 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
274 {
275
276 if (pp->pr_entered_file != NULL)
277 (*pr)("\n\tcurrently entered from file %s line %ld\n",
278 pp->pr_entered_file, pp->pr_entered_line);
279 }
280 #else
281 #define pr_log(pp, v, action, file, line)
282 #define pr_printlog(pp, pi, pr)
283 #define pr_enter(pp, file, line)
284 #define pr_leave(pp)
285 #define pr_enter_check(pp, pr)
286 #endif /* POOL_DIAGNOSTIC */
287
288 /*
289 * Return the pool page header based on page address.
290 */
291 static __inline struct pool_item_header *
292 pr_find_pagehead(struct pool *pp, caddr_t page)
293 {
294 struct pool_item_header *ph;
295
296 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
297 return ((struct pool_item_header *)(page + pp->pr_phoffset));
298
299 for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300 ph != NULL;
301 ph = LIST_NEXT(ph, ph_hashlist)) {
302 if (ph->ph_page == page)
303 return (ph);
304 }
305 return (NULL);
306 }
307
308 /*
309 * Remove a page from the pool.
310 */
311 static __inline void
312 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313 struct pool_pagelist *pq)
314 {
315 int s;
316
317 /*
318 * If the page was idle, decrement the idle page count.
319 */
320 if (ph->ph_nmissing == 0) {
321 #ifdef DIAGNOSTIC
322 if (pp->pr_nidle == 0)
323 panic("pr_rmpage: nidle inconsistent");
324 if (pp->pr_nitems < pp->pr_itemsperpage)
325 panic("pr_rmpage: nitems inconsistent");
326 #endif
327 pp->pr_nidle--;
328 }
329
330 pp->pr_nitems -= pp->pr_itemsperpage;
331
332 /*
333 * Unlink a page from the pool and release it (or queue it for release).
334 */
335 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
336 if (pq) {
337 TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338 } else {
339 pool_allocator_free(pp, ph->ph_page);
340 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341 LIST_REMOVE(ph, ph_hashlist);
342 s = splhigh();
343 pool_put(&phpool, ph);
344 splx(s);
345 }
346 }
347 pp->pr_npages--;
348 pp->pr_npagefree++;
349
350 if (pp->pr_curpage == ph) {
351 /*
352 * Find a new non-empty page header, if any.
353 * Start search from the page head, to increase the
354 * chance for "high water" pages to be freed.
355 */
356 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
357 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358 break;
359
360 pp->pr_curpage = ph;
361 }
362 }
363
364 /*
365 * Initialize the given pool resource structure.
366 *
367 * We export this routine to allow other kernel parts to declare
368 * static pools that must be initialized before malloc() is available.
369 */
370 void
371 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
372 const char *wchan, struct pool_allocator *palloc)
373 {
374 int off, slack, i;
375
376 #ifdef POOL_DIAGNOSTIC
377 /*
378 * Always log if POOL_DIAGNOSTIC is defined.
379 */
380 if (pool_logsize != 0)
381 flags |= PR_LOGGING;
382 #endif
383
384 #ifdef POOL_SUBPAGE
385 /*
386 * XXX We don't provide a real `nointr' back-end
387 * yet; all sub-pages come from a kmem back-end.
388 * maybe some day...
389 */
390 if (palloc == NULL) {
391 extern struct pool_allocator pool_allocator_kmem_subpage;
392 palloc = &pool_allocator_kmem_subpage;
393 }
394 /*
395 * We'll assume any user-specified back-end allocator
396 * will deal with sub-pages, or simply doesn't care.
397 */
398 #else
399 if (palloc == NULL)
400 palloc = &pool_allocator_kmem;
401 #endif /* POOL_SUBPAGE */
402 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403 if (palloc->pa_pagesz == 0) {
404 #ifdef POOL_SUBPAGE
405 if (palloc == &pool_allocator_kmem)
406 palloc->pa_pagesz = PAGE_SIZE;
407 else
408 palloc->pa_pagesz = POOL_SUBPAGE;
409 #else
410 palloc->pa_pagesz = PAGE_SIZE;
411 #endif /* POOL_SUBPAGE */
412 }
413
414 TAILQ_INIT(&palloc->pa_list);
415
416 simple_lock_init(&palloc->pa_slock);
417 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419 palloc->pa_flags |= PA_INITIALIZED;
420 }
421
422 if (align == 0)
423 align = ALIGN(1);
424
425 if (size < sizeof(struct pool_item))
426 size = sizeof(struct pool_item);
427
428 size = ALIGN(size);
429 #ifdef DIAGNOSTIC
430 if (size > palloc->pa_pagesz)
431 panic("pool_init: pool item size (%lu) too large",
432 (u_long)size);
433 #endif
434
435 /*
436 * Initialize the pool structure.
437 */
438 TAILQ_INIT(&pp->pr_pagelist);
439 TAILQ_INIT(&pp->pr_cachelist);
440 pp->pr_curpage = NULL;
441 pp->pr_npages = 0;
442 pp->pr_minitems = 0;
443 pp->pr_minpages = 0;
444 pp->pr_maxpages = UINT_MAX;
445 pp->pr_roflags = flags;
446 pp->pr_flags = 0;
447 pp->pr_size = size;
448 pp->pr_align = align;
449 pp->pr_wchan = wchan;
450 pp->pr_alloc = palloc;
451 pp->pr_nitems = 0;
452 pp->pr_nout = 0;
453 pp->pr_hardlimit = UINT_MAX;
454 pp->pr_hardlimit_warning = NULL;
455 pp->pr_hardlimit_ratecap.tv_sec = 0;
456 pp->pr_hardlimit_ratecap.tv_usec = 0;
457 pp->pr_hardlimit_warning_last.tv_sec = 0;
458 pp->pr_hardlimit_warning_last.tv_usec = 0;
459 pp->pr_drain_hook = NULL;
460 pp->pr_drain_hook_arg = NULL;
461
462 /*
463 * Decide whether to put the page header off page to avoid
464 * wasting too large a part of the page. Off-page page headers
465 * go on a hash table, so we can match a returned item
466 * with its header based on the page address.
467 * We use 1/16 of the page size as the threshold (XXX: tune)
468 */
469 if (pp->pr_size < palloc->pa_pagesz/16) {
470 /* Use the end of the page for the page header */
471 pp->pr_roflags |= PR_PHINPAGE;
472 pp->pr_phoffset = off = palloc->pa_pagesz -
473 ALIGN(sizeof(struct pool_item_header));
474 } else {
475 /* The page header will be taken from our page header pool */
476 pp->pr_phoffset = 0;
477 off = palloc->pa_pagesz;
478 for (i = 0; i < PR_HASHTABSIZE; i++) {
479 LIST_INIT(&pp->pr_hashtab[i]);
480 }
481 }
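	/*
	 * Worked example (editor's addition): with a 4096-byte page the
	 * threshold above is 256 bytes, so a 128-byte item keeps its
	 * header in-page at offset 4096 - ALIGN(sizeof(struct
	 * pool_item_header)), while a 512-byte item gets an off-page
	 * header allocated from phpool and found again via pr_hashtab.
	 */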
482
483 /*
484 * Alignment is to take place at `ioff' within the item. This means
485 * we must reserve up to `align - 1' bytes on the page to allow
486 * appropriate positioning of each item.
487 *
488 * Silently enforce `0 <= ioff < align'.
489 */
490 pp->pr_itemoffset = ioff = ioff % align;
491 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
492 KASSERT(pp->pr_itemsperpage != 0);
493
494 /*
495 * Use the slack between the chunks and the page header
496 * for "cache coloring".
497 */
498 slack = off - pp->pr_itemsperpage * pp->pr_size;
499 pp->pr_maxcolor = (slack / align) * align;
500 pp->pr_curcolor = 0;
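	/*
	 * Worked example (editor's addition): with a 4096-byte page, an
	 * aligned item size of 300 bytes (off-page header, so off ==
	 * 4096), align == 4 and ioff == 0, pr_itemsperpage == 4096 /
	 * 300 == 13, the slack is 4096 - 13 * 300 == 196 bytes and
	 * pr_maxcolor == 196; successive pages then start their items
	 * at offsets 0, 4, 8, ... up to 196 before wrapping back to 0.
	 */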
501
502 pp->pr_nget = 0;
503 pp->pr_nfail = 0;
504 pp->pr_nput = 0;
505 pp->pr_npagealloc = 0;
506 pp->pr_npagefree = 0;
507 pp->pr_hiwat = 0;
508 pp->pr_nidle = 0;
509
510 #ifdef POOL_DIAGNOSTIC
511 if (flags & PR_LOGGING) {
512 if (kmem_map == NULL ||
513 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
514 M_TEMP, M_NOWAIT)) == NULL)
515 pp->pr_roflags &= ~PR_LOGGING;
516 pp->pr_curlogentry = 0;
517 pp->pr_logsize = pool_logsize;
518 }
519 #endif
520
521 pp->pr_entered_file = NULL;
522 pp->pr_entered_line = 0;
523
524 simple_lock_init(&pp->pr_slock);
525
526 /*
527 * Initialize private page header pool and cache magazine pool if we
528 * haven't done so yet.
529 * XXX LOCKING.
530 */
531 if (phpool.pr_size == 0) {
532 #ifdef POOL_SUBPAGE
533 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
534 "phpool", &pool_allocator_kmem);
535 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
536 PR_RECURSIVE, "psppool", &pool_allocator_kmem);
537 #else
538 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
539 0, "phpool", NULL);
540 #endif
541 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
542 0, "pcgpool", NULL);
543 }
544
545 /* Insert into the list of all pools. */
546 simple_lock(&pool_head_slock);
547 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
548 simple_unlock(&pool_head_slock);
549
550 /* Insert this into the list of pools using this allocator. */
551 simple_lock(&palloc->pa_slock);
552 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
553 simple_unlock(&palloc->pa_slock);
554 }
555
556 /*
557 * De-commission a pool resource.
558 */
559 void
560 pool_destroy(struct pool *pp)
561 {
562 struct pool_item_header *ph;
563 struct pool_cache *pc;
564
565 /* Locking order: pool_allocator -> pool */
566 simple_lock(&pp->pr_alloc->pa_slock);
567 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
568 simple_unlock(&pp->pr_alloc->pa_slock);
569
570 /* Destroy all caches for this pool. */
571 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
572 pool_cache_destroy(pc);
573
574 #ifdef DIAGNOSTIC
575 if (pp->pr_nout != 0) {
576 pr_printlog(pp, NULL, printf);
577 panic("pool_destroy: pool busy: still out: %u\n",
578 pp->pr_nout);
579 }
580 #endif
581
582 /* Remove all pages */
583 while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
584 pr_rmpage(pp, ph, NULL);
585
586 /* Remove from global pool list */
587 simple_lock(&pool_head_slock);
588 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
589 if (drainpp == pp) {
590 drainpp = NULL;
591 }
592 simple_unlock(&pool_head_slock);
593
594 #ifdef POOL_DIAGNOSTIC
595 if ((pp->pr_roflags & PR_LOGGING) != 0)
596 free(pp->pr_log, M_TEMP);
597 #endif
598 }
599
600 void
601 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
602 {
603
604 /* XXX no locking -- must be used just after pool_init() */
605 #ifdef DIAGNOSTIC
606 if (pp->pr_drain_hook != NULL)
607 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
608 #endif
609 pp->pr_drain_hook = fn;
610 pp->pr_drain_hook_arg = arg;
611 }
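/*
 * Illustrative sketch (an editor's addition, not part of the original
 * source): a subsystem that keeps private reserves can register a
 * drain hook right after pool_init(), so the pool can ask it to give
 * memory back when the hard limit is hit or the pool is reclaimed.
 * foo_pool, foo_drain() and foo_release_cached() are hypothetical;
 * the hook may sleep only if its flags argument includes PR_WAITOK.
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *
 *		foo_release_cached();
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */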
612
613 static __inline struct pool_item_header *
614 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
615 {
616 struct pool_item_header *ph;
617 int s;
618
619 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
620
621 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
622 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
623 else {
624 s = splhigh();
625 ph = pool_get(&phpool, flags);
626 splx(s);
627 }
628
629 return (ph);
630 }
631
632 /*
633 * Grab an item from the pool; must be called at appropriate spl level
634 */
635 void *
636 #ifdef POOL_DIAGNOSTIC
637 _pool_get(struct pool *pp, int flags, const char *file, long line)
638 #else
639 pool_get(struct pool *pp, int flags)
640 #endif
641 {
642 struct pool_item *pi;
643 struct pool_item_header *ph;
644 void *v;
645
646 #ifdef DIAGNOSTIC
647 if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
648 (flags & PR_WAITOK) != 0))
649 panic("pool_get: must have NOWAIT");
650
651 #ifdef LOCKDEBUG
652 if (flags & PR_WAITOK)
653 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
654 #endif
655 #endif /* DIAGNOSTIC */
656
657 simple_lock(&pp->pr_slock);
658 pr_enter(pp, file, line);
659
660 startover:
661 /*
662 * Check to see if we've reached the hard limit. If we have,
663 * and we can wait, then wait until an item has been returned to
664 * the pool.
665 */
666 #ifdef DIAGNOSTIC
667 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
668 pr_leave(pp);
669 simple_unlock(&pp->pr_slock);
670 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
671 }
672 #endif
673 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
674 if (pp->pr_drain_hook != NULL) {
675 /*
676 * Since the drain hook is going to free things
677 * back to the pool, unlock, call the hook, re-lock,
678 * and check the hardlimit condition again.
679 */
680 pr_leave(pp);
681 simple_unlock(&pp->pr_slock);
682 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
683 simple_lock(&pp->pr_slock);
684 pr_enter(pp, file, line);
685 if (pp->pr_nout < pp->pr_hardlimit)
686 goto startover;
687 }
688
689 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
690 /*
691 * XXX: A warning isn't logged in this case. Should
692 * it be?
693 */
694 pp->pr_flags |= PR_WANTED;
695 pr_leave(pp);
696 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
697 pr_enter(pp, file, line);
698 goto startover;
699 }
700
701 /*
702 * Log a message that the hard limit has been hit.
703 */
704 if (pp->pr_hardlimit_warning != NULL &&
705 ratecheck(&pp->pr_hardlimit_warning_last,
706 &pp->pr_hardlimit_ratecap))
707 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
708
709 pp->pr_nfail++;
710
711 pr_leave(pp);
712 simple_unlock(&pp->pr_slock);
713 return (NULL);
714 }
715
716 /*
717 * The convention we use is that if `curpage' is not NULL, then
718 * it points at a non-empty bucket. In particular, `curpage'
719 * never points at a page header which has PR_PHINPAGE set and
720 * has no items in its bucket.
721 */
722 if ((ph = pp->pr_curpage) == NULL) {
723 #ifdef DIAGNOSTIC
724 if (pp->pr_nitems != 0) {
725 simple_unlock(&pp->pr_slock);
726 printf("pool_get: %s: curpage NULL, nitems %u\n",
727 pp->pr_wchan, pp->pr_nitems);
728 panic("pool_get: nitems inconsistent\n");
729 }
730 #endif
731
732 /*
733 * Call the back-end page allocator for more memory.
734 * Release the pool lock, as the back-end page allocator
735 * may block.
736 */
737 pr_leave(pp);
738 simple_unlock(&pp->pr_slock);
739 v = pool_allocator_alloc(pp, flags);
740 if (__predict_true(v != NULL))
741 ph = pool_alloc_item_header(pp, v, flags);
742 simple_lock(&pp->pr_slock);
743 pr_enter(pp, file, line);
744
745 if (__predict_false(v == NULL || ph == NULL)) {
746 if (v != NULL)
747 pool_allocator_free(pp, v);
748
749 /*
750 * We were unable to allocate a page or item
751 * header, but we released the lock during
752 * allocation, so perhaps items were freed
753 * back to the pool. Check for this case.
754 */
755 if (pp->pr_curpage != NULL)
756 goto startover;
757
758 if ((flags & PR_WAITOK) == 0) {
759 pp->pr_nfail++;
760 pr_leave(pp);
761 simple_unlock(&pp->pr_slock);
762 return (NULL);
763 }
764
765 /*
766 * Wait for items to be returned to this pool.
767 *
768 * XXX: maybe we should wake up once a second and
769 * try again?
770 */
771 pp->pr_flags |= PR_WANTED;
772 /* PA_WANTED is already set on the allocator. */
773 pr_leave(pp);
774 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
775 pr_enter(pp, file, line);
776 goto startover;
777 }
778
779 /* We have more memory; add it to the pool */
780 pool_prime_page(pp, v, ph);
781 pp->pr_npagealloc++;
782
783 /* Start the allocation process over. */
784 goto startover;
785 }
786
787 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
788 pr_leave(pp);
789 simple_unlock(&pp->pr_slock);
790 panic("pool_get: %s: page empty", pp->pr_wchan);
791 }
792 #ifdef DIAGNOSTIC
793 if (__predict_false(pp->pr_nitems == 0)) {
794 pr_leave(pp);
795 simple_unlock(&pp->pr_slock);
796 printf("pool_get: %s: items on itemlist, nitems %u\n",
797 pp->pr_wchan, pp->pr_nitems);
798 panic("pool_get: nitems inconsistent\n");
799 }
800 #endif
801
802 #ifdef POOL_DIAGNOSTIC
803 pr_log(pp, v, PRLOG_GET, file, line);
804 #endif
805
806 #ifdef DIAGNOSTIC
807 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
808 pr_printlog(pp, pi, printf);
809 panic("pool_get(%s): free list modified: magic=%x; page %p;"
810 " item addr %p\n",
811 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
812 }
813 #endif
814
815 /*
816 * Remove from item list.
817 */
818 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
819 pp->pr_nitems--;
820 pp->pr_nout++;
821 if (ph->ph_nmissing == 0) {
822 #ifdef DIAGNOSTIC
823 if (__predict_false(pp->pr_nidle == 0))
824 panic("pool_get: nidle inconsistent");
825 #endif
826 pp->pr_nidle--;
827 }
828 ph->ph_nmissing++;
829 if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
830 #ifdef DIAGNOSTIC
831 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
832 pr_leave(pp);
833 simple_unlock(&pp->pr_slock);
834 panic("pool_get: %s: nmissing inconsistent",
835 pp->pr_wchan);
836 }
837 #endif
838 /*
839 * Find a new non-empty page header, if any.
840 * Start search from the page head, to increase
841 * the chance for "high water" pages to be freed.
842 *
843 * Migrate empty pages to the end of the list. This
844 * will speed the update of curpage as pages become
845 * idle. Empty pages intermingled with idle pages
846 * is no big deal. As soon as a page becomes un-empty,
847 * it will move back to the head of the list.
848 */
849 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
850 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
851 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
852 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
853 break;
854
855 pp->pr_curpage = ph;
856 }
857
858 pp->pr_nget++;
859
860 /*
861 * If we have a low water mark and we are now below that low
862 * water mark, add more items to the pool.
863 */
864 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
865 /*
866 * XXX: Should we log a warning? Should we set up a timeout
867 * to try again in a second or so? The latter could break
868 * a caller's assumptions about interrupt protection, etc.
869 */
870 }
871
872 pr_leave(pp);
873 simple_unlock(&pp->pr_slock);
874 return (v);
875 }
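/*
 * Illustrative sketch (editor's addition): interrupt-context callers
 * must use PR_NOWAIT and handle a NULL return, e.g. (foo_pool is a
 * hypothetical pool):
 *
 *	if ((f = pool_get(&foo_pool, PR_NOWAIT)) == NULL)
 *		return (ENOBUFS);
 */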
876
877 /*
878 * Internal version of pool_put(). Pool is already locked/entered.
879 */
880 static void
881 pool_do_put(struct pool *pp, void *v)
882 {
883 struct pool_item *pi = v;
884 struct pool_item_header *ph;
885 caddr_t page;
886 int s;
887
888 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
889
890 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
891
892 #ifdef DIAGNOSTIC
893 if (__predict_false(pp->pr_nout == 0)) {
894 printf("pool %s: putting with none out\n",
895 pp->pr_wchan);
896 panic("pool_put");
897 }
898 #endif
899
900 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
901 pr_printlog(pp, NULL, printf);
902 panic("pool_put: %s: page header missing", pp->pr_wchan);
903 }
904
905 #ifdef LOCKDEBUG
906 /*
907 * Check if we're freeing a locked simple lock.
908 */
909 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
910 #endif
911
912 /*
913 * Return to item list.
914 */
915 #ifdef DIAGNOSTIC
916 pi->pi_magic = PI_MAGIC;
917 #endif
918 #ifdef DEBUG
919 {
920 int i, *ip = v;
921
922 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
923 *ip++ = PI_MAGIC;
924 }
925 }
926 #endif
927
928 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
929 ph->ph_nmissing--;
930 pp->pr_nput++;
931 pp->pr_nitems++;
932 pp->pr_nout--;
933
934 /* Cancel "pool empty" condition if it exists */
935 if (pp->pr_curpage == NULL)
936 pp->pr_curpage = ph;
937
938 if (pp->pr_flags & PR_WANTED) {
939 pp->pr_flags &= ~PR_WANTED;
940 if (ph->ph_nmissing == 0)
941 pp->pr_nidle++;
942 wakeup((caddr_t)pp);
943 return;
944 }
945
946 /*
947 * If this page is now complete, do one of two things:
948 *
949 * (1) If we have more pages than the page high water
950 * mark, free the page back to the system.
951 *
952 * (2) Move it to the end of the page list, so that
953 * we minimize our chances of fragmenting the
954 * pool. Idle pages migrate to the end (along with
955 * completely empty pages, so that we find un-empty
956 * pages more quickly when we update curpage) of the
957 * list so they can be more easily swept up by
958 * the pagedaemon when pages are scarce.
959 */
960 if (ph->ph_nmissing == 0) {
961 pp->pr_nidle++;
962 if (pp->pr_npages > pp->pr_maxpages ||
963 (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
964 pr_rmpage(pp, ph, NULL);
965 } else {
966 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
967 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
968
969 /*
970 * Update the timestamp on the page. A page must
971 * be idle for some period of time before it can
972 * be reclaimed by the pagedaemon. This minimizes
973 * ping-pong'ing for memory.
974 */
975 s = splclock();
976 ph->ph_time = mono_time;
977 splx(s);
978
979 /*
980 * Update the current page pointer. Just look for
981 * the first page with any free items.
982 *
983 * XXX: Maybe we want an option to look for the
984 * page with the fewest available items, to minimize
985 * fragmentation?
986 */
987 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
988 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
989 break;
990
991 pp->pr_curpage = ph;
992 }
993 }
994 /*
995 * If the page has just become un-empty, move it to the head of
996 * the list, and make it the current page. The next allocation
997 * will get the item from this page, instead of further fragmenting
998 * the pool.
999 */
1000 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1001 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1002 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1003 pp->pr_curpage = ph;
1004 }
1005 }
1006
1007 /*
1008 * Return resource to the pool; must be called at appropriate spl level
1009 */
1010 #ifdef POOL_DIAGNOSTIC
1011 void
1012 _pool_put(struct pool *pp, void *v, const char *file, long line)
1013 {
1014
1015 simple_lock(&pp->pr_slock);
1016 pr_enter(pp, file, line);
1017
1018 pr_log(pp, v, PRLOG_PUT, file, line);
1019
1020 pool_do_put(pp, v);
1021
1022 pr_leave(pp);
1023 simple_unlock(&pp->pr_slock);
1024 }
1025 #undef pool_put
1026 #endif /* POOL_DIAGNOSTIC */
1027
1028 void
1029 pool_put(struct pool *pp, void *v)
1030 {
1031
1032 simple_lock(&pp->pr_slock);
1033
1034 pool_do_put(pp, v);
1035
1036 simple_unlock(&pp->pr_slock);
1037 }
1038
1039 #ifdef POOL_DIAGNOSTIC
1040 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1041 #endif
1042
1043 /*
1044 * Add N items to the pool.
1045 */
1046 int
1047 pool_prime(struct pool *pp, int n)
1048 {
1049 struct pool_item_header *ph;
1050 caddr_t cp;
1051 int newpages;
1052
1053 simple_lock(&pp->pr_slock);
1054
1055 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1056
1057 while (newpages-- > 0) {
1058 simple_unlock(&pp->pr_slock);
1059 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1060 if (__predict_true(cp != NULL))
1061 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1062 simple_lock(&pp->pr_slock);
1063
1064 if (__predict_false(cp == NULL || ph == NULL)) {
1065 if (cp != NULL)
1066 pool_allocator_free(pp, cp);
1067 break;
1068 }
1069
1070 pool_prime_page(pp, cp, ph);
1071 pp->pr_npagealloc++;
1072 pp->pr_minpages++;
1073 }
1074
1075 if (pp->pr_minpages >= pp->pr_maxpages)
1076 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1077
1078 simple_unlock(&pp->pr_slock);
1079 return (0);
1080 }
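/*
 * Illustrative sketch (editor's addition): a driver that must be able
 * to allocate a known number of items at interrupt time can pre-fill
 * the pool once at attach time.  foo_pool and NFOO are hypothetical;
 * pool_prime() always returns 0 in this version.
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	(void) pool_prime(&foo_pool, NFOO);
 */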
1081
1082 /*
1083 * Add a page worth of items to the pool.
1084 *
1085 * Note, we must be called with the pool descriptor LOCKED.
1086 */
1087 static void
1088 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1089 {
1090 struct pool_item *pi;
1091 caddr_t cp = storage;
1092 unsigned int align = pp->pr_align;
1093 unsigned int ioff = pp->pr_itemoffset;
1094 int n;
1095
1096 #ifdef DIAGNOSTIC
1097 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1098 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1099 #endif
1100
1101 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1102 LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1103 ph, ph_hashlist);
1104
1105 /*
1106 * Insert page header.
1107 */
1108 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1109 TAILQ_INIT(&ph->ph_itemlist);
1110 ph->ph_page = storage;
1111 ph->ph_nmissing = 0;
1112 memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1113
1114 pp->pr_nidle++;
1115
1116 /*
1117 * Color this page.
1118 */
1119 cp = (caddr_t)(cp + pp->pr_curcolor);
1120 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1121 pp->pr_curcolor = 0;
1122
1123 /*
1124 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1125 */
1126 if (ioff != 0)
1127 cp = (caddr_t)(cp + (align - ioff));
1128
1129 /*
1130 * Insert remaining chunks on the bucket list.
1131 */
1132 n = pp->pr_itemsperpage;
1133 pp->pr_nitems += n;
1134
1135 while (n--) {
1136 pi = (struct pool_item *)cp;
1137
1138 /* Insert on page list */
1139 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1140 #ifdef DIAGNOSTIC
1141 pi->pi_magic = PI_MAGIC;
1142 #endif
1143 cp = (caddr_t)(cp + pp->pr_size);
1144 }
1145
1146 /*
1147 * If the pool was depleted, point at the new page.
1148 */
1149 if (pp->pr_curpage == NULL)
1150 pp->pr_curpage = ph;
1151
1152 if (++pp->pr_npages > pp->pr_hiwat)
1153 pp->pr_hiwat = pp->pr_npages;
1154 }
1155
1156 /*
1157 * Used by pool_get() when nitems drops below the low water mark. This
1158 * is used to catch up nitems with the low water mark.
1159 *
1160 * Note 1, we never wait for memory here, we let the caller decide what to do.
1161 *
1162 * Note 2, we must be called with the pool already locked, and we return
1163 * with it locked.
1164 */
1165 static int
1166 pool_catchup(struct pool *pp)
1167 {
1168 struct pool_item_header *ph;
1169 caddr_t cp;
1170 int error = 0;
1171
1172 while (POOL_NEEDS_CATCHUP(pp)) {
1173 /*
1174 * Call the page back-end allocator for more memory.
1175 *
1176 * XXX: We never wait, so should we bother unlocking
1177 * the pool descriptor?
1178 */
1179 simple_unlock(&pp->pr_slock);
1180 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1181 if (__predict_true(cp != NULL))
1182 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1183 simple_lock(&pp->pr_slock);
1184 if (__predict_false(cp == NULL || ph == NULL)) {
1185 if (cp != NULL)
1186 pool_allocator_free(pp, cp);
1187 error = ENOMEM;
1188 break;
1189 }
1190 pool_prime_page(pp, cp, ph);
1191 pp->pr_npagealloc++;
1192 }
1193
1194 return (error);
1195 }
1196
1197 void
1198 pool_setlowat(struct pool *pp, int n)
1199 {
1200
1201 simple_lock(&pp->pr_slock);
1202
1203 pp->pr_minitems = n;
1204 pp->pr_minpages = (n == 0)
1205 ? 0
1206 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1207
1208 /* Make sure we're caught up with the newly-set low water mark. */
1209 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1210 /*
1211 * XXX: Should we log a warning? Should we set up a timeout
1212 * to try again in a second or so? The latter could break
1213 * a caller's assumptions about interrupt protection, etc.
1214 */
1215 }
1216
1217 simple_unlock(&pp->pr_slock);
1218 }
1219
1220 void
1221 pool_sethiwat(struct pool *pp, int n)
1222 {
1223
1224 simple_lock(&pp->pr_slock);
1225
1226 pp->pr_maxpages = (n == 0)
1227 ? 0
1228 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1229
1230 simple_unlock(&pp->pr_slock);
1231 }
1232
1233 void
1234 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1235 {
1236
1237 simple_lock(&pp->pr_slock);
1238
1239 pp->pr_hardlimit = n;
1240 pp->pr_hardlimit_warning = warnmess;
1241 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1242 pp->pr_hardlimit_warning_last.tv_sec = 0;
1243 pp->pr_hardlimit_warning_last.tv_usec = 0;
1244
1245 /*
1246 * In-line version of pool_sethiwat(), because we don't want to
1247 * release the lock.
1248 */
1249 pp->pr_maxpages = (n == 0)
1250 ? 0
1251 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1252
1253 simple_unlock(&pp->pr_slock);
1254 }
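/*
 * Illustrative sketch (editor's addition): a typical tuning sequence
 * after pool_init().  foo_pool and the numbers are hypothetical; the
 * low water mark keeps 16 items primed, the high water mark lets idle
 * pages beyond roughly 1024 items' worth be released, and the hard
 * limit caps outstanding items at 2048 with the warning logged at most
 * once a minute.
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 1024);
 *	pool_sethardlimit(&foo_pool, 2048,
 *	    "WARNING: foo pool limit reached", 60);
 */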
1255
1256 /*
1257 * Release all complete pages that have not been used recently.
1258 */
1259 int
1260 #ifdef POOL_DIAGNOSTIC
1261 _pool_reclaim(struct pool *pp, const char *file, long line)
1262 #else
1263 pool_reclaim(struct pool *pp)
1264 #endif
1265 {
1266 struct pool_item_header *ph, *phnext;
1267 struct pool_cache *pc;
1268 struct timeval curtime;
1269 struct pool_pagelist pq;
1270 int s;
1271
1272 if (pp->pr_drain_hook != NULL) {
1273 /*
1274 * The drain hook must be called with the pool unlocked.
1275 */
1276 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1277 }
1278
1279 if (simple_lock_try(&pp->pr_slock) == 0)
1280 return (0);
1281 pr_enter(pp, file, line);
1282
1283 TAILQ_INIT(&pq);
1284
1285 /*
1286 * Reclaim items from the pool's caches.
1287 */
1288 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1289 pool_cache_reclaim(pc);
1290
1291 s = splclock();
1292 curtime = mono_time;
1293 splx(s);
1294
1295 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1296 phnext = TAILQ_NEXT(ph, ph_pagelist);
1297
1298 /* Check our minimum page claim */
1299 if (pp->pr_npages <= pp->pr_minpages)
1300 break;
1301
1302 if (ph->ph_nmissing == 0) {
1303 struct timeval diff;
1304 timersub(&curtime, &ph->ph_time, &diff);
1305 if (diff.tv_sec < pool_inactive_time)
1306 continue;
1307
1308 /*
1309 * If freeing this page would put us below
1310 * the low water mark, stop now.
1311 */
1312 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1313 pp->pr_minitems)
1314 break;
1315
1316 pr_rmpage(pp, ph, &pq);
1317 }
1318 }
1319
1320 pr_leave(pp);
1321 simple_unlock(&pp->pr_slock);
1322 if (TAILQ_EMPTY(&pq))
1323 return (0);
1324
1325 while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1326 TAILQ_REMOVE(&pq, ph, ph_pagelist);
1327 pool_allocator_free(pp, ph->ph_page);
1328 if (pp->pr_roflags & PR_PHINPAGE) {
1329 continue;
1330 }
1331 LIST_REMOVE(ph, ph_hashlist);
1332 s = splhigh();
1333 pool_put(&phpool, ph);
1334 splx(s);
1335 }
1336
1337 return (1);
1338 }
1339
1340 /*
1341 * Drain pools, one at a time.
1342 *
1343 * Note, we must never be called from an interrupt context.
1344 */
1345 void
1346 pool_drain(void *arg)
1347 {
1348 struct pool *pp;
1349 int s;
1350
1351 pp = NULL;
1352 s = splvm();
1353 simple_lock(&pool_head_slock);
1354 if (drainpp == NULL) {
1355 drainpp = TAILQ_FIRST(&pool_head);
1356 }
1357 if (drainpp) {
1358 pp = drainpp;
1359 drainpp = TAILQ_NEXT(pp, pr_poollist);
1360 }
1361 simple_unlock(&pool_head_slock);
1362 pool_reclaim(pp);
1363 splx(s);
1364 }
1365
1366 /*
1367 * Diagnostic helpers.
1368 */
1369 void
1370 pool_print(struct pool *pp, const char *modif)
1371 {
1372 int s;
1373
1374 s = splvm();
1375 if (simple_lock_try(&pp->pr_slock) == 0) {
1376 printf("pool %s is locked; try again later\n",
1377 pp->pr_wchan);
1378 splx(s);
1379 return;
1380 }
1381 pool_print1(pp, modif, printf);
1382 simple_unlock(&pp->pr_slock);
1383 splx(s);
1384 }
1385
1386 void
1387 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1388 {
1389 int didlock = 0;
1390
1391 if (pp == NULL) {
1392 (*pr)("Must specify a pool to print.\n");
1393 return;
1394 }
1395
1396 /*
1397 * Called from DDB; interrupts should be blocked, and all
1398 * other processors should be paused. We can skip locking
1399 * the pool in this case.
1400 *
1401 * We do a simple_lock_try() just to print the lock
1402 * status, however.
1403 */
1404
1405 if (simple_lock_try(&pp->pr_slock) == 0)
1406 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1407 else
1408 didlock = 1;
1409
1410 pool_print1(pp, modif, pr);
1411
1412 if (didlock)
1413 simple_unlock(&pp->pr_slock);
1414 }
1415
1416 static void
1417 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1418 {
1419 struct pool_item_header *ph;
1420 struct pool_cache *pc;
1421 struct pool_cache_group *pcg;
1422 #ifdef DIAGNOSTIC
1423 struct pool_item *pi;
1424 #endif
1425 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1426 char c;
1427
1428 while ((c = *modif++) != '\0') {
1429 if (c == 'l')
1430 print_log = 1;
1431 if (c == 'p')
1432 print_pagelist = 1;
1433 if (c == 'c')
1434 print_cache = 1;
1436 }
1437
1438 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1439 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1440 pp->pr_roflags);
1441 (*pr)("\talloc %p\n", pp->pr_alloc);
1442 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1443 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1444 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1445 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1446
1447 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1448 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1449 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1450 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1451
1452 if (print_pagelist == 0)
1453 goto skip_pagelist;
1454
1455 if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1456 (*pr)("\n\tpage list:\n");
1457 for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1458 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1459 ph->ph_page, ph->ph_nmissing,
1460 (u_long)ph->ph_time.tv_sec,
1461 (u_long)ph->ph_time.tv_usec);
1462 #ifdef DIAGNOSTIC
1463 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1464 if (pi->pi_magic != PI_MAGIC) {
1465 (*pr)("\t\t\titem %p, magic 0x%x\n",
1466 pi, pi->pi_magic);
1467 }
1468 }
1469 #endif
1470 }
1471 if (pp->pr_curpage == NULL)
1472 (*pr)("\tno current page\n");
1473 else
1474 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1475
1476 skip_pagelist:
1477
1478 if (print_log == 0)
1479 goto skip_log;
1480
1481 (*pr)("\n");
1482 if ((pp->pr_roflags & PR_LOGGING) == 0)
1483 (*pr)("\tno log\n");
1484 else
1485 pr_printlog(pp, NULL, pr);
1486
1487 skip_log:
1488
1489 if (print_cache == 0)
1490 goto skip_cache;
1491
1492 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1493 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1494 pc->pc_allocfrom, pc->pc_freeto);
1495 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1496 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1497 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1498 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1499 for (i = 0; i < PCG_NOBJECTS; i++)
1500 (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1501 }
1502 }
1503
1504 skip_cache:
1505
1506 pr_enter_check(pp, pr);
1507 }
1508
1509 int
1510 pool_chk(struct pool *pp, const char *label)
1511 {
1512 struct pool_item_header *ph;
1513 int r = 0;
1514
1515 simple_lock(&pp->pr_slock);
1516
1517 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1518 struct pool_item *pi;
1519 int n;
1520 caddr_t page;
1521
1522 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1523 if (page != ph->ph_page &&
1524 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1525 if (label != NULL)
1526 printf("%s: ", label);
1527 printf("pool(%p:%s): page inconsistency: page %p;"
1528 " at page head addr %p (p %p)\n", pp,
1529 pp->pr_wchan, ph->ph_page,
1530 ph, page);
1531 r++;
1532 goto out;
1533 }
1534
1535 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1536 pi != NULL;
1537 pi = TAILQ_NEXT(pi,pi_list), n++) {
1538
1539 #ifdef DIAGNOSTIC
1540 if (pi->pi_magic != PI_MAGIC) {
1541 if (label != NULL)
1542 printf("%s: ", label);
1543 printf("pool(%s): free list modified: magic=%x;"
1544 " page %p; item ordinal %d;"
1545 " addr %p (p %p)\n",
1546 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1547 n, pi, page);
1548 panic("pool");
1549 }
1550 #endif
1551 page =
1552 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1553 if (page == ph->ph_page)
1554 continue;
1555
1556 if (label != NULL)
1557 printf("%s: ", label);
1558 printf("pool(%p:%s): page inconsistency: page %p;"
1559 " item ordinal %d; addr %p (p %p)\n", pp,
1560 pp->pr_wchan, ph->ph_page,
1561 n, pi, page);
1562 r++;
1563 goto out;
1564 }
1565 }
1566 out:
1567 simple_unlock(&pp->pr_slock);
1568 return (r);
1569 }
1570
1571 /*
1572 * pool_cache_init:
1573 *
1574 * Initialize a pool cache.
1575 *
1576 * NOTE: If the pool must be protected from interrupts, we expect
1577 * to be called at the appropriate interrupt priority level.
1578 */
1579 void
1580 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1581 int (*ctor)(void *, void *, int),
1582 void (*dtor)(void *, void *),
1583 void *arg)
1584 {
1585
1586 TAILQ_INIT(&pc->pc_grouplist);
1587 simple_lock_init(&pc->pc_slock);
1588
1589 pc->pc_allocfrom = NULL;
1590 pc->pc_freeto = NULL;
1591 pc->pc_pool = pp;
1592
1593 pc->pc_ctor = ctor;
1594 pc->pc_dtor = dtor;
1595 pc->pc_arg = arg;
1596
1597 pc->pc_hits = 0;
1598 pc->pc_misses = 0;
1599
1600 pc->pc_ngroups = 0;
1601
1602 pc->pc_nitems = 0;
1603
1604 simple_lock(&pp->pr_slock);
1605 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1606 simple_unlock(&pp->pr_slock);
1607 }
1608
1609 /*
1610 * pool_cache_destroy:
1611 *
1612 * Destroy a pool cache.
1613 */
1614 void
1615 pool_cache_destroy(struct pool_cache *pc)
1616 {
1617 struct pool *pp = pc->pc_pool;
1618
1619 /* First, invalidate the entire cache. */
1620 pool_cache_invalidate(pc);
1621
1622 /* ...and remove it from the pool's cache list. */
1623 simple_lock(&pp->pr_slock);
1624 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1625 simple_unlock(&pp->pr_slock);
1626 }
1627
1628 static __inline void *
1629 pcg_get(struct pool_cache_group *pcg)
1630 {
1631 void *object;
1632 u_int idx;
1633
1634 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1635 KASSERT(pcg->pcg_avail != 0);
1636 idx = --pcg->pcg_avail;
1637
1638 KASSERT(pcg->pcg_objects[idx] != NULL);
1639 object = pcg->pcg_objects[idx];
1640 pcg->pcg_objects[idx] = NULL;
1641
1642 return (object);
1643 }
1644
1645 static __inline void
1646 pcg_put(struct pool_cache_group *pcg, void *object)
1647 {
1648 u_int idx;
1649
1650 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1651 idx = pcg->pcg_avail++;
1652
1653 KASSERT(pcg->pcg_objects[idx] == NULL);
1654 pcg->pcg_objects[idx] = object;
1655 }
1656
1657 /*
1658 * pool_cache_get:
1659 *
1660 * Get an object from a pool cache.
1661 */
1662 void *
1663 pool_cache_get(struct pool_cache *pc, int flags)
1664 {
1665 struct pool_cache_group *pcg;
1666 void *object;
1667
1668 #ifdef LOCKDEBUG
1669 if (flags & PR_WAITOK)
1670 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1671 #endif
1672
1673 simple_lock(&pc->pc_slock);
1674
1675 if ((pcg = pc->pc_allocfrom) == NULL) {
1676 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1677 if (pcg->pcg_avail != 0) {
1678 pc->pc_allocfrom = pcg;
1679 goto have_group;
1680 }
1681 }
1682
1683 /*
1684 * No groups with any available objects. Allocate
1685 * a new object, construct it, and return it to
1686 * the caller. We will allocate a group, if necessary,
1687 * when the object is freed back to the cache.
1688 */
1689 pc->pc_misses++;
1690 simple_unlock(&pc->pc_slock);
1691 object = pool_get(pc->pc_pool, flags);
1692 if (object != NULL && pc->pc_ctor != NULL) {
1693 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1694 pool_put(pc->pc_pool, object);
1695 return (NULL);
1696 }
1697 }
1698 return (object);
1699 }
1700
1701 have_group:
1702 pc->pc_hits++;
1703 pc->pc_nitems--;
1704 object = pcg_get(pcg);
1705
1706 if (pcg->pcg_avail == 0)
1707 pc->pc_allocfrom = NULL;
1708
1709 simple_unlock(&pc->pc_slock);
1710
1711 return (object);
1712 }
1713
1714 /*
1715 * pool_cache_put:
1716 *
1717 * Put an object back to the pool cache.
1718 */
1719 void
1720 pool_cache_put(struct pool_cache *pc, void *object)
1721 {
1722 struct pool_cache_group *pcg;
1723 int s;
1724
1725 simple_lock(&pc->pc_slock);
1726
1727 if ((pcg = pc->pc_freeto) == NULL) {
1728 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1729 if (pcg->pcg_avail != PCG_NOBJECTS) {
1730 pc->pc_freeto = pcg;
1731 goto have_group;
1732 }
1733 }
1734
1735 /*
1736 * No empty groups to free the object to. Attempt to
1737 * allocate one.
1738 */
1739 simple_unlock(&pc->pc_slock);
1740 s = splvm();
1741 pcg = pool_get(&pcgpool, PR_NOWAIT);
1742 splx(s);
1743 if (pcg != NULL) {
1744 memset(pcg, 0, sizeof(*pcg));
1745 simple_lock(&pc->pc_slock);
1746 pc->pc_ngroups++;
1747 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1748 if (pc->pc_freeto == NULL)
1749 pc->pc_freeto = pcg;
1750 goto have_group;
1751 }
1752
1753 /*
1754 * Unable to allocate a cache group; destruct the object
1755 * and free it back to the pool.
1756 */
1757 pool_cache_destruct_object(pc, object);
1758 return;
1759 }
1760
1761 have_group:
1762 pc->pc_nitems++;
1763 pcg_put(pcg, object);
1764
1765 if (pcg->pcg_avail == PCG_NOBJECTS)
1766 pc->pc_freeto = NULL;
1767
1768 simple_unlock(&pc->pc_slock);
1769 }
1770
1771 /*
1772 * pool_cache_destruct_object:
1773 *
1774 * Force destruction of an object and its release back into
1775 * the pool.
1776 */
1777 void
1778 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1779 {
1780
1781 if (pc->pc_dtor != NULL)
1782 (*pc->pc_dtor)(pc->pc_arg, object);
1783 pool_put(pc->pc_pool, object);
1784 }
1785
1786 /*
1787 * pool_cache_do_invalidate:
1788 *
1789 * This internal function implements pool_cache_invalidate() and
1790 * pool_cache_reclaim().
1791 */
1792 static void
1793 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1794 void (*putit)(struct pool *, void *))
1795 {
1796 struct pool_cache_group *pcg, *npcg;
1797 void *object;
1798 int s;
1799
1800 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1801 pcg = npcg) {
1802 npcg = TAILQ_NEXT(pcg, pcg_list);
1803 while (pcg->pcg_avail != 0) {
1804 pc->pc_nitems--;
1805 object = pcg_get(pcg);
1806 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1807 pc->pc_allocfrom = NULL;
1808 if (pc->pc_dtor != NULL)
1809 (*pc->pc_dtor)(pc->pc_arg, object);
1810 (*putit)(pc->pc_pool, object);
1811 }
1812 if (free_groups) {
1813 pc->pc_ngroups--;
1814 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1815 if (pc->pc_freeto == pcg)
1816 pc->pc_freeto = NULL;
1817 s = splvm();
1818 pool_put(&pcgpool, pcg);
1819 splx(s);
1820 }
1821 }
1822 }
1823
1824 /*
1825 * pool_cache_invalidate:
1826 *
1827 * Invalidate a pool cache (destruct and release all of the
1828 * cached objects).
1829 */
1830 void
1831 pool_cache_invalidate(struct pool_cache *pc)
1832 {
1833
1834 simple_lock(&pc->pc_slock);
1835 pool_cache_do_invalidate(pc, 0, pool_put);
1836 simple_unlock(&pc->pc_slock);
1837 }
1838
1839 /*
1840 * pool_cache_reclaim:
1841 *
1842 * Reclaim a pool cache for pool_reclaim().
1843 */
1844 static void
1845 pool_cache_reclaim(struct pool_cache *pc)
1846 {
1847
1848 simple_lock(&pc->pc_slock);
1849 pool_cache_do_invalidate(pc, 1, pool_do_put);
1850 simple_unlock(&pc->pc_slock);
1851 }
1852
1853 /*
1854 * Pool backend allocators.
1855 *
1856 * Each pool has a backend allocator that handles allocation, deallocation,
1857 * and any additional draining that might be needed.
1858 *
1859 * We provide two standard allocators:
1860 *
1861 * pool_allocator_kmem - the default when no allocator is specified
1862 *
1863 * pool_allocator_nointr - used for pools that will not be accessed
1864 * in interrupt context.
1865 */
1866 void *pool_page_alloc(struct pool *, int);
1867 void pool_page_free(struct pool *, void *);
1868
1869 struct pool_allocator pool_allocator_kmem = {
1870 pool_page_alloc, pool_page_free, 0,
1871 };
1872
1873 void *pool_page_alloc_nointr(struct pool *, int);
1874 void pool_page_free_nointr(struct pool *, void *);
1875
1876 struct pool_allocator pool_allocator_nointr = {
1877 pool_page_alloc_nointr, pool_page_free_nointr, 0,
1878 };
1879
1880 #ifdef POOL_SUBPAGE
1881 void *pool_subpage_alloc(struct pool *, int);
1882 void pool_subpage_free(struct pool *, void *);
1883
1884 struct pool_allocator pool_allocator_kmem_subpage = {
1885 pool_subpage_alloc, pool_subpage_free, 0,
1886 };
1887 #endif /* POOL_SUBPAGE */
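/*
 * Illustrative sketch (an editor's addition, not part of the original
 * source): a subsystem with its own VM submap can supply a custom
 * back-end allocator instead of the standard ones above.
 * foo_page_alloc(), foo_page_free(), foo_allocator and foo_pool are
 * hypothetical; a pa_pagesz of 0 makes pool_init() fill in the default
 * page size.
 *
 *	void *foo_page_alloc(struct pool *, int);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &foo_allocator);
 */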
1888
1889 /*
1890 * We have at least three different resources for the same allocation and
1891 * each resource can be depleted. First, we have the ready elements in the
1892 * pool. Then we have the resource (typically a vm_map) for this allocator.
1893 * Finally, we have physical memory. Waiting for any of these can be
1894 * unnecessary when any other is freed, but the kernel doesn't support
1895 * sleeping on multiple wait channels, so we have to employ another strategy.
1896 *
1897 * The caller sleeps on the pool (so that it can be awakened when an item
1898 * is returned to the pool), but we set PA_WANT on the allocator. When a
1899 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1900 * will wake up all sleeping pools belonging to this allocator.
1901 *
1902 * XXX Thundering herd.
1903 */
1904 void *
1905 pool_allocator_alloc(struct pool *org, int flags)
1906 {
1907 struct pool_allocator *pa = org->pr_alloc;
1908 struct pool *pp, *start;
1909 int s, freed;
1910 void *res;
1911
1912 do {
1913 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1914 return (res);
1915 if ((flags & PR_WAITOK) == 0) {
1916 /*
1917 * We only run the drain hook here if PR_NOWAIT.
1918 * In other cases, the hook will be run in
1919 * pool_reclaim().
1920 */
1921 if (org->pr_drain_hook != NULL) {
1922 (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1923 flags);
1924 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1925 return (res);
1926 }
1927 break;
1928 }
1929
1930 /*
1931 * Drain all pools, except "org", that use this
1932 * allocator. We do this to reclaim VA space.
1933 * pa_alloc is responsible for waiting for
1934 * physical memory.
1935 *
1936 * XXX We risk looping forever if someone calls
1937 * pool_destroy on "start". But there is no
1938 * other way to have potentially sleeping pool_reclaim,
1939 * non-sleeping locks on pool_allocator, and some
1940 * stirring of drained pools in the allocator.
1941 *
1942 * XXX Maybe we should use pool_head_slock for locking
1943 * the allocators?
1944 */
1945 freed = 0;
1946
1947 s = splvm();
1948 simple_lock(&pa->pa_slock);
1949 pp = start = TAILQ_FIRST(&pa->pa_list);
1950 do {
1951 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1952 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1953 if (pp == org)
1954 continue;
1955 simple_unlock(&pa->pa_slock);
1956 freed = pool_reclaim(pp);
1957 simple_lock(&pa->pa_slock);
1958 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1959 freed == 0);
1960
1961 if (freed == 0) {
1962 /*
1963 * We set PA_WANT here, the caller will most likely
1964 * sleep waiting for pages (if not, this won't hurt
1965 * that much), and there is no way to set this in
1966 * the caller without violating locking order.
1967 */
1968 pa->pa_flags |= PA_WANT;
1969 }
1970 simple_unlock(&pa->pa_slock);
1971 splx(s);
1972 } while (freed);
1973 return (NULL);
1974 }
1975
1976 void
1977 pool_allocator_free(struct pool *pp, void *v)
1978 {
1979 struct pool_allocator *pa = pp->pr_alloc;
1980 int s;
1981
1982 (*pa->pa_free)(pp, v);
1983
1984 s = splvm();
1985 simple_lock(&pa->pa_slock);
1986 if ((pa->pa_flags & PA_WANT) == 0) {
1987 simple_unlock(&pa->pa_slock);
1988 splx(s);
1989 return;
1990 }
1991
1992 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1993 simple_lock(&pp->pr_slock);
1994 if ((pp->pr_flags & PR_WANTED) != 0) {
1995 pp->pr_flags &= ~PR_WANTED;
1996 wakeup(pp);
1997 }
1998 simple_unlock(&pp->pr_slock);
1999 }
2000 pa->pa_flags &= ~PA_WANT;
2001 simple_unlock(&pa->pa_slock);
2002 splx(s);
2003 }
2004
2005 void *
2006 pool_page_alloc(struct pool *pp, int flags)
2007 {
2008 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2009
2010 return ((void *) uvm_km_alloc_poolpage(waitok));
2011 }
2012
2013 void
2014 pool_page_free(struct pool *pp, void *v)
2015 {
2016
2017 uvm_km_free_poolpage((vaddr_t) v);
2018 }
2019
2020 #ifdef POOL_SUBPAGE
2021 /* Sub-page allocator, for machines with large hardware pages. */
2022 void *
2023 pool_subpage_alloc(struct pool *pp, int flags)
2024 {
2025
2026 return (pool_get(&psppool, flags));
2027 }
2028
2029 void
2030 pool_subpage_free(struct pool *pp, void *v)
2031 {
2032
2033 pool_put(&psppool, v);
2034 }
2035
2036 /* We don't provide a real nointr allocator. Maybe later. */
2037 void *
2038 pool_page_alloc_nointr(struct pool *pp, int flags)
2039 {
2040
2041 return (pool_subpage_alloc(pp, flags));
2042 }
2043
2044 void
2045 pool_page_free_nointr(struct pool *pp, void *v)
2046 {
2047
2048 pool_subpage_free(pp, v);
2049 }
2050 #else
2051 void *
2052 pool_page_alloc_nointr(struct pool *pp, int flags)
2053 {
2054 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2055
2056 return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2057 uvm.kernel_object, waitok));
2058 }
2059
2060 void
2061 pool_page_free_nointr(struct pool *pp, void *v)
2062 {
2063
2064 uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2065 }
2066 #endif /* POOL_SUBPAGE */
2067