1 /* $NetBSD: subr_pool.c,v 1.66 2002/03/08 20:48:41 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.66 2002/03/08 20:48:41 thorpej Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according
63 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64 * in the pool structure and the individual pool items are on a linked list
65 * headed by `ph_itemlist' in each page header. The memory for building
66 * the page list is either taken from the allocated pages themselves (for
67 * small pool items) or taken from an internal pool of page headers (`phpool').
68 */
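/*
 * Example (illustrative sketch only; "struct foo", foo_pool and "foopl"
 * are hypothetical names):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	...
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *
 * Passing a NULL allocator selects the default back-end page allocator;
 * pool_get() and pool_put() must be called at the appropriate spl level
 * for the pool (see below).
 */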
69
70 /* List of all pools */
71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72
73 /* Private pool for page header structures */
74 static struct pool phpool;
75
76 #ifdef POOL_SUBPAGE
77 /* Pool of subpages for use by normal pools. */
78 static struct pool psppool;
79 #endif
80
81 /* # of seconds to retain page after last use */
82 int pool_inactive_time = 10;
83
84 /* Next candidate for drainage (see pool_drain()) */
85 static struct pool *drainpp;
86
87 /* This spin lock protects both pool_head and drainpp. */
88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89
90 struct pool_item_header {
91 /* Page headers */
92 TAILQ_ENTRY(pool_item_header)
93 ph_pagelist; /* pool page list */
94 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95 LIST_ENTRY(pool_item_header)
96 ph_hashlist; /* Off-page page headers */
97 int ph_nmissing; /* # of chunks in use */
98 caddr_t ph_page; /* this page's address */
99 struct timeval ph_time; /* last referenced */
100 };
101 TAILQ_HEAD(pool_pagelist,pool_item_header);
102
103 struct pool_item {
104 #ifdef DIAGNOSTIC
105 int pi_magic;
106 #endif
107 #define PI_MAGIC 0xdeadbeef
108 /* Other entries use only this list entry */
109 TAILQ_ENTRY(pool_item) pi_list;
110 };
111
112 #define PR_HASH_INDEX(pp,addr) \
113 (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114 (PR_HASHTABSIZE - 1))
115
116 #define POOL_NEEDS_CATCHUP(pp) \
117 ((pp)->pr_nitems < (pp)->pr_minitems)
118
119 /*
120 * Pool cache management.
121 *
122 * Pool caches provide a way for constructed objects to be cached by the
123 * pool subsystem. This can lead to performance improvements by avoiding
124 * needless object construction/destruction; destruction is deferred
125 * until it is absolutely necessary.
126 *
127 * Caches are grouped into cache groups. Each cache group references
128 * up to 16 constructed objects. When a cache allocates an object
129 * from the pool, it calls the object's constructor and places it into
130 * a cache group. When a cache group frees an object back to the pool,
131 * it first calls the object's destructor. This allows the object to
132 * persist in constructed form while freed to the cache.
133 *
134 * Multiple caches may exist for each pool. This allows a single
135 * object type to have multiple constructed forms. The pool references
136 * each cache, so that when a pool is drained by the pagedaemon, it can
137 * drain each individual cache as well. Each time a cache is drained,
138 * the most idle cache group is freed to the pool in its entirety.
139 *
140 * Pool caches are laid on top of pools. By layering them, we can avoid
141 * the complexity of cache management for pools which would not benefit
142 * from it.
143 */
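/*
 * Example (illustrative sketch only; the "foo" names, foo_ctor and
 * foo_dtor are hypothetical):
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	...
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 *
 * The constructor and destructor may be NULL when objects need no
 * construction beyond the raw pool allocation.
 */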
144
145 /* The cache group pool. */
146 static struct pool pcgpool;
147
148 /* The pool cache group. */
149 #define PCG_NOBJECTS 16
150 struct pool_cache_group {
151 TAILQ_ENTRY(pool_cache_group)
152 pcg_list; /* link in the pool cache's group list */
153 u_int pcg_avail; /* # available objects */
154 /* pointers to the objects */
155 void *pcg_objects[PCG_NOBJECTS];
156 };
157
158 static void pool_cache_reclaim(struct pool_cache *);
159
160 static int pool_catchup(struct pool *);
161 static void pool_prime_page(struct pool *, caddr_t,
162 struct pool_item_header *);
163
164 void *pool_allocator_alloc(struct pool *, int);
165 void pool_allocator_free(struct pool *, void *);
166
167 static void pool_print1(struct pool *, const char *,
168 void (*)(const char *, ...));
169
170 /*
171 * Pool log entry. An array of these is allocated in pool_init().
172 */
173 struct pool_log {
174 const char *pl_file;
175 long pl_line;
176 int pl_action;
177 #define PRLOG_GET 1
178 #define PRLOG_PUT 2
179 void *pl_addr;
180 };
181
182 /* Number of entries in pool log buffers */
183 #ifndef POOL_LOGSIZE
184 #define POOL_LOGSIZE 10
185 #endif
186
187 int pool_logsize = POOL_LOGSIZE;
188
189 #ifdef POOL_DIAGNOSTIC
190 static __inline void
191 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
192 {
193 int n = pp->pr_curlogentry;
194 struct pool_log *pl;
195
196 if ((pp->pr_roflags & PR_LOGGING) == 0)
197 return;
198
199 /*
200 * Fill in the current entry. Wrap around and overwrite
201 * the oldest entry if necessary.
202 */
203 pl = &pp->pr_log[n];
204 pl->pl_file = file;
205 pl->pl_line = line;
206 pl->pl_action = action;
207 pl->pl_addr = v;
208 if (++n >= pp->pr_logsize)
209 n = 0;
210 pp->pr_curlogentry = n;
211 }
212
213 static void
214 pr_printlog(struct pool *pp, struct pool_item *pi,
215 void (*pr)(const char *, ...))
216 {
217 int i = pp->pr_logsize;
218 int n = pp->pr_curlogentry;
219
220 if ((pp->pr_roflags & PR_LOGGING) == 0)
221 return;
222
223 /*
224 * Print all entries in this pool's log.
225 */
226 while (i-- > 0) {
227 struct pool_log *pl = &pp->pr_log[n];
228 if (pl->pl_action != 0) {
229 if (pi == NULL || pi == pl->pl_addr) {
230 (*pr)("\tlog entry %d:\n", i);
231 (*pr)("\t\taction = %s, addr = %p\n",
232 pl->pl_action == PRLOG_GET ? "get" : "put",
233 pl->pl_addr);
234 (*pr)("\t\tfile: %s at line %lu\n",
235 pl->pl_file, pl->pl_line);
236 }
237 }
238 if (++n >= pp->pr_logsize)
239 n = 0;
240 }
241 }
242
243 static __inline void
244 pr_enter(struct pool *pp, const char *file, long line)
245 {
246
247 if (__predict_false(pp->pr_entered_file != NULL)) {
248 printf("pool %s: reentrancy at file %s line %ld\n",
249 pp->pr_wchan, file, line);
250 printf(" previous entry at file %s line %ld\n",
251 pp->pr_entered_file, pp->pr_entered_line);
252 panic("pr_enter");
253 }
254
255 pp->pr_entered_file = file;
256 pp->pr_entered_line = line;
257 }
258
259 static __inline void
260 pr_leave(struct pool *pp)
261 {
262
263 if (__predict_false(pp->pr_entered_file == NULL)) {
264 printf("pool %s not entered?\n", pp->pr_wchan);
265 panic("pr_leave");
266 }
267
268 pp->pr_entered_file = NULL;
269 pp->pr_entered_line = 0;
270 }
271
272 static __inline void
273 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
274 {
275
276 if (pp->pr_entered_file != NULL)
277 (*pr)("\n\tcurrently entered from file %s line %ld\n",
278 pp->pr_entered_file, pp->pr_entered_line);
279 }
280 #else
281 #define pr_log(pp, v, action, file, line)
282 #define pr_printlog(pp, pi, pr)
283 #define pr_enter(pp, file, line)
284 #define pr_leave(pp)
285 #define pr_enter_check(pp, pr)
286 #endif /* POOL_DIAGNOSTIC */
287
288 /*
289 * Return the pool page header based on page address.
290 */
291 static __inline struct pool_item_header *
292 pr_find_pagehead(struct pool *pp, caddr_t page)
293 {
294 struct pool_item_header *ph;
295
296 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
297 return ((struct pool_item_header *)(page + pp->pr_phoffset));
298
299 for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300 ph != NULL;
301 ph = LIST_NEXT(ph, ph_hashlist)) {
302 if (ph->ph_page == page)
303 return (ph);
304 }
305 return (NULL);
306 }
307
308 /*
309 * Remove a page from the pool.
310 */
311 static __inline void
312 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313 struct pool_pagelist *pq)
314 {
315 int s;
316
317 /*
318 * If the page was idle, decrement the idle page count.
319 */
320 if (ph->ph_nmissing == 0) {
321 #ifdef DIAGNOSTIC
322 if (pp->pr_nidle == 0)
323 panic("pr_rmpage: nidle inconsistent");
324 if (pp->pr_nitems < pp->pr_itemsperpage)
325 panic("pr_rmpage: nitems inconsistent");
326 #endif
327 pp->pr_nidle--;
328 }
329
330 pp->pr_nitems -= pp->pr_itemsperpage;
331
332 /*
333 * Unlink a page from the pool and release it (or queue it for release).
334 */
335 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
336 if (pq) {
337 TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338 } else {
339 pool_allocator_free(pp, ph->ph_page);
340 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341 LIST_REMOVE(ph, ph_hashlist);
342 s = splhigh();
343 pool_put(&phpool, ph);
344 splx(s);
345 }
346 }
347 pp->pr_npages--;
348 pp->pr_npagefree++;
349
350 if (pp->pr_curpage == ph) {
351 /*
352 * Find a new non-empty page header, if any.
353 * Start search from the page head, to increase the
354 * chance for "high water" pages to be freed.
355 */
356 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
357 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358 break;
359
360 pp->pr_curpage = ph;
361 }
362 }
363
364 /*
365 * Initialize the given pool resource structure.
366 *
367 * We export this routine to allow other kernel parts to declare
368 * static pools that must be initialized before malloc() is available.
369 */
370 void
371 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
372 const char *wchan, struct pool_allocator *palloc)
373 {
374 int off, slack, i;
375
376 #ifdef POOL_DIAGNOSTIC
377 /*
378 * Always log if POOL_DIAGNOSTIC is defined.
379 */
380 if (pool_logsize != 0)
381 flags |= PR_LOGGING;
382 #endif
383
384 #ifdef POOL_SUBPAGE
385 /*
386 * XXX We don't provide a real `nointr' back-end
387 * yet; all sub-pages come from a kmem back-end.
388 * Maybe some day...
389 */
390 if (palloc == NULL) {
391 extern struct pool_allocator pool_allocator_kmem_subpage;
392 palloc = &pool_allocator_kmem_subpage;
393 }
394 /*
395 * We'll assume any user-specified back-end allocator
396 * will deal with sub-pages, or simply not care.
397 */
398 #else
399 if (palloc == NULL)
400 palloc = &pool_allocator_kmem;
401 #endif /* POOL_SUBPAGE */
402 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403 if (palloc->pa_pagesz == 0) {
404 #ifdef POOL_SUBPAGE
405 if (palloc == &pool_allocator_kmem)
406 palloc->pa_pagesz = PAGE_SIZE;
407 else
408 palloc->pa_pagesz = POOL_SUBPAGE;
409 #else
410 palloc->pa_pagesz = PAGE_SIZE;
411 #endif /* POOL_SUBPAGE */
412 }
413
414 TAILQ_INIT(&palloc->pa_list);
415
416 simple_lock_init(&palloc->pa_slock);
417 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419 palloc->pa_flags |= PA_INITIALIZED;
420 }
421
422 if (align == 0)
423 align = ALIGN(1);
424
425 if (size < sizeof(struct pool_item))
426 size = sizeof(struct pool_item);
427
428 size = ALIGN(size);
429 #ifdef DIAGNOSTIC
430 if (size > palloc->pa_pagesz)
431 panic("pool_init: pool item size (%lu) too large",
432 (u_long)size);
433 #endif
434
435 /*
436 * Initialize the pool structure.
437 */
438 TAILQ_INIT(&pp->pr_pagelist);
439 TAILQ_INIT(&pp->pr_cachelist);
440 pp->pr_curpage = NULL;
441 pp->pr_npages = 0;
442 pp->pr_minitems = 0;
443 pp->pr_minpages = 0;
444 pp->pr_maxpages = UINT_MAX;
445 pp->pr_roflags = flags;
446 pp->pr_flags = 0;
447 pp->pr_size = size;
448 pp->pr_align = align;
449 pp->pr_wchan = wchan;
450 pp->pr_alloc = palloc;
451 pp->pr_nitems = 0;
452 pp->pr_nout = 0;
453 pp->pr_hardlimit = UINT_MAX;
454 pp->pr_hardlimit_warning = NULL;
455 pp->pr_hardlimit_ratecap.tv_sec = 0;
456 pp->pr_hardlimit_ratecap.tv_usec = 0;
457 pp->pr_hardlimit_warning_last.tv_sec = 0;
458 pp->pr_hardlimit_warning_last.tv_usec = 0;
459
460 /*
461 * Decide whether to put the page header off page to avoid
462 * wasting too large a part of the page. Off-page page headers
463 * go on a hash table, so we can match a returned item
464 * with its header based on the page address.
465 * We use 1/16 of the page size as the threshold (XXX: tune)
466 */
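	/*
	 * (For instance, assuming a 4 KB page size the threshold is 256
	 * bytes: pools with smaller items keep their page header inside
	 * the page, larger ones take a header from `phpool' instead.)
	 */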
467 if (pp->pr_size < palloc->pa_pagesz/16) {
468 /* Use the end of the page for the page header */
469 pp->pr_roflags |= PR_PHINPAGE;
470 pp->pr_phoffset = off = palloc->pa_pagesz -
471 ALIGN(sizeof(struct pool_item_header));
472 } else {
473 /* The page header will be taken from our page header pool */
474 pp->pr_phoffset = 0;
475 off = palloc->pa_pagesz;
476 for (i = 0; i < PR_HASHTABSIZE; i++) {
477 LIST_INIT(&pp->pr_hashtab[i]);
478 }
479 }
480
481 /*
482 * Alignment is to take place at `ioff' within the item. This means
483 * we must reserve up to `align - 1' bytes on the page to allow
484 * appropriate positioning of each item.
485 *
486 * Silently enforce `0 <= ioff < align'.
487 */
488 pp->pr_itemoffset = ioff = ioff % align;
489 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
490 KASSERT(pp->pr_itemsperpage != 0);
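	/*
	 * (Worked example, assuming a 4 KB page with an off-page header,
	 * i.e. off = 4096, and ioff = 0: 512-byte items yield 8 items per
	 * page with no slack; 768-byte items yield 5 items per page and
	 * 256 bytes of slack for the cache coloring below.)
	 */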
491
492 /*
493 * Use the slack between the chunks and the page header
494 * for "cache coloring".
495 */
496 slack = off - pp->pr_itemsperpage * pp->pr_size;
497 pp->pr_maxcolor = (slack / align) * align;
498 pp->pr_curcolor = 0;
499
500 pp->pr_nget = 0;
501 pp->pr_nfail = 0;
502 pp->pr_nput = 0;
503 pp->pr_npagealloc = 0;
504 pp->pr_npagefree = 0;
505 pp->pr_hiwat = 0;
506 pp->pr_nidle = 0;
507
508 #ifdef POOL_DIAGNOSTIC
509 if (flags & PR_LOGGING) {
510 if (kmem_map == NULL ||
511 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
512 M_TEMP, M_NOWAIT)) == NULL)
513 pp->pr_roflags &= ~PR_LOGGING;
514 pp->pr_curlogentry = 0;
515 pp->pr_logsize = pool_logsize;
516 }
517 #endif
518
519 pp->pr_entered_file = NULL;
520 pp->pr_entered_line = 0;
521
522 simple_lock_init(&pp->pr_slock);
523
524 /*
525 * Initialize private page header pool and cache magazine pool if we
526 * haven't done so yet.
527 * XXX LOCKING.
528 */
529 if (phpool.pr_size == 0) {
530 #ifdef POOL_SUBPAGE
531 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
532 "phpool", &pool_allocator_kmem);
533 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
534 PR_RECURSIVE, "psppool", &pool_allocator_kmem);
535 #else
536 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
537 0, "phpool", NULL);
538 #endif
539 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
540 0, "pcgpool", NULL);
541 }
542
543 /* Insert into the list of all pools. */
544 simple_lock(&pool_head_slock);
545 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
546 simple_unlock(&pool_head_slock);
547
548 /* Insert this into the list of pools using this allocator. */
549 simple_lock(&palloc->pa_slock);
550 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
551 simple_unlock(&palloc->pa_slock);
552 }
553
554 /*
555 * De-commission a pool resource.
556 */
557 void
558 pool_destroy(struct pool *pp)
559 {
560 struct pool_item_header *ph;
561 struct pool_cache *pc;
562
563 /* Locking order: pool_allocator -> pool */
564 simple_lock(&pp->pr_alloc->pa_slock);
565 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
566 simple_unlock(&pp->pr_alloc->pa_slock);
567
568 /* Destroy all caches for this pool. */
569 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
570 pool_cache_destroy(pc);
571
572 #ifdef DIAGNOSTIC
573 if (pp->pr_nout != 0) {
574 pr_printlog(pp, NULL, printf);
575 panic("pool_destroy: pool busy: still out: %u\n",
576 pp->pr_nout);
577 }
578 #endif
579
580 /* Remove all pages */
581 if ((pp->pr_roflags & PR_STATIC) == 0)
582 while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
583 pr_rmpage(pp, ph, NULL);
584
585 /* Remove from global pool list */
586 simple_lock(&pool_head_slock);
587 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
588 if (drainpp == pp) {
589 drainpp = NULL;
590 }
591 simple_unlock(&pool_head_slock);
592
593 #ifdef POOL_DIAGNOSTIC
594 if ((pp->pr_roflags & PR_LOGGING) != 0)
595 free(pp->pr_log, M_TEMP);
596 #endif
597
598 if (pp->pr_roflags & PR_FREEHEADER)
599 free(pp, M_POOL);
600 }
601
602 static __inline struct pool_item_header *
603 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
604 {
605 struct pool_item_header *ph;
606 int s;
607
608 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
609
610 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
611 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
612 else {
613 s = splhigh();
614 ph = pool_get(&phpool, flags);
615 splx(s);
616 }
617
618 return (ph);
619 }
620
621 /*
622 * Grab an item from the pool; must be called at appropriate spl level
623 */
624 void *
625 #ifdef POOL_DIAGNOSTIC
626 _pool_get(struct pool *pp, int flags, const char *file, long line)
627 #else
628 pool_get(struct pool *pp, int flags)
629 #endif
630 {
631 struct pool_item *pi;
632 struct pool_item_header *ph;
633 void *v;
634
635 #ifdef DIAGNOSTIC
636 if (__predict_false((pp->pr_roflags & PR_STATIC) &&
637 (flags & PR_MALLOCOK))) {
638 pr_printlog(pp, NULL, printf);
639 panic("pool_get: static");
640 }
641
642 if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
643 (flags & PR_WAITOK) != 0))
644 panic("pool_get: must have NOWAIT");
645
646 #ifdef LOCKDEBUG
647 if (flags & PR_WAITOK)
648 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
649 #endif
650 #endif /* DIAGNOSTIC */
651
652 simple_lock(&pp->pr_slock);
653 pr_enter(pp, file, line);
654
655 startover:
656 /*
657 * Check to see if we've reached the hard limit. If we have,
658 * and we can wait, then wait until an item has been returned to
659 * the pool.
660 */
661 #ifdef DIAGNOSTIC
662 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
663 pr_leave(pp);
664 simple_unlock(&pp->pr_slock);
665 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
666 }
667 #endif
668 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
669 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
670 /*
671 * XXX: A warning isn't logged in this case. Should
672 * it be?
673 */
674 pp->pr_flags |= PR_WANTED;
675 pr_leave(pp);
676 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
677 pr_enter(pp, file, line);
678 goto startover;
679 }
680
681 /*
682 * Log a message that the hard limit has been hit.
683 */
684 if (pp->pr_hardlimit_warning != NULL &&
685 ratecheck(&pp->pr_hardlimit_warning_last,
686 &pp->pr_hardlimit_ratecap))
687 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
688
689 pp->pr_nfail++;
690
691 pr_leave(pp);
692 simple_unlock(&pp->pr_slock);
693 return (NULL);
694 }
695
696 /*
697 * The convention we use is that if `curpage' is not NULL, then
698 * it points at a non-empty bucket. In particular, `curpage'
699 * never points at a page header which has PR_PHINPAGE set and
700 * has no items in its bucket.
701 */
702 if ((ph = pp->pr_curpage) == NULL) {
703 #ifdef DIAGNOSTIC
704 if (pp->pr_nitems != 0) {
705 simple_unlock(&pp->pr_slock);
706 printf("pool_get: %s: curpage NULL, nitems %u\n",
707 pp->pr_wchan, pp->pr_nitems);
708 panic("pool_get: nitems inconsistent\n");
709 }
710 #endif
711
712 /*
713 * Call the back-end page allocator for more memory.
714 * Release the pool lock, as the back-end page allocator
715 * may block.
716 */
717 pr_leave(pp);
718 simple_unlock(&pp->pr_slock);
719 v = pool_allocator_alloc(pp, flags);
720 if (__predict_true(v != NULL))
721 ph = pool_alloc_item_header(pp, v, flags);
722 simple_lock(&pp->pr_slock);
723 pr_enter(pp, file, line);
724
725 if (__predict_false(v == NULL || ph == NULL)) {
726 if (v != NULL)
727 pool_allocator_free(pp, v);
728
729 /*
730 * We were unable to allocate a page or item
731 * header, but we released the lock during
732 * allocation, so perhaps items were freed
733 * back to the pool. Check for this case.
734 */
735 if (pp->pr_curpage != NULL)
736 goto startover;
737
738 if ((flags & PR_WAITOK) == 0) {
739 pp->pr_nfail++;
740 pr_leave(pp);
741 simple_unlock(&pp->pr_slock);
742 return (NULL);
743 }
744
745 /*
746 * Wait for items to be returned to this pool.
747 *
748 * XXX: maybe we should wake up once a second and
749 * try again?
750 */
751 pp->pr_flags |= PR_WANTED;
752 /* PA_WANTED is already set on the allocator. */
753 pr_leave(pp);
754 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
755 pr_enter(pp, file, line);
756 goto startover;
757 }
758
759 /* We have more memory; add it to the pool */
760 pool_prime_page(pp, v, ph);
761 pp->pr_npagealloc++;
762
763 /* Start the allocation process over. */
764 goto startover;
765 }
766
767 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
768 pr_leave(pp);
769 simple_unlock(&pp->pr_slock);
770 panic("pool_get: %s: page empty", pp->pr_wchan);
771 }
772 #ifdef DIAGNOSTIC
773 if (__predict_false(pp->pr_nitems == 0)) {
774 pr_leave(pp);
775 simple_unlock(&pp->pr_slock);
776 printf("pool_get: %s: items on itemlist, nitems %u\n",
777 pp->pr_wchan, pp->pr_nitems);
778 panic("pool_get: nitems inconsistent\n");
779 }
780 #endif
781
782 #ifdef POOL_DIAGNOSTIC
783 pr_log(pp, v, PRLOG_GET, file, line);
784 #endif
785
786 #ifdef DIAGNOSTIC
787 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
788 pr_printlog(pp, pi, printf);
789 panic("pool_get(%s): free list modified: magic=%x; page %p;"
790 " item addr %p\n",
791 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
792 }
793 #endif
794
795 /*
796 * Remove from item list.
797 */
798 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
799 pp->pr_nitems--;
800 pp->pr_nout++;
801 if (ph->ph_nmissing == 0) {
802 #ifdef DIAGNOSTIC
803 if (__predict_false(pp->pr_nidle == 0))
804 panic("pool_get: nidle inconsistent");
805 #endif
806 pp->pr_nidle--;
807 }
808 ph->ph_nmissing++;
809 if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
810 #ifdef DIAGNOSTIC
811 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
812 pr_leave(pp);
813 simple_unlock(&pp->pr_slock);
814 panic("pool_get: %s: nmissing inconsistent",
815 pp->pr_wchan);
816 }
817 #endif
818 /*
819 * Find a new non-empty page header, if any.
820 * Start search from the page head, to increase
821 * the chance for "high water" pages to be freed.
822 *
823 * Migrate empty pages to the end of the list. This
824 * will speed the update of curpage as pages become
825 * idle. Empty pages intermingled with idle pages
826 * are no big deal. As soon as a page becomes un-empty,
827 * it will move back to the head of the list.
828 */
829 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
830 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
831 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
832 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
833 break;
834
835 pp->pr_curpage = ph;
836 }
837
838 pp->pr_nget++;
839
840 /*
841 * If we have a low water mark and we are now below that low
842 * water mark, add more items to the pool.
843 */
844 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
845 /*
846 * XXX: Should we log a warning? Should we set up a timeout
847 * to try again in a second or so? The latter could break
848 * a caller's assumptions about interrupt protection, etc.
849 */
850 }
851
852 pr_leave(pp);
853 simple_unlock(&pp->pr_slock);
854 return (v);
855 }
856
857 /*
858 * Internal version of pool_put(). Pool is already locked/entered.
859 */
860 static void
861 pool_do_put(struct pool *pp, void *v)
862 {
863 struct pool_item *pi = v;
864 struct pool_item_header *ph;
865 caddr_t page;
866 int s;
867
868 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
869
870 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
871
872 #ifdef DIAGNOSTIC
873 if (__predict_false(pp->pr_nout == 0)) {
874 printf("pool %s: putting with none out\n",
875 pp->pr_wchan);
876 panic("pool_put");
877 }
878 #endif
879
880 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
881 pr_printlog(pp, NULL, printf);
882 panic("pool_put: %s: page header missing", pp->pr_wchan);
883 }
884
885 #ifdef LOCKDEBUG
886 /*
887 * Check if we're freeing a locked simple lock.
888 */
889 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
890 #endif
891
892 /*
893 * Return to item list.
894 */
895 #ifdef DIAGNOSTIC
896 pi->pi_magic = PI_MAGIC;
897 #endif
898 #ifdef DEBUG
899 {
900 int i, *ip = v;
901
902 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
903 *ip++ = PI_MAGIC;
904 }
905 }
906 #endif
907
908 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
909 ph->ph_nmissing--;
910 pp->pr_nput++;
911 pp->pr_nitems++;
912 pp->pr_nout--;
913
914 /* Cancel "pool empty" condition if it exists */
915 if (pp->pr_curpage == NULL)
916 pp->pr_curpage = ph;
917
918 if (pp->pr_flags & PR_WANTED) {
919 pp->pr_flags &= ~PR_WANTED;
920 if (ph->ph_nmissing == 0)
921 pp->pr_nidle++;
922 wakeup((caddr_t)pp);
923 return;
924 }
925
926 /*
927 * If this page is now complete, do one of two things:
928 *
929 * (1) If we have more pages than the page high water
930 * mark, free the page back to the system.
931 *
932 * (2) Move it to the end of the page list, so that
933 * we minimize our chances of fragmenting the
934 * pool. Idle pages (along with completely empty
935 * pages, so that we find un-empty pages more quickly
936 * when we update curpage) migrate to the end of the
937 * list so they can be more easily swept up by
938 * the pagedaemon when pages are scarce.
939 */
940 if (ph->ph_nmissing == 0) {
941 pp->pr_nidle++;
942 if (pp->pr_npages > pp->pr_maxpages) {
943 pr_rmpage(pp, ph, NULL);
944 } else {
945 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
946 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
947
948 /*
949 * Update the timestamp on the page. A page must
950 * be idle for some period of time before it can
951 * be reclaimed by the pagedaemon. This minimizes
952 * ping-pong'ing for memory.
953 */
954 s = splclock();
955 ph->ph_time = mono_time;
956 splx(s);
957
958 /*
959 * Update the current page pointer. Just look for
960 * the first page with any free items.
961 *
962 * XXX: Maybe we want an option to look for the
963 * page with the fewest available items, to minimize
964 * fragmentation?
965 */
966 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
967 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
968 break;
969
970 pp->pr_curpage = ph;
971 }
972 }
973 /*
974 * If the page has just become un-empty, move it to the head of
975 * the list, and make it the current page. The next allocation
976 * will get the item from this page, instead of further fragmenting
977 * the pool.
978 */
979 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
980 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
981 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
982 pp->pr_curpage = ph;
983 }
984 }
985
986 /*
987 * Return resource to the pool; must be called at appropriate spl level
988 */
989 #ifdef POOL_DIAGNOSTIC
990 void
991 _pool_put(struct pool *pp, void *v, const char *file, long line)
992 {
993
994 simple_lock(&pp->pr_slock);
995 pr_enter(pp, file, line);
996
997 pr_log(pp, v, PRLOG_PUT, file, line);
998
999 pool_do_put(pp, v);
1000
1001 pr_leave(pp);
1002 simple_unlock(&pp->pr_slock);
1003 }
1004 #undef pool_put
1005 #endif /* POOL_DIAGNOSTIC */
1006
1007 void
1008 pool_put(struct pool *pp, void *v)
1009 {
1010
1011 simple_lock(&pp->pr_slock);
1012
1013 pool_do_put(pp, v);
1014
1015 simple_unlock(&pp->pr_slock);
1016 }
1017
1018 #ifdef POOL_DIAGNOSTIC
1019 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1020 #endif
1021
1022 /*
1023 * Add N items to the pool.
1024 */
1025 int
1026 pool_prime(struct pool *pp, int n)
1027 {
1028 struct pool_item_header *ph;
1029 caddr_t cp;
1030 int newpages, error = 0;
1031
1032 simple_lock(&pp->pr_slock);
1033
1034 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1035
1036 while (newpages-- > 0) {
1037 simple_unlock(&pp->pr_slock);
1038 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1039 if (__predict_true(cp != NULL))
1040 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1041 simple_lock(&pp->pr_slock);
1042
1043 if (__predict_false(cp == NULL || ph == NULL)) {
1044 error = ENOMEM;
1045 if (cp != NULL)
1046 pool_allocator_free(pp, cp);
1047 break;
1048 }
1049
1050 pool_prime_page(pp, cp, ph);
1051 pp->pr_npagealloc++;
1052 pp->pr_minpages++;
1053 }
1054
1055 if (pp->pr_minpages >= pp->pr_maxpages)
1056 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1057
1058 simple_unlock(&pp->pr_slock);
1059 return (error);
1060 }
1061
1062 /*
1063 * Add a page worth of items to the pool.
1064 *
1065 * Note, we must be called with the pool descriptor LOCKED.
1066 */
1067 static void
1068 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1069 {
1070 struct pool_item *pi;
1071 caddr_t cp = storage;
1072 unsigned int align = pp->pr_align;
1073 unsigned int ioff = pp->pr_itemoffset;
1074 int n;
1075
1076 #ifdef DIAGNOSTIC
1077 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1078 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1079 #endif
1080
1081 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1082 LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1083 ph, ph_hashlist);
1084
1085 /*
1086 * Insert page header.
1087 */
1088 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1089 TAILQ_INIT(&ph->ph_itemlist);
1090 ph->ph_page = storage;
1091 ph->ph_nmissing = 0;
1092 memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1093
1094 pp->pr_nidle++;
1095
1096 /*
1097 * Color this page.
1098 */
1099 cp = (caddr_t)(cp + pp->pr_curcolor);
1100 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1101 pp->pr_curcolor = 0;
1102
1103 /*
1104 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1105 */
1106 if (ioff != 0)
1107 cp = (caddr_t)(cp + (align - ioff));
1108
1109 /*
1110 * Insert remaining chunks on the bucket list.
1111 */
1112 n = pp->pr_itemsperpage;
1113 pp->pr_nitems += n;
1114
1115 while (n--) {
1116 pi = (struct pool_item *)cp;
1117
1118 /* Insert on page list */
1119 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1120 #ifdef DIAGNOSTIC
1121 pi->pi_magic = PI_MAGIC;
1122 #endif
1123 cp = (caddr_t)(cp + pp->pr_size);
1124 }
1125
1126 /*
1127 * If the pool was depleted, point at the new page.
1128 */
1129 if (pp->pr_curpage == NULL)
1130 pp->pr_curpage = ph;
1131
1132 if (++pp->pr_npages > pp->pr_hiwat)
1133 pp->pr_hiwat = pp->pr_npages;
1134 }
1135
1136 /*
1137 * Used by pool_get() when nitems drops below the low water mark. This
1138 * is used to bring nitems back up to the low water mark.
1139 *
1140 * Note 1, we never wait for memory here, we let the caller decide what to do.
1141 *
1142 * Note 2, this doesn't work with static pools.
1143 *
1144 * Note 3, we must be called with the pool already locked, and we return
1145 * with it locked.
1146 */
1147 static int
1148 pool_catchup(struct pool *pp)
1149 {
1150 struct pool_item_header *ph;
1151 caddr_t cp;
1152 int error = 0;
1153
1154 if (pp->pr_roflags & PR_STATIC) {
1155 /*
1156 * We dropped below the low water mark, and this is not a
1157 * good thing. Log a warning.
1158 *
1159 * XXX: rate-limit this?
1160 */
1161 printf("WARNING: static pool `%s' dropped below low water "
1162 "mark\n", pp->pr_wchan);
1163 return (0);
1164 }
1165
1166 while (POOL_NEEDS_CATCHUP(pp)) {
1167 /*
1168 * Call the page back-end allocator for more memory.
1169 *
1170 * XXX: We never wait, so should we bother unlocking
1171 * the pool descriptor?
1172 */
1173 simple_unlock(&pp->pr_slock);
1174 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1175 if (__predict_true(cp != NULL))
1176 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1177 simple_lock(&pp->pr_slock);
1178 if (__predict_false(cp == NULL || ph == NULL)) {
1179 if (cp != NULL)
1180 pool_allocator_free(pp, cp);
1181 error = ENOMEM;
1182 break;
1183 }
1184 pool_prime_page(pp, cp, ph);
1185 pp->pr_npagealloc++;
1186 }
1187
1188 return (error);
1189 }
1190
1191 void
1192 pool_setlowat(struct pool *pp, int n)
1193 {
1194 int error;
1195
1196 simple_lock(&pp->pr_slock);
1197
1198 pp->pr_minitems = n;
1199 pp->pr_minpages = (n == 0)
1200 ? 0
1201 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1202
1203 /* Make sure we're caught up with the newly-set low water mark. */
1204 if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1205 /*
1206 * XXX: Should we log a warning? Should we set up a timeout
1207 * to try again in a second or so? The latter could break
1208 * a caller's assumptions about interrupt protection, etc.
1209 */
1210 }
1211
1212 simple_unlock(&pp->pr_slock);
1213 }
1214
1215 void
1216 pool_sethiwat(struct pool *pp, int n)
1217 {
1218
1219 simple_lock(&pp->pr_slock);
1220
1221 pp->pr_maxpages = (n == 0)
1222 ? 0
1223 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1224
1225 simple_unlock(&pp->pr_slock);
1226 }
1227
1228 void
1229 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1230 {
1231
1232 simple_lock(&pp->pr_slock);
1233
1234 pp->pr_hardlimit = n;
1235 pp->pr_hardlimit_warning = warnmess;
1236 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1237 pp->pr_hardlimit_warning_last.tv_sec = 0;
1238 pp->pr_hardlimit_warning_last.tv_usec = 0;
1239
1240 /*
1241 * In-line version of pool_sethiwat(), because we don't want to
1242 * release the lock.
1243 */
1244 pp->pr_maxpages = (n == 0)
1245 ? 0
1246 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1247
1248 simple_unlock(&pp->pr_slock);
1249 }
1250
1251 /*
1252 * Release all complete pages that have not been used recently.
1253 */
1254 int
1255 #ifdef POOL_DIAGNOSTIC
1256 _pool_reclaim(struct pool *pp, const char *file, long line)
1257 #else
1258 pool_reclaim(struct pool *pp)
1259 #endif
1260 {
1261 struct pool_item_header *ph, *phnext;
1262 struct pool_cache *pc;
1263 struct timeval curtime;
1264 struct pool_pagelist pq;
1265 int s;
1266
1267 if (pp->pr_roflags & PR_STATIC)
1268 return (0);
1269
1270 if (simple_lock_try(&pp->pr_slock) == 0)
1271 return (0);
1272 pr_enter(pp, file, line);
1273 TAILQ_INIT(&pq);
1274
1275 /*
1276 * Reclaim items from the pool's caches.
1277 */
1278 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1279 pool_cache_reclaim(pc);
1280
1281 s = splclock();
1282 curtime = mono_time;
1283 splx(s);
1284
1285 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1286 phnext = TAILQ_NEXT(ph, ph_pagelist);
1287
1288 /* Check our minimum page claim */
1289 if (pp->pr_npages <= pp->pr_minpages)
1290 break;
1291
1292 if (ph->ph_nmissing == 0) {
1293 struct timeval diff;
1294 timersub(&curtime, &ph->ph_time, &diff);
1295 if (diff.tv_sec < pool_inactive_time)
1296 continue;
1297
1298 /*
1299 * If freeing this page would put us below
1300 * the low water mark, stop now.
1301 */
1302 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1303 pp->pr_minitems)
1304 break;
1305
1306 pr_rmpage(pp, ph, &pq);
1307 }
1308 }
1309
1310 pr_leave(pp);
1311 simple_unlock(&pp->pr_slock);
1312 if (TAILQ_EMPTY(&pq))
1313 return (0);
1314
1315 while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1316 TAILQ_REMOVE(&pq, ph, ph_pagelist);
1317 pool_allocator_free(pp, ph->ph_page);
1318 if (pp->pr_roflags & PR_PHINPAGE) {
1319 continue;
1320 }
1321 LIST_REMOVE(ph, ph_hashlist);
1322 s = splhigh();
1323 pool_put(&phpool, ph);
1324 splx(s);
1325 }
1326
1327 return (1);
1328 }
1329
1330 /*
1331 * Drain pools, one at a time.
1332 *
1333 * Note, we must never be called from an interrupt context.
1334 */
1335 void
1336 pool_drain(void *arg)
1337 {
1338 struct pool *pp;
1339 int s;
1340
1341 pp = NULL;
1342 s = splvm();
1343 simple_lock(&pool_head_slock);
1344 if (drainpp == NULL) {
1345 drainpp = TAILQ_FIRST(&pool_head);
1346 }
1347 if (drainpp) {
1348 pp = drainpp;
1349 drainpp = TAILQ_NEXT(pp, pr_poollist);
1350 }
1351 simple_unlock(&pool_head_slock);
1352 pool_reclaim(pp);
1353 splx(s);
1354 }
1355
1356 /*
1357 * Diagnostic helpers.
1358 */
1359 void
1360 pool_print(struct pool *pp, const char *modif)
1361 {
1362 int s;
1363
1364 s = splvm();
1365 if (simple_lock_try(&pp->pr_slock) == 0) {
1366 printf("pool %s is locked; try again later\n",
1367 pp->pr_wchan);
1368 splx(s);
1369 return;
1370 }
1371 pool_print1(pp, modif, printf);
1372 simple_unlock(&pp->pr_slock);
1373 splx(s);
1374 }
1375
1376 void
1377 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1378 {
1379 int didlock = 0;
1380
1381 if (pp == NULL) {
1382 (*pr)("Must specify a pool to print.\n");
1383 return;
1384 }
1385
1386 /*
1387 * Called from DDB; interrupts should be blocked, and all
1388 * other processors should be paused. We can skip locking
1389 * the pool in this case.
1390 *
1391 * We do a simple_lock_try() just to print the lock
1392 * status, however.
1393 */
1394
1395 if (simple_lock_try(&pp->pr_slock) == 0)
1396 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1397 else
1398 didlock = 1;
1399
1400 pool_print1(pp, modif, pr);
1401
1402 if (didlock)
1403 simple_unlock(&pp->pr_slock);
1404 }
1405
1406 static void
1407 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1408 {
1409 struct pool_item_header *ph;
1410 struct pool_cache *pc;
1411 struct pool_cache_group *pcg;
1412 #ifdef DIAGNOSTIC
1413 struct pool_item *pi;
1414 #endif
1415 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1416 char c;
1417
1418 while ((c = *modif++) != '\0') {
1419 if (c == 'l')
1420 print_log = 1;
1421 if (c == 'p')
1422 print_pagelist = 1;
1423 if (c == 'c')
1424 print_cache = 1;
1426 }
1427
1428 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1429 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1430 pp->pr_roflags);
1431 (*pr)("\talloc %p\n", pp->pr_alloc);
1432 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1433 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1434 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1435 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1436
1437 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1438 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1439 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1440 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1441
1442 if (print_pagelist == 0)
1443 goto skip_pagelist;
1444
1445 if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1446 (*pr)("\n\tpage list:\n");
1447 for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1448 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1449 ph->ph_page, ph->ph_nmissing,
1450 (u_long)ph->ph_time.tv_sec,
1451 (u_long)ph->ph_time.tv_usec);
1452 #ifdef DIAGNOSTIC
1453 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1454 if (pi->pi_magic != PI_MAGIC) {
1455 (*pr)("\t\t\titem %p, magic 0x%x\n",
1456 pi, pi->pi_magic);
1457 }
1458 }
1459 #endif
1460 }
1461 if (pp->pr_curpage == NULL)
1462 (*pr)("\tno current page\n");
1463 else
1464 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1465
1466 skip_pagelist:
1467
1468 if (print_log == 0)
1469 goto skip_log;
1470
1471 (*pr)("\n");
1472 if ((pp->pr_roflags & PR_LOGGING) == 0)
1473 (*pr)("\tno log\n");
1474 else
1475 pr_printlog(pp, NULL, pr);
1476
1477 skip_log:
1478
1479 if (print_cache == 0)
1480 goto skip_cache;
1481
1482 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1483 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1484 pc->pc_allocfrom, pc->pc_freeto);
1485 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1486 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1487 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1488 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1489 for (i = 0; i < PCG_NOBJECTS; i++)
1490 (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1491 }
1492 }
1493
1494 skip_cache:
1495
1496 pr_enter_check(pp, pr);
1497 }
1498
1499 int
1500 pool_chk(struct pool *pp, const char *label)
1501 {
1502 struct pool_item_header *ph;
1503 int r = 0;
1504
1505 simple_lock(&pp->pr_slock);
1506
1507 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1508 struct pool_item *pi;
1509 int n;
1510 caddr_t page;
1511
1512 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1513 if (page != ph->ph_page &&
1514 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1515 if (label != NULL)
1516 printf("%s: ", label);
1517 printf("pool(%p:%s): page inconsistency: page %p;"
1518 " at page head addr %p (p %p)\n", pp,
1519 pp->pr_wchan, ph->ph_page,
1520 ph, page);
1521 r++;
1522 goto out;
1523 }
1524
1525 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1526 pi != NULL;
1527 pi = TAILQ_NEXT(pi,pi_list), n++) {
1528
1529 #ifdef DIAGNOSTIC
1530 if (pi->pi_magic != PI_MAGIC) {
1531 if (label != NULL)
1532 printf("%s: ", label);
1533 printf("pool(%s): free list modified: magic=%x;"
1534 " page %p; item ordinal %d;"
1535 " addr %p (p %p)\n",
1536 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1537 n, pi, page);
1538 panic("pool");
1539 }
1540 #endif
1541 page =
1542 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1543 if (page == ph->ph_page)
1544 continue;
1545
1546 if (label != NULL)
1547 printf("%s: ", label);
1548 printf("pool(%p:%s): page inconsistency: page %p;"
1549 " item ordinal %d; addr %p (p %p)\n", pp,
1550 pp->pr_wchan, ph->ph_page,
1551 n, pi, page);
1552 r++;
1553 goto out;
1554 }
1555 }
1556 out:
1557 simple_unlock(&pp->pr_slock);
1558 return (r);
1559 }
1560
1561 /*
1562 * pool_cache_init:
1563 *
1564 * Initialize a pool cache.
1565 *
1566 * NOTE: If the pool must be protected from interrupts, we expect
1567 * to be called at the appropriate interrupt priority level.
1568 */
1569 void
1570 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1571 int (*ctor)(void *, void *, int),
1572 void (*dtor)(void *, void *),
1573 void *arg)
1574 {
1575
1576 TAILQ_INIT(&pc->pc_grouplist);
1577 simple_lock_init(&pc->pc_slock);
1578
1579 pc->pc_allocfrom = NULL;
1580 pc->pc_freeto = NULL;
1581 pc->pc_pool = pp;
1582
1583 pc->pc_ctor = ctor;
1584 pc->pc_dtor = dtor;
1585 pc->pc_arg = arg;
1586
1587 pc->pc_hits = 0;
1588 pc->pc_misses = 0;
1589
1590 pc->pc_ngroups = 0;
1591
1592 pc->pc_nitems = 0;
1593
1594 simple_lock(&pp->pr_slock);
1595 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1596 simple_unlock(&pp->pr_slock);
1597 }
1598
1599 /*
1600 * pool_cache_destroy:
1601 *
1602 * Destroy a pool cache.
1603 */
1604 void
1605 pool_cache_destroy(struct pool_cache *pc)
1606 {
1607 struct pool *pp = pc->pc_pool;
1608
1609 /* First, invalidate the entire cache. */
1610 pool_cache_invalidate(pc);
1611
1612 /* ...and remove it from the pool's cache list. */
1613 simple_lock(&pp->pr_slock);
1614 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1615 simple_unlock(&pp->pr_slock);
1616 }
1617
1618 static __inline void *
1619 pcg_get(struct pool_cache_group *pcg)
1620 {
1621 void *object;
1622 u_int idx;
1623
1624 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1625 KASSERT(pcg->pcg_avail != 0);
1626 idx = --pcg->pcg_avail;
1627
1628 KASSERT(pcg->pcg_objects[idx] != NULL);
1629 object = pcg->pcg_objects[idx];
1630 pcg->pcg_objects[idx] = NULL;
1631
1632 return (object);
1633 }
1634
1635 static __inline void
1636 pcg_put(struct pool_cache_group *pcg, void *object)
1637 {
1638 u_int idx;
1639
1640 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1641 idx = pcg->pcg_avail++;
1642
1643 KASSERT(pcg->pcg_objects[idx] == NULL);
1644 pcg->pcg_objects[idx] = object;
1645 }
1646
1647 /*
1648 * pool_cache_get:
1649 *
1650 * Get an object from a pool cache.
1651 */
1652 void *
1653 pool_cache_get(struct pool_cache *pc, int flags)
1654 {
1655 struct pool_cache_group *pcg;
1656 void *object;
1657
1658 #ifdef LOCKDEBUG
1659 if (flags & PR_WAITOK)
1660 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1661 #endif
1662
1663 simple_lock(&pc->pc_slock);
1664
1665 if ((pcg = pc->pc_allocfrom) == NULL) {
1666 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1667 if (pcg->pcg_avail != 0) {
1668 pc->pc_allocfrom = pcg;
1669 goto have_group;
1670 }
1671 }
1672
1673 /*
1674 * No groups with any available objects. Allocate
1675 * a new object, construct it, and return it to
1676 * the caller. We will allocate a group, if necessary,
1677 * when the object is freed back to the cache.
1678 */
1679 pc->pc_misses++;
1680 simple_unlock(&pc->pc_slock);
1681 object = pool_get(pc->pc_pool, flags);
1682 if (object != NULL && pc->pc_ctor != NULL) {
1683 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1684 pool_put(pc->pc_pool, object);
1685 return (NULL);
1686 }
1687 }
1688 return (object);
1689 }
1690
1691 have_group:
1692 pc->pc_hits++;
1693 pc->pc_nitems--;
1694 object = pcg_get(pcg);
1695
1696 if (pcg->pcg_avail == 0)
1697 pc->pc_allocfrom = NULL;
1698
1699 simple_unlock(&pc->pc_slock);
1700
1701 return (object);
1702 }
1703
1704 /*
1705 * pool_cache_put:
1706 *
1707 * Put an object back to the pool cache.
1708 */
1709 void
1710 pool_cache_put(struct pool_cache *pc, void *object)
1711 {
1712 struct pool_cache_group *pcg;
1713 int s;
1714
1715 simple_lock(&pc->pc_slock);
1716
1717 if ((pcg = pc->pc_freeto) == NULL) {
1718 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1719 if (pcg->pcg_avail != PCG_NOBJECTS) {
1720 pc->pc_freeto = pcg;
1721 goto have_group;
1722 }
1723 }
1724
1725 /*
1726 * No group has room for the object. Attempt to
1727 * allocate one.
1728 */
1729 simple_unlock(&pc->pc_slock);
1730 s = splvm();
1731 pcg = pool_get(&pcgpool, PR_NOWAIT);
1732 splx(s);
1733 if (pcg != NULL) {
1734 memset(pcg, 0, sizeof(*pcg));
1735 simple_lock(&pc->pc_slock);
1736 pc->pc_ngroups++;
1737 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1738 if (pc->pc_freeto == NULL)
1739 pc->pc_freeto = pcg;
1740 goto have_group;
1741 }
1742
1743 /*
1744 * Unable to allocate a cache group; destruct the object
1745 * and free it back to the pool.
1746 */
1747 pool_cache_destruct_object(pc, object);
1748 return;
1749 }
1750
1751 have_group:
1752 pc->pc_nitems++;
1753 pcg_put(pcg, object);
1754
1755 if (pcg->pcg_avail == PCG_NOBJECTS)
1756 pc->pc_freeto = NULL;
1757
1758 simple_unlock(&pc->pc_slock);
1759 }
1760
1761 /*
1762 * pool_cache_destruct_object:
1763 *
1764 * Force destruction of an object and its release back into
1765 * the pool.
1766 */
1767 void
1768 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1769 {
1770
1771 if (pc->pc_dtor != NULL)
1772 (*pc->pc_dtor)(pc->pc_arg, object);
1773 pool_put(pc->pc_pool, object);
1774 }
1775
1776 /*
1777 * pool_cache_do_invalidate:
1778 *
1779 * This internal function implements pool_cache_invalidate() and
1780 * pool_cache_reclaim().
1781 */
1782 static void
1783 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1784 void (*putit)(struct pool *, void *))
1785 {
1786 struct pool_cache_group *pcg, *npcg;
1787 void *object;
1788 int s;
1789
1790 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1791 pcg = npcg) {
1792 npcg = TAILQ_NEXT(pcg, pcg_list);
1793 while (pcg->pcg_avail != 0) {
1794 pc->pc_nitems--;
1795 object = pcg_get(pcg);
1796 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1797 pc->pc_allocfrom = NULL;
1798 if (pc->pc_dtor != NULL)
1799 (*pc->pc_dtor)(pc->pc_arg, object);
1800 (*putit)(pc->pc_pool, object);
1801 }
1802 if (free_groups) {
1803 pc->pc_ngroups--;
1804 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1805 if (pc->pc_freeto == pcg)
1806 pc->pc_freeto = NULL;
1807 s = splvm();
1808 pool_put(&pcgpool, pcg);
1809 splx(s);
1810 }
1811 }
1812 }
1813
1814 /*
1815 * pool_cache_invalidate:
1816 *
1817 * Invalidate a pool cache (destruct and release all of the
1818 * cached objects).
1819 */
1820 void
1821 pool_cache_invalidate(struct pool_cache *pc)
1822 {
1823
1824 simple_lock(&pc->pc_slock);
1825 pool_cache_do_invalidate(pc, 0, pool_put);
1826 simple_unlock(&pc->pc_slock);
1827 }
1828
1829 /*
1830 * pool_cache_reclaim:
1831 *
1832 * Reclaim a pool cache for pool_reclaim().
1833 */
1834 static void
1835 pool_cache_reclaim(struct pool_cache *pc)
1836 {
1837
1838 simple_lock(&pc->pc_slock);
1839 pool_cache_do_invalidate(pc, 1, pool_do_put);
1840 simple_unlock(&pc->pc_slock);
1841 }
1842
1843 /*
1844 * Pool backend allocators.
1845 *
1846 * Each pool has a backend allocator that handles allocation, deallocation,
1847 * and any additional draining that might be needed.
1848 *
1849 * We provide two standard allocators:
1850 *
1851 * pool_allocator_kmem - the default when no allocator is specified
1852 *
1853 * pool_allocator_nointr - used for pools that will not be accessed
1854 * in interrupt context.
1855 */
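/*
 * For example (illustrative sketch only; foo_pool is hypothetical), a
 * pool never used from interrupt context could be created with:
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *
 * Passing NULL instead selects pool_allocator_kmem (or its sub-page
 * variant when POOL_SUBPAGE is defined); see pool_init() above.
 */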
1856 void *pool_page_alloc(struct pool *, int);
1857 void pool_page_free(struct pool *, void *);
1858
1859 struct pool_allocator pool_allocator_kmem = {
1860 pool_page_alloc, pool_page_free, 0,
1861 };
1862
1863 void *pool_page_alloc_nointr(struct pool *, int);
1864 void pool_page_free_nointr(struct pool *, void *);
1865
1866 struct pool_allocator pool_allocator_nointr = {
1867 pool_page_alloc_nointr, pool_page_free_nointr, 0,
1868 };
1869
1870 #ifdef POOL_SUBPAGE
1871 void *pool_subpage_alloc(struct pool *, int);
1872 void pool_subpage_free(struct pool *, void *);
1873
1874 struct pool_allocator pool_allocator_kmem_subpage = {
1875 pool_subpage_alloc, pool_subpage_free, 0,
1876 };
1877 #endif /* POOL_SUBPAGE */
1878
1879 /*
1880 * We have at least three different resources for the same allocation and
1881 * each resource can be depleted. First, we have the ready elements in the
1882 * pool. Then we have the resource (typically a vm_map) for this allocator.
1883 * Finally, we have physical memory. Waiting for any of these can be
1884 * unnecessary when any other is freed, but the kernel doesn't support
1885 * sleeping on multiple wait channels, so we have to employ another strategy.
1886 *
1887 * The caller sleeps on the pool (so that it can be awakened when an item
1888 * is returned to the pool), but we set PA_WANT on the allocator. When a
1889 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1890 * will wake up all sleeping pools belonging to this allocator.
1891 *
1892 * XXX Thundering herd.
1893 */
1894 void *
1895 pool_allocator_alloc(struct pool *org, int flags)
1896 {
1897 struct pool_allocator *pa = org->pr_alloc;
1898 struct pool *pp, *start;
1899 int s, freed;
1900 void *res;
1901
1902 do {
1903 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1904 return (res);
1905 if ((flags & PR_WAITOK) == 0)
1906 break;
1907
1908 /*
1909 * Drain all pools, except "org", that use this
1910 * allocator. We do this to reclaim VA space.
1911 * pa_alloc is responsible for waiting for
1912 * physical memory.
1913 *
1914 * XXX We risk looping forever if someone
1915 * calls pool_destroy on "start". But there is no
1916 * other way to have potentially sleeping pool_reclaim,
1917 * non-sleeping locks on pool_allocator, and some
1918 * stirring of drained pools in the allocator.
1919 */
1920 freed = 0;
1921
1922 s = splvm();
1923 simple_lock(&pa->pa_slock);
1924 pp = start = TAILQ_FIRST(&pa->pa_list);
1925 do {
1926 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1927 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1928 if (pp == org)
1929 continue;
1930 simple_unlock(&pa->pa_slock);
1931 freed = pool_reclaim(pp);
1932 simple_lock(&pa->pa_slock);
1933 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1934 freed == 0);
1935
1936 if (freed == 0) {
1937 /*
1938 * We set PA_WANT here; the caller will most likely
1939 * sleep waiting for pages (if not, this won't hurt
1940 * that much), and there is no way to set this in
1941 * the caller without violating locking order.
1942 */
1943 pa->pa_flags |= PA_WANT;
1944 }
1945 simple_unlock(&pa->pa_slock);
1946 splx(s);
1947 } while (freed);
1948 return (NULL);
1949 }
1950
1951 void
1952 pool_allocator_free(struct pool *pp, void *v)
1953 {
1954 struct pool_allocator *pa = pp->pr_alloc;
1955 int s;
1956
1957 (*pa->pa_free)(pp, v);
1958
1959 s = splvm();
1960 simple_lock(&pa->pa_slock);
1961 if ((pa->pa_flags & PA_WANT) == 0) {
1962 simple_unlock(&pa->pa_slock);
1963 splx(s);
1964 return;
1965 }
1966
1967 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1968 simple_lock(&pp->pr_slock);
1969 if ((pp->pr_flags & PR_WANTED) != 0) {
1970 pp->pr_flags &= ~PR_WANTED;
1971 wakeup(pp);
1972 }
simple_unlock(&pp->pr_slock);
1973 }
1974 pa->pa_flags &= ~PA_WANT;
1975 simple_unlock(&pa->pa_slock);
1976 splx(s);
1977 }
1978
1979 void *
1980 pool_page_alloc(struct pool *pp, int flags)
1981 {
1982 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1983
1984 return ((void *) uvm_km_alloc_poolpage(waitok));
1985 }
1986
1987 void
1988 pool_page_free(struct pool *pp, void *v)
1989 {
1990
1991 uvm_km_free_poolpage((vaddr_t) v);
1992 }
1993
1994 #ifdef POOL_SUBPAGE
1995 /* Sub-page allocator, for machines with large hardware pages. */
1996 void *
1997 pool_subpage_alloc(struct pool *pp, int flags)
1998 {
1999
2000 return (pool_get(&psppool, flags));
2001 }
2002
2003 void
2004 pool_subpage_free(struct pool *pp, void *v)
2005 {
2006
2007 pool_put(&psppool, v);
2008 }
2009
2010 /* We don't provide a real nointr allocator. Maybe later. */
2011 void *
2012 pool_page_alloc_nointr(struct pool *pp, int flags)
2013 {
2014
2015 return (pool_subpage_alloc(pp, flags));
2016 }
2017
2018 void
2019 pool_page_free_nointr(struct pool *pp, void *v)
2020 {
2021
2022 pool_subpage_free(pp, v);
2023 }
2024 #else
2025 void *
2026 pool_page_alloc_nointr(struct pool *pp, int flags)
2027 {
2028 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2029
2030 return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2031 uvm.kernel_object, waitok));
2032 }
2033
2034 void
2035 pool_page_free_nointr(struct pool *pp, void *v)
2036 {
2037
2038 uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2039 }
2040 #endif /* POOL_SUBPAGE */
2041