1 /* $NetBSD: subr_pool.c,v 1.67 2002/03/08 20:51:26 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.67 2002/03/08 20:51:26 thorpej Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according
63 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64 * in the pool structure and the individual pool items are on a linked list
65 * headed by `ph_itemlist' in each page header. The memory for building
66 * the page list is either taken from the allocated pages themselves (for
67 * small pool items) or taken from an internal pool of page headers (`phpool').
68 */
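/*
 * Illustrative usage sketch (not part of the original interface docs):
 * a subsystem that allocates fixed-size objects would typically declare
 * a pool and call pool_get()/pool_put() as below.  `struct foo',
 * `foo_pool' and the "foopl" wait channel are hypothetical names.
 *
 *	struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */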
69
70 /* List of all pools */
71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72
73 /* Private pool for page header structures */
74 static struct pool phpool;
75
76 #ifdef POOL_SUBPAGE
77 /* Pool of subpages for use by normal pools. */
78 static struct pool psppool;
79 #endif
80
81 /* # of seconds to retain page after last use */
82 int pool_inactive_time = 10;
83
84 /* Next candidate for drainage (see pool_drain()) */
85 static struct pool *drainpp;
86
87 /* This spin lock protects both pool_head and drainpp. */
88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89
90 struct pool_item_header {
91 /* Page headers */
92 TAILQ_ENTRY(pool_item_header)
93 ph_pagelist; /* pool page list */
94 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95 LIST_ENTRY(pool_item_header)
96 ph_hashlist; /* Off-page page headers */
97 int ph_nmissing; /* # of chunks in use */
98 caddr_t ph_page; /* this page's address */
99 struct timeval ph_time; /* last referenced */
100 };
101 TAILQ_HEAD(pool_pagelist,pool_item_header);
102
103 struct pool_item {
104 #ifdef DIAGNOSTIC
105 int pi_magic;
106 #endif
107 #define PI_MAGIC 0xdeadbeef
108 /* Other entries use only this list entry */
109 TAILQ_ENTRY(pool_item) pi_list;
110 };
111
112 #define PR_HASH_INDEX(pp,addr) \
113 (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114 (PR_HASHTABSIZE - 1))
115
116 #define POOL_NEEDS_CATCHUP(pp) \
117 ((pp)->pr_nitems < (pp)->pr_minitems)
118
119 /*
120 * Pool cache management.
121 *
122 * Pool caches provide a way for constructed objects to be cached by the
123 * pool subsystem. This can lead to performance improvements by avoiding
124 * needless object construction/destruction; destruction is deferred until
125 * absolutely necessary.
126 *
127 * Caches are grouped into cache groups. Each cache group references
128 * up to 16 constructed objects. When a cache allocates an object
129 * from the pool, it calls the object's constructor and places it into
130 * a cache group. When a cache group frees an object back to the pool,
131 * it first calls the object's destructor. This allows the object to
132 * persist in constructed form while freed to the cache.
133 *
134 * Multiple caches may exist for each pool. This allows a single
135 * object type to have multiple constructed forms. The pool references
136 * each cache, so that when a pool is drained by the pagedaemon, it can
137 * drain each individual cache as well. Each time a cache is drained,
138 * the most idle cache group is freed to the pool in its entirety.
139 *
140 * Pool caches are laid on top of pools. By layering them, we can avoid
141 * the complexity of cache management for pools which would not benefit
142 * from it.
143 */
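/*
 * Illustrative usage sketch (hypothetical names): a pool cache is
 * layered on an existing pool and given a constructor/destructor pair.
 *
 *	struct pool foo_pool;
 *	struct pool_cache foo_cache;
 *	int foo_ctor(void *, void *, int);
 *	void foo_dtor(void *, void *);
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 */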
144
145 /* The cache group pool. */
146 static struct pool pcgpool;
147
148 /* The pool cache group. */
149 #define PCG_NOBJECTS 16
150 struct pool_cache_group {
151 TAILQ_ENTRY(pool_cache_group)
152 pcg_list; /* link in the pool cache's group list */
153 u_int pcg_avail; /* # available objects */
154 /* pointers to the objects */
155 void *pcg_objects[PCG_NOBJECTS];
156 };
157
158 static void pool_cache_reclaim(struct pool_cache *);
159
160 static int pool_catchup(struct pool *);
161 static void pool_prime_page(struct pool *, caddr_t,
162 struct pool_item_header *);
163
164 void *pool_allocator_alloc(struct pool *, int);
165 void pool_allocator_free(struct pool *, void *);
166
167 static void pool_print1(struct pool *, const char *,
168 void (*)(const char *, ...));
169
170 /*
171 * Pool log entry. An array of these is allocated in pool_init().
172 */
173 struct pool_log {
174 const char *pl_file;
175 long pl_line;
176 int pl_action;
177 #define PRLOG_GET 1
178 #define PRLOG_PUT 2
179 void *pl_addr;
180 };
181
182 /* Number of entries in pool log buffers */
183 #ifndef POOL_LOGSIZE
184 #define POOL_LOGSIZE 10
185 #endif
186
187 int pool_logsize = POOL_LOGSIZE;
188
189 #ifdef POOL_DIAGNOSTIC
190 static __inline void
191 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
192 {
193 int n = pp->pr_curlogentry;
194 struct pool_log *pl;
195
196 if ((pp->pr_roflags & PR_LOGGING) == 0)
197 return;
198
199 /*
200 * Fill in the current entry. Wrap around and overwrite
201 * the oldest entry if necessary.
202 */
203 pl = &pp->pr_log[n];
204 pl->pl_file = file;
205 pl->pl_line = line;
206 pl->pl_action = action;
207 pl->pl_addr = v;
208 if (++n >= pp->pr_logsize)
209 n = 0;
210 pp->pr_curlogentry = n;
211 }
212
213 static void
214 pr_printlog(struct pool *pp, struct pool_item *pi,
215 void (*pr)(const char *, ...))
216 {
217 int i = pp->pr_logsize;
218 int n = pp->pr_curlogentry;
219
220 if ((pp->pr_roflags & PR_LOGGING) == 0)
221 return;
222
223 /*
224 * Print all entries in this pool's log.
225 */
226 while (i-- > 0) {
227 struct pool_log *pl = &pp->pr_log[n];
228 if (pl->pl_action != 0) {
229 if (pi == NULL || pi == pl->pl_addr) {
230 (*pr)("\tlog entry %d:\n", i);
231 (*pr)("\t\taction = %s, addr = %p\n",
232 pl->pl_action == PRLOG_GET ? "get" : "put",
233 pl->pl_addr);
234 (*pr)("\t\tfile: %s at line %lu\n",
235 pl->pl_file, pl->pl_line);
236 }
237 }
238 if (++n >= pp->pr_logsize)
239 n = 0;
240 }
241 }
242
243 static __inline void
244 pr_enter(struct pool *pp, const char *file, long line)
245 {
246
247 if (__predict_false(pp->pr_entered_file != NULL)) {
248 printf("pool %s: reentrancy at file %s line %ld\n",
249 pp->pr_wchan, file, line);
250 printf(" previous entry at file %s line %ld\n",
251 pp->pr_entered_file, pp->pr_entered_line);
252 panic("pr_enter");
253 }
254
255 pp->pr_entered_file = file;
256 pp->pr_entered_line = line;
257 }
258
259 static __inline void
260 pr_leave(struct pool *pp)
261 {
262
263 if (__predict_false(pp->pr_entered_file == NULL)) {
264 printf("pool %s not entered?\n", pp->pr_wchan);
265 panic("pr_leave");
266 }
267
268 pp->pr_entered_file = NULL;
269 pp->pr_entered_line = 0;
270 }
271
272 static __inline void
273 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
274 {
275
276 if (pp->pr_entered_file != NULL)
277 (*pr)("\n\tcurrently entered from file %s line %ld\n",
278 pp->pr_entered_file, pp->pr_entered_line);
279 }
280 #else
281 #define pr_log(pp, v, action, file, line)
282 #define pr_printlog(pp, pi, pr)
283 #define pr_enter(pp, file, line)
284 #define pr_leave(pp)
285 #define pr_enter_check(pp, pr)
286 #endif /* POOL_DIAGNOSTIC */
287
288 /*
289 * Return the pool page header based on page address.
290 */
291 static __inline struct pool_item_header *
292 pr_find_pagehead(struct pool *pp, caddr_t page)
293 {
294 struct pool_item_header *ph;
295
296 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
297 return ((struct pool_item_header *)(page + pp->pr_phoffset));
298
299 for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300 ph != NULL;
301 ph = LIST_NEXT(ph, ph_hashlist)) {
302 if (ph->ph_page == page)
303 return (ph);
304 }
305 return (NULL);
306 }
307
308 /*
309 * Remove a page from the pool.
310 */
311 static __inline void
312 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313 struct pool_pagelist *pq)
314 {
315 int s;
316
317 /*
318 * If the page was idle, decrement the idle page count.
319 */
320 if (ph->ph_nmissing == 0) {
321 #ifdef DIAGNOSTIC
322 if (pp->pr_nidle == 0)
323 panic("pr_rmpage: nidle inconsistent");
324 if (pp->pr_nitems < pp->pr_itemsperpage)
325 panic("pr_rmpage: nitems inconsistent");
326 #endif
327 pp->pr_nidle--;
328 }
329
330 pp->pr_nitems -= pp->pr_itemsperpage;
331
332 /*
333 * Unlink a page from the pool and release it (or queue it for release).
334 */
335 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
336 if (pq) {
337 TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338 } else {
339 pool_allocator_free(pp, ph->ph_page);
340 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341 LIST_REMOVE(ph, ph_hashlist);
342 s = splhigh();
343 pool_put(&phpool, ph);
344 splx(s);
345 }
346 }
347 pp->pr_npages--;
348 pp->pr_npagefree++;
349
350 if (pp->pr_curpage == ph) {
351 /*
352 * Find a new non-empty page header, if any.
353 * Start search from the page head, to increase the
354 * chance for "high water" pages to be freed.
355 */
356 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
357 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358 break;
359
360 pp->pr_curpage = ph;
361 }
362 }
363
364 /*
365 * Initialize the given pool resource structure.
366 *
367 * We export this routine to allow other kernel parts to declare
368 * static pools that must be initialized before malloc() is available.
369 */
370 void
371 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
372 const char *wchan, struct pool_allocator *palloc)
373 {
374 int off, slack, i;
375
376 #ifdef POOL_DIAGNOSTIC
377 /*
378 * Always log if POOL_DIAGNOSTIC is defined.
379 */
380 if (pool_logsize != 0)
381 flags |= PR_LOGGING;
382 #endif
383
384 #ifdef POOL_SUBPAGE
385 /*
386 * XXX We don't provide a real `nointr' back-end
387 * yet; all sub-pages come from a kmem back-end.
388 * maybe some day...
389 */
390 if (palloc == NULL) {
391 extern struct pool_allocator pool_allocator_kmem_subpage;
392 palloc = &pool_allocator_kmem_subpage;
393 }
394 /*
395 * We'll assume any user-specified back-end allocator
396 * will deal with sub-pages, or simply doesn't care.
397 */
398 #else
399 if (palloc == NULL)
400 palloc = &pool_allocator_kmem;
401 #endif /* POOL_SUBPAGE */
402 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403 if (palloc->pa_pagesz == 0) {
404 #ifdef POOL_SUBPAGE
405 if (palloc == &pool_allocator_kmem)
406 palloc->pa_pagesz = PAGE_SIZE;
407 else
408 palloc->pa_pagesz = POOL_SUBPAGE;
409 #else
410 palloc->pa_pagesz = PAGE_SIZE;
411 #endif /* POOL_SUBPAGE */
412 }
413
414 TAILQ_INIT(&palloc->pa_list);
415
416 simple_lock_init(&palloc->pa_slock);
417 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419 palloc->pa_flags |= PA_INITIALIZED;
420 }
421
422 if (align == 0)
423 align = ALIGN(1);
424
425 if (size < sizeof(struct pool_item))
426 size = sizeof(struct pool_item);
427
428 size = ALIGN(size);
429 #ifdef DIAGNOSTIC
430 if (size > palloc->pa_pagesz)
431 panic("pool_init: pool item size (%lu) too large",
432 (u_long)size);
433 #endif
434
435 /*
436 * Initialize the pool structure.
437 */
438 TAILQ_INIT(&pp->pr_pagelist);
439 TAILQ_INIT(&pp->pr_cachelist);
440 pp->pr_curpage = NULL;
441 pp->pr_npages = 0;
442 pp->pr_minitems = 0;
443 pp->pr_minpages = 0;
444 pp->pr_maxpages = UINT_MAX;
445 pp->pr_roflags = flags;
446 pp->pr_flags = 0;
447 pp->pr_size = size;
448 pp->pr_align = align;
449 pp->pr_wchan = wchan;
450 pp->pr_alloc = palloc;
451 pp->pr_nitems = 0;
452 pp->pr_nout = 0;
453 pp->pr_hardlimit = UINT_MAX;
454 pp->pr_hardlimit_warning = NULL;
455 pp->pr_hardlimit_ratecap.tv_sec = 0;
456 pp->pr_hardlimit_ratecap.tv_usec = 0;
457 pp->pr_hardlimit_warning_last.tv_sec = 0;
458 pp->pr_hardlimit_warning_last.tv_usec = 0;
459
460 /*
461 * Decide whether to put the page header off page to avoid
462 * wasting too large a part of the page. Off-page page headers
463 * go on a hash table, so we can match a returned item
464 * with its header based on the page address.
465 * We use 1/16 of the page size as the threshold (XXX: tune)
466 */
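/*
 * For example, with a 4 KB page the cut-off is 256 bytes: smaller
 * items keep their header at the end of the page, larger ones get
 * an off-page header allocated from `phpool'.
 */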
467 if (pp->pr_size < palloc->pa_pagesz/16) {
468 /* Use the end of the page for the page header */
469 pp->pr_roflags |= PR_PHINPAGE;
470 pp->pr_phoffset = off = palloc->pa_pagesz -
471 ALIGN(sizeof(struct pool_item_header));
472 } else {
473 /* The page header will be taken from our page header pool */
474 pp->pr_phoffset = 0;
475 off = palloc->pa_pagesz;
476 for (i = 0; i < PR_HASHTABSIZE; i++) {
477 LIST_INIT(&pp->pr_hashtab[i]);
478 }
479 }
480
481 /*
482 * Alignment is to take place at `ioff' within the item. This means
483 * we must reserve up to `align - 1' bytes on the page to allow
484 * appropriate positioning of each item.
485 *
486 * Silently enforce `0 <= ioff < align'.
487 */
488 pp->pr_itemoffset = ioff = ioff % align;
489 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
490 KASSERT(pp->pr_itemsperpage != 0);
491
492 /*
493 * Use the slack between the chunks and the page header
494 * for "cache coloring".
495 */
496 slack = off - pp->pr_itemsperpage * pp->pr_size;
497 pp->pr_maxcolor = (slack / align) * align;
498 pp->pr_curcolor = 0;
499
500 pp->pr_nget = 0;
501 pp->pr_nfail = 0;
502 pp->pr_nput = 0;
503 pp->pr_npagealloc = 0;
504 pp->pr_npagefree = 0;
505 pp->pr_hiwat = 0;
506 pp->pr_nidle = 0;
507
508 #ifdef POOL_DIAGNOSTIC
509 if (flags & PR_LOGGING) {
510 if (kmem_map == NULL ||
511 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
512 M_TEMP, M_NOWAIT)) == NULL)
513 pp->pr_roflags &= ~PR_LOGGING;
514 pp->pr_curlogentry = 0;
515 pp->pr_logsize = pool_logsize;
516 }
517 #endif
518
519 pp->pr_entered_file = NULL;
520 pp->pr_entered_line = 0;
521
522 simple_lock_init(&pp->pr_slock);
523
524 /*
525 * Initialize private page header pool and cache magazine pool if we
526 * haven't done so yet.
527 * XXX LOCKING.
528 */
529 if (phpool.pr_size == 0) {
530 #ifdef POOL_SUBPAGE
531 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
532 "phpool", &pool_allocator_kmem);
533 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
534 PR_RECURSIVE, "psppool", &pool_allocator_kmem);
535 #else
536 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
537 0, "phpool", NULL);
538 #endif
539 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
540 0, "pcgpool", NULL);
541 }
542
543 /* Insert into the list of all pools. */
544 simple_lock(&pool_head_slock);
545 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
546 simple_unlock(&pool_head_slock);
547
548 /* Insert this into the list of pools using this allocator. */
549 simple_lock(&palloc->pa_slock);
550 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
551 simple_unlock(&palloc->pa_slock);
552 }
553
554 /*
555 * De-commission a pool resource.
556 */
557 void
558 pool_destroy(struct pool *pp)
559 {
560 struct pool_item_header *ph;
561 struct pool_cache *pc;
562
563 /* Locking order: pool_allocator -> pool */
564 simple_lock(&pp->pr_alloc->pa_slock);
565 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
566 simple_unlock(&pp->pr_alloc->pa_slock);
567
568 /* Destroy all caches for this pool. */
569 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
570 pool_cache_destroy(pc);
571
572 #ifdef DIAGNOSTIC
573 if (pp->pr_nout != 0) {
574 pr_printlog(pp, NULL, printf);
575 panic("pool_destroy: pool busy: still out: %u\n",
576 pp->pr_nout);
577 }
578 #endif
579
580 /* Remove all pages */
581 if ((pp->pr_roflags & PR_STATIC) == 0)
582 while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
583 pr_rmpage(pp, ph, NULL);
584
585 /* Remove from global pool list */
586 simple_lock(&pool_head_slock);
587 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
588 if (drainpp == pp) {
589 drainpp = NULL;
590 }
591 simple_unlock(&pool_head_slock);
592
593 #ifdef POOL_DIAGNOSTIC
594 if ((pp->pr_roflags & PR_LOGGING) != 0)
595 free(pp->pr_log, M_TEMP);
596 #endif
597 }
598
599 static __inline struct pool_item_header *
600 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
601 {
602 struct pool_item_header *ph;
603 int s;
604
605 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
606
607 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
608 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
609 else {
610 s = splhigh();
611 ph = pool_get(&phpool, flags);
612 splx(s);
613 }
614
615 return (ph);
616 }
617
618 /*
619 * Grab an item from the pool; must be called at appropriate spl level
620 */
621 void *
622 #ifdef POOL_DIAGNOSTIC
623 _pool_get(struct pool *pp, int flags, const char *file, long line)
624 #else
625 pool_get(struct pool *pp, int flags)
626 #endif
627 {
628 struct pool_item *pi;
629 struct pool_item_header *ph;
630 void *v;
631
632 #ifdef DIAGNOSTIC
633 if (__predict_false((pp->pr_roflags & PR_STATIC) &&
634 (flags & PR_MALLOCOK))) {
635 pr_printlog(pp, NULL, printf);
636 panic("pool_get: static");
637 }
638
639 if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
640 (flags & PR_WAITOK) != 0))
641 panic("pool_get: must have NOWAIT");
642
643 #ifdef LOCKDEBUG
644 if (flags & PR_WAITOK)
645 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
646 #endif
647 #endif /* DIAGNOSTIC */
648
649 simple_lock(&pp->pr_slock);
650 pr_enter(pp, file, line);
651
652 startover:
653 /*
654 * Check to see if we've reached the hard limit. If we have,
655 * and we can wait, then wait until an item has been returned to
656 * the pool.
657 */
658 #ifdef DIAGNOSTIC
659 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
660 pr_leave(pp);
661 simple_unlock(&pp->pr_slock);
662 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
663 }
664 #endif
665 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
666 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
667 /*
668 * XXX: A warning isn't logged in this case. Should
669 * it be?
670 */
671 pp->pr_flags |= PR_WANTED;
672 pr_leave(pp);
673 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
674 pr_enter(pp, file, line);
675 goto startover;
676 }
677
678 /*
679 * Log a message that the hard limit has been hit.
680 */
681 if (pp->pr_hardlimit_warning != NULL &&
682 ratecheck(&pp->pr_hardlimit_warning_last,
683 &pp->pr_hardlimit_ratecap))
684 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
685
686 pp->pr_nfail++;
687
688 pr_leave(pp);
689 simple_unlock(&pp->pr_slock);
690 return (NULL);
691 }
692
693 /*
694 * The convention we use is that if `curpage' is not NULL, then
695 * it points at a non-empty bucket. In particular, `curpage'
696 * never points at a page header which has PR_PHINPAGE set and
697 * has no items in its bucket.
698 */
699 if ((ph = pp->pr_curpage) == NULL) {
700 #ifdef DIAGNOSTIC
701 if (pp->pr_nitems != 0) {
702 simple_unlock(&pp->pr_slock);
703 printf("pool_get: %s: curpage NULL, nitems %u\n",
704 pp->pr_wchan, pp->pr_nitems);
705 panic("pool_get: nitems inconsistent\n");
706 }
707 #endif
708
709 /*
710 * Call the back-end page allocator for more memory.
711 * Release the pool lock, as the back-end page allocator
712 * may block.
713 */
714 pr_leave(pp);
715 simple_unlock(&pp->pr_slock);
716 v = pool_allocator_alloc(pp, flags);
717 if (__predict_true(v != NULL))
718 ph = pool_alloc_item_header(pp, v, flags);
719 simple_lock(&pp->pr_slock);
720 pr_enter(pp, file, line);
721
722 if (__predict_false(v == NULL || ph == NULL)) {
723 if (v != NULL)
724 pool_allocator_free(pp, v);
725
726 /*
727 * We were unable to allocate a page or item
728 * header, but we released the lock during
729 * allocation, so perhaps items were freed
730 * back to the pool. Check for this case.
731 */
732 if (pp->pr_curpage != NULL)
733 goto startover;
734
735 if ((flags & PR_WAITOK) == 0) {
736 pp->pr_nfail++;
737 pr_leave(pp);
738 simple_unlock(&pp->pr_slock);
739 return (NULL);
740 }
741
742 /*
743 * Wait for items to be returned to this pool.
744 *
745 * XXX: maybe we should wake up once a second and
746 * try again?
747 */
748 pp->pr_flags |= PR_WANTED;
749 /* PA_WANTED is already set on the allocator. */
750 pr_leave(pp);
751 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
752 pr_enter(pp, file, line);
753 goto startover;
754 }
755
756 /* We have more memory; add it to the pool */
757 pool_prime_page(pp, v, ph);
758 pp->pr_npagealloc++;
759
760 /* Start the allocation process over. */
761 goto startover;
762 }
763
764 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
765 pr_leave(pp);
766 simple_unlock(&pp->pr_slock);
767 panic("pool_get: %s: page empty", pp->pr_wchan);
768 }
769 #ifdef DIAGNOSTIC
770 if (__predict_false(pp->pr_nitems == 0)) {
771 pr_leave(pp);
772 simple_unlock(&pp->pr_slock);
773 printf("pool_get: %s: items on itemlist, nitems %u\n",
774 pp->pr_wchan, pp->pr_nitems);
775 panic("pool_get: nitems inconsistent\n");
776 }
777 #endif
778
779 #ifdef POOL_DIAGNOSTIC
780 pr_log(pp, v, PRLOG_GET, file, line);
781 #endif
782
783 #ifdef DIAGNOSTIC
784 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
785 pr_printlog(pp, pi, printf);
786 panic("pool_get(%s): free list modified: magic=%x; page %p;"
787 " item addr %p\n",
788 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
789 }
790 #endif
791
792 /*
793 * Remove from item list.
794 */
795 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
796 pp->pr_nitems--;
797 pp->pr_nout++;
798 if (ph->ph_nmissing == 0) {
799 #ifdef DIAGNOSTIC
800 if (__predict_false(pp->pr_nidle == 0))
801 panic("pool_get: nidle inconsistent");
802 #endif
803 pp->pr_nidle--;
804 }
805 ph->ph_nmissing++;
806 if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
807 #ifdef DIAGNOSTIC
808 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
809 pr_leave(pp);
810 simple_unlock(&pp->pr_slock);
811 panic("pool_get: %s: nmissing inconsistent",
812 pp->pr_wchan);
813 }
814 #endif
815 /*
816 * Find a new non-empty page header, if any.
817 * Start search from the page head, to increase
818 * the chance for "high water" pages to be freed.
819 *
820 * Migrate empty pages to the end of the list. This
821 * will speed the update of curpage as pages become
822 * idle. Empty pages intermingled with idle pages
823 * are no big deal. As soon as a page becomes un-empty,
824 * it will move back to the head of the list.
825 */
826 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
827 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
828 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
829 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
830 break;
831
832 pp->pr_curpage = ph;
833 }
834
835 pp->pr_nget++;
836
837 /*
838 * If we have a low water mark and we are now below that low
839 * water mark, add more items to the pool.
840 */
841 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
842 /*
843 * XXX: Should we log a warning? Should we set up a timeout
844 * to try again in a second or so? The latter could break
845 * a caller's assumptions about interrupt protection, etc.
846 */
847 }
848
849 pr_leave(pp);
850 simple_unlock(&pp->pr_slock);
851 return (v);
852 }
853
854 /*
855 * Internal version of pool_put(). Pool is already locked/entered.
856 */
857 static void
858 pool_do_put(struct pool *pp, void *v)
859 {
860 struct pool_item *pi = v;
861 struct pool_item_header *ph;
862 caddr_t page;
863 int s;
864
865 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
866
867 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
868
869 #ifdef DIAGNOSTIC
870 if (__predict_false(pp->pr_nout == 0)) {
871 printf("pool %s: putting with none out\n",
872 pp->pr_wchan);
873 panic("pool_put");
874 }
875 #endif
876
877 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
878 pr_printlog(pp, NULL, printf);
879 panic("pool_put: %s: page header missing", pp->pr_wchan);
880 }
881
882 #ifdef LOCKDEBUG
883 /*
884 * Check if we're freeing a locked simple lock.
885 */
886 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
887 #endif
888
889 /*
890 * Return to item list.
891 */
892 #ifdef DIAGNOSTIC
893 pi->pi_magic = PI_MAGIC;
894 #endif
895 #ifdef DEBUG
896 {
897 int i, *ip = v;
898
899 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
900 *ip++ = PI_MAGIC;
901 }
902 }
903 #endif
904
905 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
906 ph->ph_nmissing--;
907 pp->pr_nput++;
908 pp->pr_nitems++;
909 pp->pr_nout--;
910
911 /* Cancel "pool empty" condition if it exists */
912 if (pp->pr_curpage == NULL)
913 pp->pr_curpage = ph;
914
915 if (pp->pr_flags & PR_WANTED) {
916 pp->pr_flags &= ~PR_WANTED;
917 if (ph->ph_nmissing == 0)
918 pp->pr_nidle++;
919 wakeup((caddr_t)pp);
920 return;
921 }
922
923 /*
924 * If this page is now complete, do one of two things:
925 *
926 * (1) If we have more pages than the page high water
927 * mark, free the page back to the system.
928 *
929 * (2) Move it to the end of the page list, so that
930 * we minimize our chances of fragmenting the
931 * pool. Idle pages migrate to the end (along with
932 * completely empty pages, so that we find un-empty
933 * pages more quickly when we update curpage) of the
934 * list so they can be more easily swept up by
935 * the pagedaemon when pages are scarce.
936 */
937 if (ph->ph_nmissing == 0) {
938 pp->pr_nidle++;
939 if (pp->pr_npages > pp->pr_maxpages) {
940 pr_rmpage(pp, ph, NULL);
941 } else {
942 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
943 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
944
945 /*
946 * Update the timestamp on the page. A page must
947 * be idle for some period of time before it can
948 * be reclaimed by the pagedaemon. This minimizes
949 * ping-pong'ing for memory.
950 */
951 s = splclock();
952 ph->ph_time = mono_time;
953 splx(s);
954
955 /*
956 * Update the current page pointer. Just look for
957 * the first page with any free items.
958 *
959 * XXX: Maybe we want an option to look for the
960 * page with the fewest available items, to minimize
961 * fragmentation?
962 */
963 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
964 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
965 break;
966
967 pp->pr_curpage = ph;
968 }
969 }
970 /*
971 * If the page has just become un-empty, move it to the head of
972 * the list, and make it the current page. The next allocation
973 * will get the item from this page, instead of further fragmenting
974 * the pool.
975 */
976 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
977 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
978 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
979 pp->pr_curpage = ph;
980 }
981 }
982
983 /*
984 * Return resource to the pool; must be called at appropriate spl level
985 */
986 #ifdef POOL_DIAGNOSTIC
987 void
988 _pool_put(struct pool *pp, void *v, const char *file, long line)
989 {
990
991 simple_lock(&pp->pr_slock);
992 pr_enter(pp, file, line);
993
994 pr_log(pp, v, PRLOG_PUT, file, line);
995
996 pool_do_put(pp, v);
997
998 pr_leave(pp);
999 simple_unlock(&pp->pr_slock);
1000 }
1001 #undef pool_put
1002 #endif /* POOL_DIAGNOSTIC */
1003
1004 void
1005 pool_put(struct pool *pp, void *v)
1006 {
1007
1008 simple_lock(&pp->pr_slock);
1009
1010 pool_do_put(pp, v);
1011
1012 simple_unlock(&pp->pr_slock);
1013 }
1014
1015 #ifdef POOL_DIAGNOSTIC
1016 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1017 #endif
1018
1019 /*
1020 * Add N items to the pool.
1021 */
1022 int
1023 pool_prime(struct pool *pp, int n)
1024 {
1025 struct pool_item_header *ph;
1026 caddr_t cp;
1027 int newpages, error = 0;
1028
1029 simple_lock(&pp->pr_slock);
1030
1031 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1032
1033 while (newpages-- > 0) {
1034 simple_unlock(&pp->pr_slock);
1035 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1036 if (__predict_true(cp != NULL))
1037 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1038 simple_lock(&pp->pr_slock);
1039
1040 if (__predict_false(cp == NULL || ph == NULL)) {
1041 error = ENOMEM;
1042 if (cp != NULL)
1043 pool_allocator_free(pp, cp);
1044 break;
1045 }
1046
1047 pool_prime_page(pp, cp, ph);
1048 pp->pr_npagealloc++;
1049 pp->pr_minpages++;
1050 }
1051
1052 if (pp->pr_minpages >= pp->pr_maxpages)
1053 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1054
1055 simple_unlock(&pp->pr_slock);
1056 return (error);
1057 }
1058
1059 /*
1060 * Add a page worth of items to the pool.
1061 *
1062 * Note, we must be called with the pool descriptor LOCKED.
1063 */
1064 static void
1065 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1066 {
1067 struct pool_item *pi;
1068 caddr_t cp = storage;
1069 unsigned int align = pp->pr_align;
1070 unsigned int ioff = pp->pr_itemoffset;
1071 int n;
1072
1073 #ifdef DIAGNOSTIC
1074 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1075 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1076 #endif
1077
1078 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1079 LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1080 ph, ph_hashlist);
1081
1082 /*
1083 * Insert page header.
1084 */
1085 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1086 TAILQ_INIT(&ph->ph_itemlist);
1087 ph->ph_page = storage;
1088 ph->ph_nmissing = 0;
1089 memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1090
1091 pp->pr_nidle++;
1092
1093 /*
1094 * Color this page.
1095 */
1096 cp = (caddr_t)(cp + pp->pr_curcolor);
1097 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1098 pp->pr_curcolor = 0;
1099
1100 /*
1101 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1102 */
1103 if (ioff != 0)
1104 cp = (caddr_t)(cp + (align - ioff));
1105
1106 /*
1107 * Insert remaining chunks on the bucket list.
1108 */
1109 n = pp->pr_itemsperpage;
1110 pp->pr_nitems += n;
1111
1112 while (n--) {
1113 pi = (struct pool_item *)cp;
1114
1115 /* Insert on page list */
1116 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1117 #ifdef DIAGNOSTIC
1118 pi->pi_magic = PI_MAGIC;
1119 #endif
1120 cp = (caddr_t)(cp + pp->pr_size);
1121 }
1122
1123 /*
1124 * If the pool was depleted, point at the new page.
1125 */
1126 if (pp->pr_curpage == NULL)
1127 pp->pr_curpage = ph;
1128
1129 if (++pp->pr_npages > pp->pr_hiwat)
1130 pp->pr_hiwat = pp->pr_npages;
1131 }
1132
1133 /*
1134 * Used by pool_get() when nitems drops below the low water mark. This
1135 * is used to catch up nitems with the low water mark.
1136 *
1137 * Note 1, we never wait for memory here, we let the caller decide what to do.
1138 *
1139 * Note 2, this doesn't work with static pools.
1140 *
1141 * Note 3, we must be called with the pool already locked, and we return
1142 * with it locked.
1143 */
1144 static int
1145 pool_catchup(struct pool *pp)
1146 {
1147 struct pool_item_header *ph;
1148 caddr_t cp;
1149 int error = 0;
1150
1151 if (pp->pr_roflags & PR_STATIC) {
1152 /*
1153 * We dropped below the low water mark, and this is not a
1154 * good thing. Log a warning.
1155 *
1156 * XXX: rate-limit this?
1157 */
1158 printf("WARNING: static pool `%s' dropped below low water "
1159 "mark\n", pp->pr_wchan);
1160 return (0);
1161 }
1162
1163 while (POOL_NEEDS_CATCHUP(pp)) {
1164 /*
1165 * Call the page back-end allocator for more memory.
1166 *
1167 * XXX: We never wait, so should we bother unlocking
1168 * the pool descriptor?
1169 */
1170 simple_unlock(&pp->pr_slock);
1171 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1172 if (__predict_true(cp != NULL))
1173 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1174 simple_lock(&pp->pr_slock);
1175 if (__predict_false(cp == NULL || ph == NULL)) {
1176 if (cp != NULL)
1177 pool_allocator_free(pp, cp);
1178 error = ENOMEM;
1179 break;
1180 }
1181 pool_prime_page(pp, cp, ph);
1182 pp->pr_npagealloc++;
1183 }
1184
1185 return (error);
1186 }
1187
1188 void
1189 pool_setlowat(struct pool *pp, int n)
1190 {
1191 int error;
1192
1193 simple_lock(&pp->pr_slock);
1194
1195 pp->pr_minitems = n;
1196 pp->pr_minpages = (n == 0)
1197 ? 0
1198 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1199
1200 /* Make sure we're caught up with the newly-set low water mark. */
1201 if (POOL_NEEDS_CATCHUP(pp) && ((error = pool_catchup(pp)) != 0)) {
1202 /*
1203 * XXX: Should we log a warning? Should we set up a timeout
1204 * to try again in a second or so? The latter could break
1205 * a caller's assumptions about interrupt protection, etc.
1206 */
1207 }
1208
1209 simple_unlock(&pp->pr_slock);
1210 }
1211
1212 void
1213 pool_sethiwat(struct pool *pp, int n)
1214 {
1215
1216 simple_lock(&pp->pr_slock);
1217
1218 pp->pr_maxpages = (n == 0)
1219 ? 0
1220 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1221
1222 simple_unlock(&pp->pr_slock);
1223 }
1224
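/*
 * Set a hard limit on the number of outstanding items, with an optional
 * rate-limited warning message.  Illustrative call (hypothetical pool
 * and values):
 *
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 */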
1225 void
1226 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1227 {
1228
1229 simple_lock(&pp->pr_slock);
1230
1231 pp->pr_hardlimit = n;
1232 pp->pr_hardlimit_warning = warnmess;
1233 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1234 pp->pr_hardlimit_warning_last.tv_sec = 0;
1235 pp->pr_hardlimit_warning_last.tv_usec = 0;
1236
1237 /*
1238 * In-line version of pool_sethiwat(), because we don't want to
1239 * release the lock.
1240 */
1241 pp->pr_maxpages = (n == 0)
1242 ? 0
1243 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1244
1245 simple_unlock(&pp->pr_slock);
1246 }
1247
1248 /*
1249 * Release all complete pages that have not been used recently.
1250 */
1251 int
1252 #ifdef POOL_DIAGNOSTIC
1253 _pool_reclaim(struct pool *pp, const char *file, long line)
1254 #else
1255 pool_reclaim(struct pool *pp)
1256 #endif
1257 {
1258 struct pool_item_header *ph, *phnext;
1259 struct pool_cache *pc;
1260 struct timeval curtime;
1261 struct pool_pagelist pq;
1262 int s;
1263
1264 if (pp->pr_roflags & PR_STATIC)
1265 return (0);
1266
1267 if (simple_lock_try(&pp->pr_slock) == 0)
1268 return (0);
1269 pr_enter(pp, file, line);
1270 TAILQ_INIT(&pq);
1271
1272 /*
1273 * Reclaim items from the pool's caches.
1274 */
1275 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1276 pool_cache_reclaim(pc);
1277
1278 s = splclock();
1279 curtime = mono_time;
1280 splx(s);
1281
1282 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1283 phnext = TAILQ_NEXT(ph, ph_pagelist);
1284
1285 /* Check our minimum page claim */
1286 if (pp->pr_npages <= pp->pr_minpages)
1287 break;
1288
1289 if (ph->ph_nmissing == 0) {
1290 struct timeval diff;
1291 timersub(&curtime, &ph->ph_time, &diff);
1292 if (diff.tv_sec < pool_inactive_time)
1293 continue;
1294
1295 /*
1296 * If freeing this page would put us below
1297 * the low water mark, stop now.
1298 */
1299 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1300 pp->pr_minitems)
1301 break;
1302
1303 pr_rmpage(pp, ph, &pq);
1304 }
1305 }
1306
1307 pr_leave(pp);
1308 simple_unlock(&pp->pr_slock);
1309 if (TAILQ_EMPTY(&pq))
1310 return (0);
1311
1312 while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1313 TAILQ_REMOVE(&pq, ph, ph_pagelist);
1314 pool_allocator_free(pp, ph->ph_page);
1315 if (pp->pr_roflags & PR_PHINPAGE) {
1316 continue;
1317 }
1318 LIST_REMOVE(ph, ph_hashlist);
1319 s = splhigh();
1320 pool_put(&phpool, ph);
1321 splx(s);
1322 }
1323
1324 return (1);
1325 }
1326
1327 /*
1328 * Drain pools, one at a time.
1329 *
1330 * Note, we must never be called from an interrupt context.
1331 */
1332 void
1333 pool_drain(void *arg)
1334 {
1335 struct pool *pp;
1336 int s;
1337
1338 pp = NULL;
1339 s = splvm();
1340 simple_lock(&pool_head_slock);
1341 if (drainpp == NULL) {
1342 drainpp = TAILQ_FIRST(&pool_head);
1343 }
1344 if (drainpp) {
1345 pp = drainpp;
1346 drainpp = TAILQ_NEXT(pp, pr_poollist);
1347 }
1348 simple_unlock(&pool_head_slock);
1349 if (pp != NULL) pool_reclaim(pp);
1350 splx(s);
1351 }
1352
1353 /*
1354 * Diagnostic helpers.
1355 */
1356 void
1357 pool_print(struct pool *pp, const char *modif)
1358 {
1359 int s;
1360
1361 s = splvm();
1362 if (simple_lock_try(&pp->pr_slock) == 0) {
1363 printf("pool %s is locked; try again later\n",
1364 pp->pr_wchan);
1365 splx(s);
1366 return;
1367 }
1368 pool_print1(pp, modif, printf);
1369 simple_unlock(&pp->pr_slock);
1370 splx(s);
1371 }
1372
1373 void
1374 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1375 {
1376 int didlock = 0;
1377
1378 if (pp == NULL) {
1379 (*pr)("Must specify a pool to print.\n");
1380 return;
1381 }
1382
1383 /*
1384 * Called from DDB; interrupts should be blocked, and all
1385 * other processors should be paused. We can skip locking
1386 * the pool in this case.
1387 *
1388 * We do a simple_lock_try() just to print the lock
1389 * status, however.
1390 */
1391
1392 if (simple_lock_try(&pp->pr_slock) == 0)
1393 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1394 else
1395 didlock = 1;
1396
1397 pool_print1(pp, modif, pr);
1398
1399 if (didlock)
1400 simple_unlock(&pp->pr_slock);
1401 }
1402
1403 static void
1404 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1405 {
1406 struct pool_item_header *ph;
1407 struct pool_cache *pc;
1408 struct pool_cache_group *pcg;
1409 #ifdef DIAGNOSTIC
1410 struct pool_item *pi;
1411 #endif
1412 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1413 char c;
1414
1415 while ((c = *modif++) != '\0') {
1416 if (c == 'l')
1417 print_log = 1;
1418 if (c == 'p')
1419 print_pagelist = 1;
1420 if (c == 'c')
1421 print_cache = 1;
1423 }
1424
1425 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1426 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1427 pp->pr_roflags);
1428 (*pr)("\talloc %p\n", pp->pr_alloc);
1429 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1430 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1431 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1432 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1433
1434 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1435 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1436 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1437 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1438
1439 if (print_pagelist == 0)
1440 goto skip_pagelist;
1441
1442 if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1443 (*pr)("\n\tpage list:\n");
1444 for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1445 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1446 ph->ph_page, ph->ph_nmissing,
1447 (u_long)ph->ph_time.tv_sec,
1448 (u_long)ph->ph_time.tv_usec);
1449 #ifdef DIAGNOSTIC
1450 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1451 if (pi->pi_magic != PI_MAGIC) {
1452 (*pr)("\t\t\titem %p, magic 0x%x\n",
1453 pi, pi->pi_magic);
1454 }
1455 }
1456 #endif
1457 }
1458 if (pp->pr_curpage == NULL)
1459 (*pr)("\tno current page\n");
1460 else
1461 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1462
1463 skip_pagelist:
1464
1465 if (print_log == 0)
1466 goto skip_log;
1467
1468 (*pr)("\n");
1469 if ((pp->pr_roflags & PR_LOGGING) == 0)
1470 (*pr)("\tno log\n");
1471 else
1472 pr_printlog(pp, NULL, pr);
1473
1474 skip_log:
1475
1476 if (print_cache == 0)
1477 goto skip_cache;
1478
1479 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1480 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1481 pc->pc_allocfrom, pc->pc_freeto);
1482 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1483 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1484 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1485 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1486 for (i = 0; i < PCG_NOBJECTS; i++)
1487 (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1488 }
1489 }
1490
1491 skip_cache:
1492
1493 pr_enter_check(pp, pr);
1494 }
1495
1496 int
1497 pool_chk(struct pool *pp, const char *label)
1498 {
1499 struct pool_item_header *ph;
1500 int r = 0;
1501
1502 simple_lock(&pp->pr_slock);
1503
1504 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1505 struct pool_item *pi;
1506 int n;
1507 caddr_t page;
1508
1509 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1510 if (page != ph->ph_page &&
1511 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1512 if (label != NULL)
1513 printf("%s: ", label);
1514 printf("pool(%p:%s): page inconsistency: page %p;"
1515 " at page head addr %p (p %p)\n", pp,
1516 pp->pr_wchan, ph->ph_page,
1517 ph, page);
1518 r++;
1519 goto out;
1520 }
1521
1522 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1523 pi != NULL;
1524 pi = TAILQ_NEXT(pi,pi_list), n++) {
1525
1526 #ifdef DIAGNOSTIC
1527 if (pi->pi_magic != PI_MAGIC) {
1528 if (label != NULL)
1529 printf("%s: ", label);
1530 printf("pool(%s): free list modified: magic=%x;"
1531 " page %p; item ordinal %d;"
1532 " addr %p (p %p)\n",
1533 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1534 n, pi, page);
1535 panic("pool");
1536 }
1537 #endif
1538 page =
1539 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1540 if (page == ph->ph_page)
1541 continue;
1542
1543 if (label != NULL)
1544 printf("%s: ", label);
1545 printf("pool(%p:%s): page inconsistency: page %p;"
1546 " item ordinal %d; addr %p (p %p)\n", pp,
1547 pp->pr_wchan, ph->ph_page,
1548 n, pi, page);
1549 r++;
1550 goto out;
1551 }
1552 }
1553 out:
1554 simple_unlock(&pp->pr_slock);
1555 return (r);
1556 }
1557
1558 /*
1559 * pool_cache_init:
1560 *
1561 * Initialize a pool cache.
1562 *
1563 * NOTE: If the pool must be protected from interrupts, we expect
1564 * to be called at the appropriate interrupt priority level.
1565 */
1566 void
1567 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1568 int (*ctor)(void *, void *, int),
1569 void (*dtor)(void *, void *),
1570 void *arg)
1571 {
1572
1573 TAILQ_INIT(&pc->pc_grouplist);
1574 simple_lock_init(&pc->pc_slock);
1575
1576 pc->pc_allocfrom = NULL;
1577 pc->pc_freeto = NULL;
1578 pc->pc_pool = pp;
1579
1580 pc->pc_ctor = ctor;
1581 pc->pc_dtor = dtor;
1582 pc->pc_arg = arg;
1583
1584 pc->pc_hits = 0;
1585 pc->pc_misses = 0;
1586
1587 pc->pc_ngroups = 0;
1588
1589 pc->pc_nitems = 0;
1590
1591 simple_lock(&pp->pr_slock);
1592 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1593 simple_unlock(&pp->pr_slock);
1594 }
1595
1596 /*
1597 * pool_cache_destroy:
1598 *
1599 * Destroy a pool cache.
1600 */
1601 void
1602 pool_cache_destroy(struct pool_cache *pc)
1603 {
1604 struct pool *pp = pc->pc_pool;
1605
1606 /* First, invalidate the entire cache. */
1607 pool_cache_invalidate(pc);
1608
1609 /* ...and remove it from the pool's cache list. */
1610 simple_lock(&pp->pr_slock);
1611 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1612 simple_unlock(&pp->pr_slock);
1613 }
1614
1615 static __inline void *
1616 pcg_get(struct pool_cache_group *pcg)
1617 {
1618 void *object;
1619 u_int idx;
1620
1621 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1622 KASSERT(pcg->pcg_avail != 0);
1623 idx = --pcg->pcg_avail;
1624
1625 KASSERT(pcg->pcg_objects[idx] != NULL);
1626 object = pcg->pcg_objects[idx];
1627 pcg->pcg_objects[idx] = NULL;
1628
1629 return (object);
1630 }
1631
1632 static __inline void
1633 pcg_put(struct pool_cache_group *pcg, void *object)
1634 {
1635 u_int idx;
1636
1637 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1638 idx = pcg->pcg_avail++;
1639
1640 KASSERT(pcg->pcg_objects[idx] == NULL);
1641 pcg->pcg_objects[idx] = object;
1642 }
1643
1644 /*
1645 * pool_cache_get:
1646 *
1647 * Get an object from a pool cache.
1648 */
1649 void *
1650 pool_cache_get(struct pool_cache *pc, int flags)
1651 {
1652 struct pool_cache_group *pcg;
1653 void *object;
1654
1655 #ifdef LOCKDEBUG
1656 if (flags & PR_WAITOK)
1657 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1658 #endif
1659
1660 simple_lock(&pc->pc_slock);
1661
1662 if ((pcg = pc->pc_allocfrom) == NULL) {
1663 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1664 if (pcg->pcg_avail != 0) {
1665 pc->pc_allocfrom = pcg;
1666 goto have_group;
1667 }
1668 }
1669
1670 /*
1671 * No groups with any available objects. Allocate
1672 * a new object, construct it, and return it to
1673 * the caller. We will allocate a group, if necessary,
1674 * when the object is freed back to the cache.
1675 */
1676 pc->pc_misses++;
1677 simple_unlock(&pc->pc_slock);
1678 object = pool_get(pc->pc_pool, flags);
1679 if (object != NULL && pc->pc_ctor != NULL) {
1680 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1681 pool_put(pc->pc_pool, object);
1682 return (NULL);
1683 }
1684 }
1685 return (object);
1686 }
1687
1688 have_group:
1689 pc->pc_hits++;
1690 pc->pc_nitems--;
1691 object = pcg_get(pcg);
1692
1693 if (pcg->pcg_avail == 0)
1694 pc->pc_allocfrom = NULL;
1695
1696 simple_unlock(&pc->pc_slock);
1697
1698 return (object);
1699 }
1700
1701 /*
1702 * pool_cache_put:
1703 *
1704 * Put an object back to the pool cache.
1705 */
1706 void
1707 pool_cache_put(struct pool_cache *pc, void *object)
1708 {
1709 struct pool_cache_group *pcg;
1710 int s;
1711
1712 simple_lock(&pc->pc_slock);
1713
1714 if ((pcg = pc->pc_freeto) == NULL) {
1715 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1716 if (pcg->pcg_avail != PCG_NOBJECTS) {
1717 pc->pc_freeto = pcg;
1718 goto have_group;
1719 }
1720 }
1721
1722 /*
1723 * No group has room to take the object. Attempt to
1724 * allocate a new group.
1725 */
1726 simple_unlock(&pc->pc_slock);
1727 s = splvm();
1728 pcg = pool_get(&pcgpool, PR_NOWAIT);
1729 splx(s);
1730 if (pcg != NULL) {
1731 memset(pcg, 0, sizeof(*pcg));
1732 simple_lock(&pc->pc_slock);
1733 pc->pc_ngroups++;
1734 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1735 if (pc->pc_freeto == NULL)
1736 pc->pc_freeto = pcg;
1737 goto have_group;
1738 }
1739
1740 /*
1741 * Unable to allocate a cache group; destruct the object
1742 * and free it back to the pool.
1743 */
1744 pool_cache_destruct_object(pc, object);
1745 return;
1746 }
1747
1748 have_group:
1749 pc->pc_nitems++;
1750 pcg_put(pcg, object);
1751
1752 if (pcg->pcg_avail == PCG_NOBJECTS)
1753 pc->pc_freeto = NULL;
1754
1755 simple_unlock(&pc->pc_slock);
1756 }
1757
1758 /*
1759 * pool_cache_destruct_object:
1760 *
1761 * Force destruction of an object and its release back into
1762 * the pool.
1763 */
1764 void
1765 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1766 {
1767
1768 if (pc->pc_dtor != NULL)
1769 (*pc->pc_dtor)(pc->pc_arg, object);
1770 pool_put(pc->pc_pool, object);
1771 }
1772
1773 /*
1774 * pool_cache_do_invalidate:
1775 *
1776 * This internal function implements pool_cache_invalidate() and
1777 * pool_cache_reclaim().
1778 */
1779 static void
1780 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1781 void (*putit)(struct pool *, void *))
1782 {
1783 struct pool_cache_group *pcg, *npcg;
1784 void *object;
1785 int s;
1786
1787 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1788 pcg = npcg) {
1789 npcg = TAILQ_NEXT(pcg, pcg_list);
1790 while (pcg->pcg_avail != 0) {
1791 pc->pc_nitems--;
1792 object = pcg_get(pcg);
1793 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1794 pc->pc_allocfrom = NULL;
1795 if (pc->pc_dtor != NULL)
1796 (*pc->pc_dtor)(pc->pc_arg, object);
1797 (*putit)(pc->pc_pool, object);
1798 }
1799 if (free_groups) {
1800 pc->pc_ngroups--;
1801 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1802 if (pc->pc_freeto == pcg)
1803 pc->pc_freeto = NULL;
1804 s = splvm();
1805 pool_put(&pcgpool, pcg);
1806 splx(s);
1807 }
1808 }
1809 }
1810
1811 /*
1812 * pool_cache_invalidate:
1813 *
1814 * Invalidate a pool cache (destruct and release all of the
1815 * cached objects).
1816 */
1817 void
1818 pool_cache_invalidate(struct pool_cache *pc)
1819 {
1820
1821 simple_lock(&pc->pc_slock);
1822 pool_cache_do_invalidate(pc, 0, pool_put);
1823 simple_unlock(&pc->pc_slock);
1824 }
1825
1826 /*
1827 * pool_cache_reclaim:
1828 *
1829 * Reclaim a pool cache for pool_reclaim().
1830 */
1831 static void
1832 pool_cache_reclaim(struct pool_cache *pc)
1833 {
1834
1835 simple_lock(&pc->pc_slock);
1836 pool_cache_do_invalidate(pc, 1, pool_do_put);
1837 simple_unlock(&pc->pc_slock);
1838 }
1839
1840 /*
1841 * Pool backend allocators.
1842 *
1843 * Each pool has a backend allocator that handles allocation, deallocation,
1844 * and any additional draining that might be needed.
1845 *
1846 * We provide two standard allocators:
1847 *
1848 * pool_allocator_kmem - the default when no allocator is specified
1849 *
1850 * pool_allocator_nointr - used for pools that will not be accessed
1851 * in interrupt context.
1852 */
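/*
 * Illustrative sketch (hypothetical names): a subsystem with its own
 * memory source can supply a private back-end by filling in the alloc
 * hook, free hook and "page" size (0 means PAGE_SIZE), then passing
 * the allocator to pool_init():
 *
 *	void *foo_page_alloc(struct pool *, int);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */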
1853 void *pool_page_alloc(struct pool *, int);
1854 void pool_page_free(struct pool *, void *);
1855
1856 struct pool_allocator pool_allocator_kmem = {
1857 pool_page_alloc, pool_page_free, 0,
1858 };
1859
1860 void *pool_page_alloc_nointr(struct pool *, int);
1861 void pool_page_free_nointr(struct pool *, void *);
1862
1863 struct pool_allocator pool_allocator_nointr = {
1864 pool_page_alloc_nointr, pool_page_free_nointr, 0,
1865 };
1866
1867 #ifdef POOL_SUBPAGE
1868 void *pool_subpage_alloc(struct pool *, int);
1869 void pool_subpage_free(struct pool *, void *);
1870
1871 struct pool_allocator pool_allocator_kmem_subpage = {
1872 pool_subpage_alloc, pool_subpage_free, 0,
1873 };
1874 #endif /* POOL_SUBPAGE */
1875
1876 /*
1877 * We have at least three different resources for the same allocation and
1878 * each resource can be depleted. First, we have the ready elements in the
1879 * pool. Then we have the resource (typically a vm_map) for this allocator.
1880 * Finally, we have physical memory. Waiting for any of these can be
1881 * unnecessary when any other is freed, but the kernel doesn't support
1882 * sleeping on multiple wait channels, so we have to employ another strategy.
1883 *
1884 * The caller sleeps on the pool (so that it can be awakened when an item
1885 * is returned to the pool), but we set PA_WANT on the allocator. When a
1886 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1887 * will wake up all sleeping pools belonging to this allocator.
1888 *
1889 * XXX Thundering herd.
1890 */
1891 void *
1892 pool_allocator_alloc(struct pool *org, int flags)
1893 {
1894 struct pool_allocator *pa = org->pr_alloc;
1895 struct pool *pp, *start;
1896 int s, freed;
1897 void *res;
1898
1899 do {
1900 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1901 return (res);
1902 if ((flags & PR_WAITOK) == 0)
1903 break;
1904
1905 /*
1906 * Drain all pools, except "org", that use this
1907 * allocator. We do this to reclaim VA space.
1908 * pa_alloc is responsible for waiting for
1909 * physical memory.
1910 *
1911 * XXX We risk looping forever if someone calls
1912 * pool_destroy() on "start". But there is no
1913 * other way to have potentially sleeping pool_reclaim,
1914 * non-sleeping locks on pool_allocator, and some
1915 * stirring of drained pools in the allocator.
1916 */
1917 freed = 0;
1918
1919 s = splvm();
1920 simple_lock(&pa->pa_slock);
1921 pp = start = TAILQ_FIRST(&pa->pa_list);
1922 do {
1923 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1924 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1925 if (pp == org)
1926 continue;
1927 simple_unlock(&pa->pa_slock);
1928 freed = pool_reclaim(pp);
1929 simple_lock(&pa->pa_slock);
1930 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1931 freed == 0);
1932
1933 if (freed == 0) {
1934 /*
1935 * We set PA_WANT here, the caller will most likely
1936 * sleep waiting for pages (if not, this won't hurt
1937 * that much), and there is no way to set this in
1938 * the caller without violating locking order.
1939 */
1940 pa->pa_flags |= PA_WANT;
1941 }
1942 simple_unlock(&pa->pa_slock);
1943 splx(s);
1944 } while (freed);
1945 return (NULL);
1946 }
1947
1948 void
1949 pool_allocator_free(struct pool *pp, void *v)
1950 {
1951 struct pool_allocator *pa = pp->pr_alloc;
1952 int s;
1953
1954 (*pa->pa_free)(pp, v);
1955
1956 s = splvm();
1957 simple_lock(&pa->pa_slock);
1958 if ((pa->pa_flags & PA_WANT) == 0) {
1959 simple_unlock(&pa->pa_slock);
1960 splx(s);
1961 return;
1962 }
1963
1964 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1965 simple_lock(&pp->pr_slock);
1966 if ((pp->pr_flags & PR_WANTED) != 0) {
1967 pp->pr_flags &= ~PR_WANTED;
1968 wakeup(pp);
1969 }
1970 }
1971 pa->pa_flags &= ~PA_WANT;
1972 simple_unlock(&pa->pa_slock);
1973 splx(s);
1974 }
1975
1976 void *
1977 pool_page_alloc(struct pool *pp, int flags)
1978 {
1979 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1980
1981 return ((void *) uvm_km_alloc_poolpage(waitok));
1982 }
1983
1984 void
1985 pool_page_free(struct pool *pp, void *v)
1986 {
1987
1988 uvm_km_free_poolpage((vaddr_t) v);
1989 }
1990
1991 #ifdef POOL_SUBPAGE
1992 /* Sub-page allocator, for machines with large hardware pages. */
1993 void *
1994 pool_subpage_alloc(struct pool *pp, int flags)
1995 {
1996
1997 return (pool_get(&psppool, flags));
1998 }
1999
2000 void
2001 pool_subpage_free(struct pool *pp, void *v)
2002 {
2003
2004 pool_put(&psppool, v);
2005 }
2006
2007 /* We don't provide a real nointr allocator. Maybe later. */
2008 void *
2009 pool_page_alloc_nointr(struct pool *pp, int flags)
2010 {
2011
2012 return (pool_subpage_alloc(pp, flags));
2013 }
2014
2015 void
2016 pool_page_free_nointr(struct pool *pp, void *v)
2017 {
2018
2019 pool_subpage_free(pp, v);
2020 }
2021 #else
2022 void *
2023 pool_page_alloc_nointr(struct pool *pp, int flags)
2024 {
2025 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2026
2027 return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2028 uvm.kernel_object, waitok));
2029 }
2030
2031 void
2032 pool_page_free_nointr(struct pool *pp, void *v)
2033 {
2034
2035 uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2036 }
2037 #endif /* POOL_SUBPAGE */
2038