1 /* $NetBSD: subr_pool.c,v 1.92 2004/02/22 00:19:48 enami Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.92 2004/02/22 00:19:48 enami Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according to
63 * the pool item size. Each page is kept on one of three lists in the
64 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65 * for empty, full and partially-full pages respectively. The individual
66 * pool items are on a linked list headed by `ph_itemlist' in each page
67 * header. The memory for building the page list is either taken from
68 * the allocated pages themselves (for small pool items) or taken from
69 * an internal pool of page headers (`phpool').
70 */
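/*
 * Illustrative sketch of typical pool usage (not compiled here; the
 * "example_foo" names are hypothetical, only pool_init(), pool_get()
 * and pool_put() are the real interfaces from <sys/pool.h>):
 */
#if 0
struct example_foo {
	int	ef_refcnt;
	char	ef_name[16];
};

static struct pool example_foo_pool;

void
example_foo_init(void)
{

	/* One pool item per struct example_foo; default page allocator. */
	pool_init(&example_foo_pool, sizeof(struct example_foo), 0, 0, 0,
	    "examplefoo", NULL);
}

struct example_foo *
example_foo_create(void)
{
	struct example_foo *ef;

	/* PR_WAITOK: sleep until an item or a fresh page is available. */
	ef = pool_get(&example_foo_pool, PR_WAITOK);
	if (ef != NULL)
		memset(ef, 0, sizeof(*ef));
	return (ef);
}

void
example_foo_destroy(struct example_foo *ef)
{

	pool_put(&example_foo_pool, ef);
}
#endif /* example */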
71
72 /* List of all pools */
73 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
74
75 /* Private pool for page header structures */
76 static struct pool phpool;
77
78 #ifdef POOL_SUBPAGE
79 /* Pool of subpages for use by normal pools. */
80 static struct pool psppool;
81 #endif
82
83 /* # of seconds to retain page after last use */
84 int pool_inactive_time = 10;
85
86 /* Next candidate for drainage (see pool_drain()) */
87 static struct pool *drainpp;
88
89 /* This spin lock protects both pool_head and drainpp. */
90 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
91
92 struct pool_item_header {
93 /* Page headers */
94 LIST_ENTRY(pool_item_header)
95 ph_pagelist; /* pool page list */
96 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
97 SPLAY_ENTRY(pool_item_header)
98 ph_node; /* Off-page page headers */
99 unsigned int ph_nmissing; /* # of chunks in use */
100 caddr_t ph_page; /* this page's address */
101 struct timeval ph_time; /* last referenced */
102 };
103
104 struct pool_item {
105 #ifdef DIAGNOSTIC
106 u_int pi_magic;
107 #endif
108 #define PI_MAGIC 0xdeadbeefU
109 /* Other entries use only this list entry */
110 TAILQ_ENTRY(pool_item) pi_list;
111 };
112
113 #define POOL_NEEDS_CATCHUP(pp) \
114 ((pp)->pr_nitems < (pp)->pr_minitems)
115
116 /*
117 * Pool cache management.
118 *
119 * Pool caches provide a way for constructed objects to be cached by the
120 * pool subsystem. This can lead to performance improvements by avoiding
121 * needless object construction/destruction; construction and destruction
122 * are deferred until absolutely necessary.
123 *
124 * Caches are grouped into cache groups. Each cache group references
125 * up to 16 constructed objects. When a cache allocates an object
126 * from the pool, it calls the object's constructor and places it into
127 * a cache group. When a cache group frees an object back to the pool,
128 * it first calls the object's destructor. This allows the object to
129 * persist in constructed form while freed to the cache.
130 *
131 * Multiple caches may exist for each pool. This allows a single
132 * object type to have multiple constructed forms. The pool references
133 * each cache, so that when a pool is drained by the pagedaemon, it can
134 * drain each individual cache as well. Each time a cache is drained,
135 * the most idle cache group is freed to the pool in its entirety.
136 *
137 * Pool caches are laid on top of pools. By layering them, we can avoid
138 * the complexity of cache management for pools which would not benefit
139 * from it.
140 */
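/*
 * Illustrative sketch of a pool cache layered on a pool (not compiled
 * here; the "example_buf" names, constructor and destructor are
 * hypothetical, pool_cache_init() and the get/put calls are the
 * interfaces implemented below):
 */
#if 0
struct example_buf {
	char	eb_data[128];
};

static struct pool example_buf_pool;
static struct pool_cache example_buf_cache;

static int
example_buf_ctor(void *arg, void *object, int flags)
{

	/* Bring the object to its constructed form; return 0 on success. */
	memset(object, 0, sizeof(struct example_buf));
	return (0);
}

static void
example_buf_dtor(void *arg, void *object)
{

	/* Undo whatever the constructor set up. */
}

void
example_buf_init(void)
{

	pool_init(&example_buf_pool, sizeof(struct example_buf), 0, 0, 0,
	    "examplebuf", NULL);
	pool_cache_init(&example_buf_cache, &example_buf_pool,
	    example_buf_ctor, example_buf_dtor, NULL);
}

/*
 * Objects are then obtained and released through the cache; a cache
 * hit hands back an already-constructed object and skips the
 * constructor/destructor entirely:
 *
 *	bp = pool_cache_get(&example_buf_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&example_buf_cache, bp);
 */
#endif /* example */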
141
142 /* The cache group pool. */
143 static struct pool pcgpool;
144
145 static void pool_cache_reclaim(struct pool_cache *);
146
147 static int pool_catchup(struct pool *);
148 static void pool_prime_page(struct pool *, caddr_t,
149 struct pool_item_header *);
150 static void pool_update_curpage(struct pool *);
151
152 void *pool_allocator_alloc(struct pool *, int);
153 void pool_allocator_free(struct pool *, void *);
154
155 static void pool_print_pagelist(struct pool_pagelist *,
156 void (*)(const char *, ...));
157 static void pool_print1(struct pool *, const char *,
158 void (*)(const char *, ...));
159
160 static int pool_chk_page(struct pool *, const char *,
161 struct pool_item_header *);
162
163 /*
164 * Pool log entry. An array of these is allocated in pool_init().
165 */
166 struct pool_log {
167 const char *pl_file;
168 long pl_line;
169 int pl_action;
170 #define PRLOG_GET 1
171 #define PRLOG_PUT 2
172 void *pl_addr;
173 };
174
175 #ifdef POOL_DIAGNOSTIC
176 /* Number of entries in pool log buffers */
177 #ifndef POOL_LOGSIZE
178 #define POOL_LOGSIZE 10
179 #endif
180
181 int pool_logsize = POOL_LOGSIZE;
182
183 static __inline void
184 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
185 {
186 int n = pp->pr_curlogentry;
187 struct pool_log *pl;
188
189 if ((pp->pr_roflags & PR_LOGGING) == 0)
190 return;
191
192 /*
193 * Fill in the current entry. Wrap around and overwrite
194 * the oldest entry if necessary.
195 */
196 pl = &pp->pr_log[n];
197 pl->pl_file = file;
198 pl->pl_line = line;
199 pl->pl_action = action;
200 pl->pl_addr = v;
201 if (++n >= pp->pr_logsize)
202 n = 0;
203 pp->pr_curlogentry = n;
204 }
205
206 static void
207 pr_printlog(struct pool *pp, struct pool_item *pi,
208 void (*pr)(const char *, ...))
209 {
210 int i = pp->pr_logsize;
211 int n = pp->pr_curlogentry;
212
213 if ((pp->pr_roflags & PR_LOGGING) == 0)
214 return;
215
216 /*
217 * Print all entries in this pool's log.
218 */
219 while (i-- > 0) {
220 struct pool_log *pl = &pp->pr_log[n];
221 if (pl->pl_action != 0) {
222 if (pi == NULL || pi == pl->pl_addr) {
223 (*pr)("\tlog entry %d:\n", i);
224 (*pr)("\t\taction = %s, addr = %p\n",
225 pl->pl_action == PRLOG_GET ? "get" : "put",
226 pl->pl_addr);
227 (*pr)("\t\tfile: %s at line %ld\n",
228 pl->pl_file, pl->pl_line);
229 }
230 }
231 if (++n >= pp->pr_logsize)
232 n = 0;
233 }
234 }
235
236 static __inline void
237 pr_enter(struct pool *pp, const char *file, long line)
238 {
239
240 if (__predict_false(pp->pr_entered_file != NULL)) {
241 printf("pool %s: reentrancy at file %s line %ld\n",
242 pp->pr_wchan, file, line);
243 printf(" previous entry at file %s line %ld\n",
244 pp->pr_entered_file, pp->pr_entered_line);
245 panic("pr_enter");
246 }
247
248 pp->pr_entered_file = file;
249 pp->pr_entered_line = line;
250 }
251
252 static __inline void
253 pr_leave(struct pool *pp)
254 {
255
256 if (__predict_false(pp->pr_entered_file == NULL)) {
257 printf("pool %s not entered?\n", pp->pr_wchan);
258 panic("pr_leave");
259 }
260
261 pp->pr_entered_file = NULL;
262 pp->pr_entered_line = 0;
263 }
264
265 static __inline void
266 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
267 {
268
269 if (pp->pr_entered_file != NULL)
270 (*pr)("\n\tcurrently entered from file %s line %ld\n",
271 pp->pr_entered_file, pp->pr_entered_line);
272 }
273 #else
274 #define pr_log(pp, v, action, file, line)
275 #define pr_printlog(pp, pi, pr)
276 #define pr_enter(pp, file, line)
277 #define pr_leave(pp)
278 #define pr_enter_check(pp, pr)
279 #endif /* POOL_DIAGNOSTIC */
280
281 static __inline int
282 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
283 {
284 if (a->ph_page < b->ph_page)
285 return (-1);
286 else if (a->ph_page > b->ph_page)
287 return (1);
288 else
289 return (0);
290 }
291
292 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
293 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
294
295 /*
296 * Return the pool page header based on page address.
297 */
298 static __inline struct pool_item_header *
299 pr_find_pagehead(struct pool *pp, caddr_t page)
300 {
301 struct pool_item_header *ph, tmp;
302
303 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
304 return ((struct pool_item_header *)(page + pp->pr_phoffset));
305
306 tmp.ph_page = page;
307 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
308 return ph;
309 }
310
311 /*
312 * Remove a page from the pool.
313 */
314 static __inline void
315 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
316 struct pool_pagelist *pq)
317 {
318 int s;
319
320 LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
321
322 /*
323 * If the page was idle, decrement the idle page count.
324 */
325 if (ph->ph_nmissing == 0) {
326 #ifdef DIAGNOSTIC
327 if (pp->pr_nidle == 0)
328 panic("pr_rmpage: nidle inconsistent");
329 if (pp->pr_nitems < pp->pr_itemsperpage)
330 panic("pr_rmpage: nitems inconsistent");
331 #endif
332 pp->pr_nidle--;
333 }
334
335 pp->pr_nitems -= pp->pr_itemsperpage;
336
337 /*
338 * Unlink a page from the pool and release it (or queue it for release).
339 */
340 LIST_REMOVE(ph, ph_pagelist);
341 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
342 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
343 if (pq) {
344 LIST_INSERT_HEAD(pq, ph, ph_pagelist);
345 } else {
346 pool_allocator_free(pp, ph->ph_page);
347 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
348 s = splvm();
349 pool_put(&phpool, ph);
350 splx(s);
351 }
352 }
353 pp->pr_npages--;
354 pp->pr_npagefree++;
355
356 pool_update_curpage(pp);
357 }
358
359 /*
360 * Initialize the given pool resource structure.
361 *
362 * We export this routine to allow other kernel parts to declare
363 * static pools that must be initialized before malloc() is available.
364 */
365 void
366 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
367 const char *wchan, struct pool_allocator *palloc)
368 {
369 int off, slack;
370 size_t trysize, phsize;
371
372 #ifdef POOL_DIAGNOSTIC
373 /*
374 * Always log if POOL_DIAGNOSTIC is defined.
375 */
376 if (pool_logsize != 0)
377 flags |= PR_LOGGING;
378 #endif
379
380 #ifdef POOL_SUBPAGE
381 /*
382 * XXX We don't provide a real `nointr' back-end
383 * yet; all sub-pages come from a kmem back-end.
384 * maybe some day...
385 */
386 if (palloc == NULL) {
387 extern struct pool_allocator pool_allocator_kmem_subpage;
388 palloc = &pool_allocator_kmem_subpage;
389 }
390 /*
391 * We'll assume any user-specified back-end allocator
392 * will deal with sub-pages, or simply doesn't care.
393 */
394 #else
395 if (palloc == NULL)
396 palloc = &pool_allocator_kmem;
397 #endif /* POOL_SUBPAGE */
398 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
399 if (palloc->pa_pagesz == 0) {
400 #ifdef POOL_SUBPAGE
401 if (palloc == &pool_allocator_kmem)
402 palloc->pa_pagesz = PAGE_SIZE;
403 else
404 palloc->pa_pagesz = POOL_SUBPAGE;
405 #else
406 palloc->pa_pagesz = PAGE_SIZE;
407 #endif /* POOL_SUBPAGE */
408 }
409
410 TAILQ_INIT(&palloc->pa_list);
411
412 simple_lock_init(&palloc->pa_slock);
413 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
414 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
415 palloc->pa_flags |= PA_INITIALIZED;
416 }
417
418 if (align == 0)
419 align = ALIGN(1);
420
421 if (size < sizeof(struct pool_item))
422 size = sizeof(struct pool_item);
423
424 size = roundup(size, align);
425 #ifdef DIAGNOSTIC
426 if (size > palloc->pa_pagesz)
427 panic("pool_init: pool item size (%lu) too large",
428 (u_long)size);
429 #endif
430
431 /*
432 * Initialize the pool structure.
433 */
434 LIST_INIT(&pp->pr_emptypages);
435 LIST_INIT(&pp->pr_fullpages);
436 LIST_INIT(&pp->pr_partpages);
437 TAILQ_INIT(&pp->pr_cachelist);
438 pp->pr_curpage = NULL;
439 pp->pr_npages = 0;
440 pp->pr_minitems = 0;
441 pp->pr_minpages = 0;
442 pp->pr_maxpages = UINT_MAX;
443 pp->pr_roflags = flags;
444 pp->pr_flags = 0;
445 pp->pr_size = size;
446 pp->pr_align = align;
447 pp->pr_wchan = wchan;
448 pp->pr_alloc = palloc;
449 pp->pr_nitems = 0;
450 pp->pr_nout = 0;
451 pp->pr_hardlimit = UINT_MAX;
452 pp->pr_hardlimit_warning = NULL;
453 pp->pr_hardlimit_ratecap.tv_sec = 0;
454 pp->pr_hardlimit_ratecap.tv_usec = 0;
455 pp->pr_hardlimit_warning_last.tv_sec = 0;
456 pp->pr_hardlimit_warning_last.tv_usec = 0;
457 pp->pr_drain_hook = NULL;
458 pp->pr_drain_hook_arg = NULL;
459
460 /*
461 * Decide whether to put the page header off-page, to avoid
462 * wasting too large a part of the page or too big an item.
463 * Off-page page headers go on a splay tree, so we can match
464 * a returned item with its header based on the page address.
465 * We use 1/16 of the page size and about 8 times the item
466 * size as the threshold (XXX: tune)
467 *
468 * However, we'll put the header into the page if we can put
469 * it without wasting any items.
470 *
471 * Silently enforce `0 <= ioff < align'.
472 */
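	/*
	 * Worked example of the test below (assuming a hypothetical
	 * 4096-byte page and a 40-byte aligned page header): the header
	 * is kept in-page, at the end of the page, when the item size is
	 * below MIN(4096 / 16, 40 * 8) = 256 bytes, or when giving up the
	 * last 40 bytes of the page does not reduce the number of items
	 * that fit; otherwise the header comes from phpool and is looked
	 * up through the splay tree.
	 */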
473 pp->pr_itemoffset = ioff %= align;
474 /* See the comment below about reserved bytes. */
475 trysize = palloc->pa_pagesz - ((align - ioff) % align);
476 phsize = ALIGN(sizeof(struct pool_item_header));
477 if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
478 trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
479 /* Use the end of the page for the page header */
480 pp->pr_roflags |= PR_PHINPAGE;
481 pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
482 } else {
483 /* The page header will be taken from our page header pool */
484 pp->pr_phoffset = 0;
485 off = palloc->pa_pagesz;
486 SPLAY_INIT(&pp->pr_phtree);
487 }
488
489 /*
490 * Alignment is to take place at `ioff' within the item. This means
491 * we must reserve up to `align - 1' bytes on the page to allow
492 * appropriate positioning of each item.
493 */
494 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
495 KASSERT(pp->pr_itemsperpage != 0);
496
497 /*
498 * Use the slack between the chunks and the page header
499 * for "cache coloring".
500 */
501 slack = off - pp->pr_itemsperpage * pp->pr_size;
502 pp->pr_maxcolor = (slack / align) * align;
503 pp->pr_curcolor = 0;
504
505 pp->pr_nget = 0;
506 pp->pr_nfail = 0;
507 pp->pr_nput = 0;
508 pp->pr_npagealloc = 0;
509 pp->pr_npagefree = 0;
510 pp->pr_hiwat = 0;
511 pp->pr_nidle = 0;
512
513 #ifdef POOL_DIAGNOSTIC
514 if (flags & PR_LOGGING) {
515 if (kmem_map == NULL ||
516 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
517 M_TEMP, M_NOWAIT)) == NULL)
518 pp->pr_roflags &= ~PR_LOGGING;
519 pp->pr_curlogentry = 0;
520 pp->pr_logsize = pool_logsize;
521 }
522 #endif
523
524 pp->pr_entered_file = NULL;
525 pp->pr_entered_line = 0;
526
527 simple_lock_init(&pp->pr_slock);
528
529 /*
530 * Initialize private page header pool and cache magazine pool if we
531 * haven't done so yet.
532 * XXX LOCKING.
533 */
534 if (phpool.pr_size == 0) {
535 #ifdef POOL_SUBPAGE
536 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
537 "phpool", &pool_allocator_kmem);
538 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
539 PR_RECURSIVE, "psppool", &pool_allocator_kmem);
540 #else
541 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
542 0, "phpool", NULL);
543 #endif
544 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
545 0, "pcgpool", NULL);
546 }
547
548 /* Insert into the list of all pools. */
549 simple_lock(&pool_head_slock);
550 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
551 simple_unlock(&pool_head_slock);
552
553 /* Insert this into the list of pools using this allocator. */
554 simple_lock(&palloc->pa_slock);
555 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
556 simple_unlock(&palloc->pa_slock);
557 }
558
559 /*
560 * De-commission a pool resource.
561 */
562 void
563 pool_destroy(struct pool *pp)
564 {
565 struct pool_item_header *ph;
566 struct pool_cache *pc;
567
568 /* Locking order: pool_allocator -> pool */
569 simple_lock(&pp->pr_alloc->pa_slock);
570 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
571 simple_unlock(&pp->pr_alloc->pa_slock);
572
573 /* Destroy all caches for this pool. */
574 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
575 pool_cache_destroy(pc);
576
577 #ifdef DIAGNOSTIC
578 if (pp->pr_nout != 0) {
579 pr_printlog(pp, NULL, printf);
580 panic("pool_destroy: pool busy: still out: %u",
581 pp->pr_nout);
582 }
583 #endif
584
585 /* Remove all pages */
586 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
587 pr_rmpage(pp, ph, NULL);
588 KASSERT(LIST_EMPTY(&pp->pr_fullpages));
589 KASSERT(LIST_EMPTY(&pp->pr_partpages));
590
591 /* Remove from global pool list */
592 simple_lock(&pool_head_slock);
593 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
594 if (drainpp == pp) {
595 drainpp = NULL;
596 }
597 simple_unlock(&pool_head_slock);
598
599 #ifdef POOL_DIAGNOSTIC
600 if ((pp->pr_roflags & PR_LOGGING) != 0)
601 free(pp->pr_log, M_TEMP);
602 #endif
603 }
604
605 void
606 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
607 {
608
609 /* XXX no locking -- must be used just after pool_init() */
610 #ifdef DIAGNOSTIC
611 if (pp->pr_drain_hook != NULL)
612 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
613 #endif
614 pp->pr_drain_hook = fn;
615 pp->pr_drain_hook_arg = arg;
616 }
617
618 static struct pool_item_header *
619 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
620 {
621 struct pool_item_header *ph;
622 int s;
623
624 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
625
626 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
627 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
628 else {
629 s = splvm();
630 ph = pool_get(&phpool, flags);
631 splx(s);
632 }
633
634 return (ph);
635 }
636
637 /*
638 * Grab an item from the pool; must be called at appropriate spl level
639 */
640 void *
641 #ifdef POOL_DIAGNOSTIC
642 _pool_get(struct pool *pp, int flags, const char *file, long line)
643 #else
644 pool_get(struct pool *pp, int flags)
645 #endif
646 {
647 struct pool_item *pi;
648 struct pool_item_header *ph;
649 void *v;
650
651 #ifdef DIAGNOSTIC
652 if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
653 (flags & PR_WAITOK) != 0))
654 panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
655
656 #ifdef LOCKDEBUG
657 if (flags & PR_WAITOK)
658 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
659 #endif
660 #endif /* DIAGNOSTIC */
661
662 simple_lock(&pp->pr_slock);
663 pr_enter(pp, file, line);
664
665 startover:
666 /*
667 * Check to see if we've reached the hard limit. If we have,
668 * and we can wait, then wait until an item has been returned to
669 * the pool.
670 */
671 #ifdef DIAGNOSTIC
672 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
673 pr_leave(pp);
674 simple_unlock(&pp->pr_slock);
675 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
676 }
677 #endif
678 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
679 if (pp->pr_drain_hook != NULL) {
680 /*
681 * Since the drain hook is going to free things
682 * back to the pool, unlock, call the hook, re-lock,
683 * and check the hardlimit condition again.
684 */
685 pr_leave(pp);
686 simple_unlock(&pp->pr_slock);
687 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
688 simple_lock(&pp->pr_slock);
689 pr_enter(pp, file, line);
690 if (pp->pr_nout < pp->pr_hardlimit)
691 goto startover;
692 }
693
694 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
695 /*
696 * XXX: A warning isn't logged in this case. Should
697 * it be?
698 */
699 pp->pr_flags |= PR_WANTED;
700 pr_leave(pp);
701 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
702 pr_enter(pp, file, line);
703 goto startover;
704 }
705
706 /*
707 * Log a message that the hard limit has been hit.
708 */
709 if (pp->pr_hardlimit_warning != NULL &&
710 ratecheck(&pp->pr_hardlimit_warning_last,
711 &pp->pr_hardlimit_ratecap))
712 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
713
714 pp->pr_nfail++;
715
716 pr_leave(pp);
717 simple_unlock(&pp->pr_slock);
718 return (NULL);
719 }
720
721 /*
722 * The convention we use is that if `curpage' is not NULL, then
723 * it points at a non-empty bucket. In particular, `curpage'
724 * never points at a page header which has PR_PHINPAGE set and
725 * has no items in its bucket.
726 */
727 if ((ph = pp->pr_curpage) == NULL) {
728 #ifdef DIAGNOSTIC
729 if (pp->pr_nitems != 0) {
730 simple_unlock(&pp->pr_slock);
731 printf("pool_get: %s: curpage NULL, nitems %u\n",
732 pp->pr_wchan, pp->pr_nitems);
733 panic("pool_get: nitems inconsistent");
734 }
735 #endif
736
737 /*
738 * Call the back-end page allocator for more memory.
739 * Release the pool lock, as the back-end page allocator
740 * may block.
741 */
742 pr_leave(pp);
743 simple_unlock(&pp->pr_slock);
744 v = pool_allocator_alloc(pp, flags);
745 if (__predict_true(v != NULL))
746 ph = pool_alloc_item_header(pp, v, flags);
747
748 if (__predict_false(v == NULL || ph == NULL)) {
749 if (v != NULL)
750 pool_allocator_free(pp, v);
751
752 simple_lock(&pp->pr_slock);
753 pr_enter(pp, file, line);
754
755 /*
756 * We were unable to allocate a page or item
757 * header, but we released the lock during
758 * allocation, so perhaps items were freed
759 * back to the pool. Check for this case.
760 */
761 if (pp->pr_curpage != NULL)
762 goto startover;
763
764 if ((flags & PR_WAITOK) == 0) {
765 pp->pr_nfail++;
766 pr_leave(pp);
767 simple_unlock(&pp->pr_slock);
768 return (NULL);
769 }
770
771 /*
772 * Wait for items to be returned to this pool.
773 *
774 * XXX: maybe we should wake up once a second and
775 * try again?
776 */
777 pp->pr_flags |= PR_WANTED;
778 /* PA_WANTED is already set on the allocator. */
779 pr_leave(pp);
780 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
781 pr_enter(pp, file, line);
782 goto startover;
783 }
784
785 /* We have more memory; add it to the pool */
786 simple_lock(&pp->pr_slock);
787 pr_enter(pp, file, line);
788 pool_prime_page(pp, v, ph);
789 pp->pr_npagealloc++;
790
791 /* Start the allocation process over. */
792 goto startover;
793 }
794 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
795 pr_leave(pp);
796 simple_unlock(&pp->pr_slock);
797 panic("pool_get: %s: page empty", pp->pr_wchan);
798 }
799 #ifdef DIAGNOSTIC
800 if (__predict_false(pp->pr_nitems == 0)) {
801 pr_leave(pp);
802 simple_unlock(&pp->pr_slock);
803 printf("pool_get: %s: items on itemlist, nitems %u\n",
804 pp->pr_wchan, pp->pr_nitems);
805 panic("pool_get: nitems inconsistent");
806 }
807 #endif
808
809 #ifdef POOL_DIAGNOSTIC
810 pr_log(pp, v, PRLOG_GET, file, line);
811 #endif
812
813 #ifdef DIAGNOSTIC
814 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
815 pr_printlog(pp, pi, printf);
816 panic("pool_get(%s): free list modified: magic=%x; page %p;"
817 " item addr %p\n",
818 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
819 }
820 #endif
821
822 /*
823 * Remove from item list.
824 */
825 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
826 pp->pr_nitems--;
827 pp->pr_nout++;
828 if (ph->ph_nmissing == 0) {
829 #ifdef DIAGNOSTIC
830 if (__predict_false(pp->pr_nidle == 0))
831 panic("pool_get: nidle inconsistent");
832 #endif
833 pp->pr_nidle--;
834
835 /*
836 * This page was previously empty. Move it to the list of
837 * partially-full pages. This page is already curpage.
838 */
839 LIST_REMOVE(ph, ph_pagelist);
840 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
841 }
842 ph->ph_nmissing++;
843 if (TAILQ_EMPTY(&ph->ph_itemlist)) {
844 #ifdef DIAGNOSTIC
845 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
846 pr_leave(pp);
847 simple_unlock(&pp->pr_slock);
848 panic("pool_get: %s: nmissing inconsistent",
849 pp->pr_wchan);
850 }
851 #endif
852 /*
853 * This page is now full. Move it to the full list
854 * and select a new current page.
855 */
856 LIST_REMOVE(ph, ph_pagelist);
857 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
858 pool_update_curpage(pp);
859 }
860
861 pp->pr_nget++;
862
863 /*
864 * If we have a low water mark and we are now below that low
865 * water mark, add more items to the pool.
866 */
867 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
868 /*
869 * XXX: Should we log a warning? Should we set up a timeout
870 * to try again in a second or so? The latter could break
871 * a caller's assumptions about interrupt protection, etc.
872 */
873 }
874
875 pr_leave(pp);
876 simple_unlock(&pp->pr_slock);
877 return (v);
878 }
879
880 /*
881 * Internal version of pool_put(). Pool is already locked/entered.
882 */
883 static void
884 pool_do_put(struct pool *pp, void *v)
885 {
886 struct pool_item *pi = v;
887 struct pool_item_header *ph;
888 caddr_t page;
889 int s;
890
891 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
892
893 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
894
895 #ifdef DIAGNOSTIC
896 if (__predict_false(pp->pr_nout == 0)) {
897 printf("pool %s: putting with none out\n",
898 pp->pr_wchan);
899 panic("pool_put");
900 }
901 #endif
902
903 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
904 pr_printlog(pp, NULL, printf);
905 panic("pool_put: %s: page header missing", pp->pr_wchan);
906 }
907
908 #ifdef LOCKDEBUG
909 /*
910 * Check if we're freeing a locked simple lock.
911 */
912 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
913 #endif
914
915 /*
916 * Return to item list.
917 */
918 #ifdef DIAGNOSTIC
919 pi->pi_magic = PI_MAGIC;
920 #endif
921 #ifdef DEBUG
922 {
923 int i, *ip = v;
924
925 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
926 *ip++ = PI_MAGIC;
927 }
928 }
929 #endif
930
931 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
932 KDASSERT(ph->ph_nmissing != 0);
933 ph->ph_nmissing--;
934 pp->pr_nput++;
935 pp->pr_nitems++;
936 pp->pr_nout--;
937
938 /* Cancel "pool empty" condition if it exists */
939 if (pp->pr_curpage == NULL)
940 pp->pr_curpage = ph;
941
942 if (pp->pr_flags & PR_WANTED) {
943 pp->pr_flags &= ~PR_WANTED;
944 if (ph->ph_nmissing == 0)
945 pp->pr_nidle++;
946 wakeup((caddr_t)pp);
947 return;
948 }
949
950 /*
951 * If this page is now empty, do one of two things:
952 *
953 * (1) If we have more pages than the page high water mark,
954 * or if we are flagged as immediately freeing back idle
955 * pages, free the page back to the system. ONLY CONSIDER
956 * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
957 * CLAIM.
958 *
959 * (2) Otherwise, move the page to the empty page list.
960 *
961 * Either way, select a new current page (so we use a partially-full
962 * page if one is available).
963 */
964 if (ph->ph_nmissing == 0) {
965 pp->pr_nidle++;
966 if (pp->pr_npages > pp->pr_minpages &&
967 (pp->pr_npages > pp->pr_maxpages ||
968 (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
969 (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
970 simple_unlock(&pp->pr_slock);
971 pr_rmpage(pp, ph, NULL);
972 simple_lock(&pp->pr_slock);
973 } else {
974 LIST_REMOVE(ph, ph_pagelist);
975 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
976
977 /*
978 * Update the timestamp on the page. A page must
979 * be idle for some period of time before it can
980 * be reclaimed by the pagedaemon. This minimizes
981 * ping-pong'ing for memory.
982 */
983 s = splclock();
984 ph->ph_time = mono_time;
985 splx(s);
986 }
987 pool_update_curpage(pp);
988 }
989
990 /*
991 * If the page was previously completely full, move it to the
992 * partially-full list and make it the current page. The next
993 * allocation will get the item from this page, instead of
994 * further fragmenting the pool.
995 */
996 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
997 LIST_REMOVE(ph, ph_pagelist);
998 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
999 pp->pr_curpage = ph;
1000 }
1001 }
1002
1003 /*
1004 * Return resource to the pool; must be called at appropriate spl level
1005 */
1006 #ifdef POOL_DIAGNOSTIC
1007 void
1008 _pool_put(struct pool *pp, void *v, const char *file, long line)
1009 {
1010
1011 simple_lock(&pp->pr_slock);
1012 pr_enter(pp, file, line);
1013
1014 pr_log(pp, v, PRLOG_PUT, file, line);
1015
1016 pool_do_put(pp, v);
1017
1018 pr_leave(pp);
1019 simple_unlock(&pp->pr_slock);
1020 }
1021 #undef pool_put
1022 #endif /* POOL_DIAGNOSTIC */
1023
1024 void
1025 pool_put(struct pool *pp, void *v)
1026 {
1027
1028 simple_lock(&pp->pr_slock);
1029
1030 pool_do_put(pp, v);
1031
1032 simple_unlock(&pp->pr_slock);
1033 }
1034
1035 #ifdef POOL_DIAGNOSTIC
1036 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1037 #endif
1038
1039 /*
1040 * Add N items to the pool.
1041 */
1042 int
1043 pool_prime(struct pool *pp, int n)
1044 {
1045 struct pool_item_header *ph = NULL;
1046 caddr_t cp;
1047 int newpages;
1048
1049 simple_lock(&pp->pr_slock);
1050
1051 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1052
1053 while (newpages-- > 0) {
1054 simple_unlock(&pp->pr_slock);
1055 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1056 if (__predict_true(cp != NULL))
1057 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1058
1059 if (__predict_false(cp == NULL || ph == NULL)) {
1060 if (cp != NULL)
1061 pool_allocator_free(pp, cp);
1062 simple_lock(&pp->pr_slock);
1063 break;
1064 }
1065
1066 simple_lock(&pp->pr_slock);
1067 pool_prime_page(pp, cp, ph);
1068 pp->pr_npagealloc++;
1069 pp->pr_minpages++;
1070 }
1071
1072 if (pp->pr_minpages >= pp->pr_maxpages)
1073 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1074
1075 simple_unlock(&pp->pr_slock);
1076 return (0);
1077 }
1078
1079 /*
1080 * Add a page worth of items to the pool.
1081 *
1082 * Note, we must be called with the pool descriptor LOCKED.
1083 */
1084 static void
1085 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1086 {
1087 struct pool_item *pi;
1088 caddr_t cp = storage;
1089 unsigned int align = pp->pr_align;
1090 unsigned int ioff = pp->pr_itemoffset;
1091 int n;
1092 int s;
1093
1094 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1095
1096 #ifdef DIAGNOSTIC
1097 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1098 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1099 #endif
1100
1101 /*
1102 * Insert page header.
1103 */
1104 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1105 TAILQ_INIT(&ph->ph_itemlist);
1106 ph->ph_page = storage;
1107 ph->ph_nmissing = 0;
1108 s = splclock();
1109 ph->ph_time = mono_time;
1110 splx(s);
1111 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1112 SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1113
1114 pp->pr_nidle++;
1115
1116 /*
1117 * Color this page.
1118 */
1119 cp = (caddr_t)(cp + pp->pr_curcolor);
1120 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1121 pp->pr_curcolor = 0;
1122
1123 /*
1124 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1125 */
1126 if (ioff != 0)
1127 cp = (caddr_t)(cp + (align - ioff));
1128
1129 /*
1130 * Insert remaining chunks on the bucket list.
1131 */
1132 n = pp->pr_itemsperpage;
1133 pp->pr_nitems += n;
1134
1135 while (n--) {
1136 pi = (struct pool_item *)cp;
1137
1138 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1139
1140 /* Insert on page list */
1141 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1142 #ifdef DIAGNOSTIC
1143 pi->pi_magic = PI_MAGIC;
1144 #endif
1145 cp = (caddr_t)(cp + pp->pr_size);
1146 }
1147
1148 /*
1149 * If the pool was depleted, point at the new page.
1150 */
1151 if (pp->pr_curpage == NULL)
1152 pp->pr_curpage = ph;
1153
1154 if (++pp->pr_npages > pp->pr_hiwat)
1155 pp->pr_hiwat = pp->pr_npages;
1156 }
1157
1158 /*
1159 * Used by pool_get() when nitems drops below the low water mark. This
1160 * brings pr_nitems back up to the low water mark.
1161 *
1162 * Note 1, we never wait for memory here, we let the caller decide what to do.
1163 *
1164 * Note 2, we must be called with the pool already locked, and we return
1165 * with it locked.
1166 */
1167 static int
1168 pool_catchup(struct pool *pp)
1169 {
1170 struct pool_item_header *ph = NULL;
1171 caddr_t cp;
1172 int error = 0;
1173
1174 while (POOL_NEEDS_CATCHUP(pp)) {
1175 /*
1176 * Call the page back-end allocator for more memory.
1177 *
1178 * XXX: We never wait, so should we bother unlocking
1179 * the pool descriptor?
1180 */
1181 simple_unlock(&pp->pr_slock);
1182 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1183 if (__predict_true(cp != NULL))
1184 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1185 if (__predict_false(cp == NULL || ph == NULL)) {
1186 if (cp != NULL)
1187 pool_allocator_free(pp, cp);
1188 error = ENOMEM;
1189 simple_lock(&pp->pr_slock);
1190 break;
1191 }
1192 simple_lock(&pp->pr_slock);
1193 pool_prime_page(pp, cp, ph);
1194 pp->pr_npagealloc++;
1195 }
1196
1197 return (error);
1198 }
1199
1200 static void
1201 pool_update_curpage(struct pool *pp)
1202 {
1203
1204 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1205 if (pp->pr_curpage == NULL) {
1206 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1207 }
1208 }
1209
1210 void
1211 pool_setlowat(struct pool *pp, int n)
1212 {
1213
1214 simple_lock(&pp->pr_slock);
1215
1216 pp->pr_minitems = n;
1217 pp->pr_minpages = (n == 0)
1218 ? 0
1219 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1220
1221 /* Make sure we're caught up with the newly-set low water mark. */
1222 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1223 /*
1224 * XXX: Should we log a warning? Should we set up a timeout
1225 * to try again in a second or so? The latter could break
1226 * a caller's assumptions about interrupt protection, etc.
1227 */
1228 }
1229
1230 simple_unlock(&pp->pr_slock);
1231 }
1232
1233 void
1234 pool_sethiwat(struct pool *pp, int n)
1235 {
1236
1237 simple_lock(&pp->pr_slock);
1238
1239 pp->pr_maxpages = (n == 0)
1240 ? 0
1241 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1242
1243 simple_unlock(&pp->pr_slock);
1244 }
1245
1246 void
1247 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1248 {
1249
1250 simple_lock(&pp->pr_slock);
1251
1252 pp->pr_hardlimit = n;
1253 pp->pr_hardlimit_warning = warnmess;
1254 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1255 pp->pr_hardlimit_warning_last.tv_sec = 0;
1256 pp->pr_hardlimit_warning_last.tv_usec = 0;
1257
1258 /*
1259 * In-line version of pool_sethiwat(), because we don't want to
1260 * release the lock.
1261 */
1262 pp->pr_maxpages = (n == 0)
1263 ? 0
1264 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1265
1266 simple_unlock(&pp->pr_slock);
1267 }
1268
1269 /*
1270 * Release all complete pages that have not been used recently.
1271 */
1272 int
1273 #ifdef POOL_DIAGNOSTIC
1274 _pool_reclaim(struct pool *pp, const char *file, long line)
1275 #else
1276 pool_reclaim(struct pool *pp)
1277 #endif
1278 {
1279 struct pool_item_header *ph, *phnext;
1280 struct pool_cache *pc;
1281 struct timeval curtime;
1282 struct pool_pagelist pq;
1283 struct timeval diff;
1284 int s;
1285
1286 if (pp->pr_drain_hook != NULL) {
1287 /*
1288 * The drain hook must be called with the pool unlocked.
1289 */
1290 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1291 }
1292
1293 if (simple_lock_try(&pp->pr_slock) == 0)
1294 return (0);
1295 pr_enter(pp, file, line);
1296
1297 LIST_INIT(&pq);
1298
1299 /*
1300 * Reclaim items from the pool's caches.
1301 */
1302 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1303 pool_cache_reclaim(pc);
1304
1305 s = splclock();
1306 curtime = mono_time;
1307 splx(s);
1308
1309 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1310 phnext = LIST_NEXT(ph, ph_pagelist);
1311
1312 /* Check our minimum page claim */
1313 if (pp->pr_npages <= pp->pr_minpages)
1314 break;
1315
1316 KASSERT(ph->ph_nmissing == 0);
1317 timersub(&curtime, &ph->ph_time, &diff);
1318 if (diff.tv_sec < pool_inactive_time)
1319 continue;
1320
1321 /*
1322 * If freeing this page would put us below
1323 * the low water mark, stop now.
1324 */
1325 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1326 pp->pr_minitems)
1327 break;
1328
1329 pr_rmpage(pp, ph, &pq);
1330 }
1331
1332 pr_leave(pp);
1333 simple_unlock(&pp->pr_slock);
1334 if (LIST_EMPTY(&pq))
1335 return (0);
1336
1337 while ((ph = LIST_FIRST(&pq)) != NULL) {
1338 LIST_REMOVE(ph, ph_pagelist);
1339 pool_allocator_free(pp, ph->ph_page);
1340 if (pp->pr_roflags & PR_PHINPAGE) {
1341 continue;
1342 }
1343 s = splvm();
1344 pool_put(&phpool, ph);
1345 splx(s);
1346 }
1347
1348 return (1);
1349 }
1350
1351 /*
1352 * Drain pools, one at a time.
1353 *
1354 * Note, we must never be called from an interrupt context.
1355 */
1356 void
1357 pool_drain(void *arg)
1358 {
1359 struct pool *pp;
1360 int s;
1361
1362 pp = NULL;
1363 s = splvm();
1364 simple_lock(&pool_head_slock);
1365 if (drainpp == NULL) {
1366 drainpp = TAILQ_FIRST(&pool_head);
1367 }
1368 if (drainpp) {
1369 pp = drainpp;
1370 drainpp = TAILQ_NEXT(pp, pr_poollist);
1371 }
1372 simple_unlock(&pool_head_slock);
1373 if (pp != NULL) pool_reclaim(pp);
1374 splx(s);
1375 }
1376
1377 /*
1378 * Diagnostic helpers.
1379 */
1380 void
1381 pool_print(struct pool *pp, const char *modif)
1382 {
1383 int s;
1384
1385 s = splvm();
1386 if (simple_lock_try(&pp->pr_slock) == 0) {
1387 printf("pool %s is locked; try again later\n",
1388 pp->pr_wchan);
1389 splx(s);
1390 return;
1391 }
1392 pool_print1(pp, modif, printf);
1393 simple_unlock(&pp->pr_slock);
1394 splx(s);
1395 }
1396
1397 void
1398 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1399 {
1400 int didlock = 0;
1401
1402 if (pp == NULL) {
1403 (*pr)("Must specify a pool to print.\n");
1404 return;
1405 }
1406
1407 /*
1408 * Called from DDB; interrupts should be blocked, and all
1409 * other processors should be paused. We can skip locking
1410 * the pool in this case.
1411 *
1412 * We do a simple_lock_try() just to print the lock
1413 * status, however.
1414 */
1415
1416 if (simple_lock_try(&pp->pr_slock) == 0)
1417 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1418 else
1419 didlock = 1;
1420
1421 pool_print1(pp, modif, pr);
1422
1423 if (didlock)
1424 simple_unlock(&pp->pr_slock);
1425 }
1426
1427 static void
1428 pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
1429 {
1430 struct pool_item_header *ph;
1431 #ifdef DIAGNOSTIC
1432 struct pool_item *pi;
1433 #endif
1434
1435 LIST_FOREACH(ph, pl, ph_pagelist) {
1436 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1437 ph->ph_page, ph->ph_nmissing,
1438 (u_long)ph->ph_time.tv_sec,
1439 (u_long)ph->ph_time.tv_usec);
1440 #ifdef DIAGNOSTIC
1441 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1442 if (pi->pi_magic != PI_MAGIC) {
1443 (*pr)("\t\t\titem %p, magic 0x%x\n",
1444 pi, pi->pi_magic);
1445 }
1446 }
1447 #endif
1448 }
1449 }
1450
1451 static void
1452 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1453 {
1454 struct pool_item_header *ph;
1455 struct pool_cache *pc;
1456 struct pool_cache_group *pcg;
1457 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1458 char c;
1459
1460 while ((c = *modif++) != '\0') {
1461 if (c == 'l')
1462 print_log = 1;
1463 if (c == 'p')
1464 print_pagelist = 1;
1465 if (c == 'c')
1466 print_cache = 1;
1467 }
1468
1469 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1470 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1471 pp->pr_roflags);
1472 (*pr)("\talloc %p\n", pp->pr_alloc);
1473 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1474 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1475 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1476 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1477
1478 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1479 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1480 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1481 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1482
1483 if (print_pagelist == 0)
1484 goto skip_pagelist;
1485
1486 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1487 (*pr)("\n\tempty page list:\n");
1488 pool_print_pagelist(&pp->pr_emptypages, pr);
1489 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1490 (*pr)("\n\tfull page list:\n");
1491 pool_print_pagelist(&pp->pr_fullpages, pr);
1492 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1493 (*pr)("\n\tpartial-page list:\n");
1494 pool_print_pagelist(&pp->pr_partpages, pr);
1495
1496 if (pp->pr_curpage == NULL)
1497 (*pr)("\tno current page\n");
1498 else
1499 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1500
1501 skip_pagelist:
1502 if (print_log == 0)
1503 goto skip_log;
1504
1505 (*pr)("\n");
1506 if ((pp->pr_roflags & PR_LOGGING) == 0)
1507 (*pr)("\tno log\n");
1508 else
1509 pr_printlog(pp, NULL, pr);
1510
1511 skip_log:
1512 if (print_cache == 0)
1513 goto skip_cache;
1514
1515 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1516 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1517 pc->pc_allocfrom, pc->pc_freeto);
1518 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1519 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1520 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1521 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1522 for (i = 0; i < PCG_NOBJECTS; i++) {
1523 if (pcg->pcg_objects[i].pcgo_pa !=
1524 POOL_PADDR_INVALID) {
1525 (*pr)("\t\t\t%p, 0x%llx\n",
1526 pcg->pcg_objects[i].pcgo_va,
1527 (unsigned long long)
1528 pcg->pcg_objects[i].pcgo_pa);
1529 } else {
1530 (*pr)("\t\t\t%p\n",
1531 pcg->pcg_objects[i].pcgo_va);
1532 }
1533 }
1534 }
1535 }
1536
1537 skip_cache:
1538 pr_enter_check(pp, pr);
1539 }
1540
1541 static int
1542 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1543 {
1544 struct pool_item *pi;
1545 caddr_t page;
1546 int n;
1547
1548 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1549 if (page != ph->ph_page &&
1550 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1551 if (label != NULL)
1552 printf("%s: ", label);
1553 printf("pool(%p:%s): page inconsistency: page %p;"
1554 " at page head addr %p (p %p)\n", pp,
1555 pp->pr_wchan, ph->ph_page,
1556 ph, page);
1557 return 1;
1558 }
1559
1560 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1561 pi != NULL;
1562 pi = TAILQ_NEXT(pi,pi_list), n++) {
1563
1564 #ifdef DIAGNOSTIC
1565 if (pi->pi_magic != PI_MAGIC) {
1566 if (label != NULL)
1567 printf("%s: ", label);
1568 printf("pool(%s): free list modified: magic=%x;"
1569 " page %p; item ordinal %d;"
1570 " addr %p (p %p)\n",
1571 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1572 n, pi, page);
1573 panic("pool");
1574 }
1575 #endif
1576 page =
1577 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1578 if (page == ph->ph_page)
1579 continue;
1580
1581 if (label != NULL)
1582 printf("%s: ", label);
1583 printf("pool(%p:%s): page inconsistency: page %p;"
1584 " item ordinal %d; addr %p (p %p)\n", pp,
1585 pp->pr_wchan, ph->ph_page,
1586 n, pi, page);
1587 return 1;
1588 }
1589 return 0;
1590 }
1591
1592
1593 int
1594 pool_chk(struct pool *pp, const char *label)
1595 {
1596 struct pool_item_header *ph;
1597 int r = 0;
1598
1599 simple_lock(&pp->pr_slock);
1600 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1601 r = pool_chk_page(pp, label, ph);
1602 if (r) {
1603 goto out;
1604 }
1605 }
1606 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1607 r = pool_chk_page(pp, label, ph);
1608 if (r) {
1609 goto out;
1610 }
1611 }
1612 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1613 r = pool_chk_page(pp, label, ph);
1614 if (r) {
1615 goto out;
1616 }
1617 }
1618
1619 out:
1620 simple_unlock(&pp->pr_slock);
1621 return (r);
1622 }
1623
1624 /*
1625 * pool_cache_init:
1626 *
1627 * Initialize a pool cache.
1628 *
1629 * NOTE: If the pool must be protected from interrupts, we expect
1630 * to be called at the appropriate interrupt priority level.
1631 */
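/*
 * For example (illustrative only; splvm() is just one possible
 * priority level, and the "example_*" names are hypothetical):
 *
 *	s = splvm();
 *	pool_cache_init(&example_cache, &example_pool,
 *	    example_ctor, example_dtor, NULL);
 *	splx(s);
 */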
1632 void
1633 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1634 int (*ctor)(void *, void *, int),
1635 void (*dtor)(void *, void *),
1636 void *arg)
1637 {
1638
1639 TAILQ_INIT(&pc->pc_grouplist);
1640 simple_lock_init(&pc->pc_slock);
1641
1642 pc->pc_allocfrom = NULL;
1643 pc->pc_freeto = NULL;
1644 pc->pc_pool = pp;
1645
1646 pc->pc_ctor = ctor;
1647 pc->pc_dtor = dtor;
1648 pc->pc_arg = arg;
1649
1650 pc->pc_hits = 0;
1651 pc->pc_misses = 0;
1652
1653 pc->pc_ngroups = 0;
1654
1655 pc->pc_nitems = 0;
1656
1657 simple_lock(&pp->pr_slock);
1658 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1659 simple_unlock(&pp->pr_slock);
1660 }
1661
1662 /*
1663 * pool_cache_destroy:
1664 *
1665 * Destroy a pool cache.
1666 */
1667 void
1668 pool_cache_destroy(struct pool_cache *pc)
1669 {
1670 struct pool *pp = pc->pc_pool;
1671
1672 /* First, invalidate the entire cache. */
1673 pool_cache_invalidate(pc);
1674
1675 /* ...and remove it from the pool's cache list. */
1676 simple_lock(&pp->pr_slock);
1677 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1678 simple_unlock(&pp->pr_slock);
1679 }
1680
1681 static __inline void *
1682 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1683 {
1684 void *object;
1685 u_int idx;
1686
1687 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1688 KASSERT(pcg->pcg_avail != 0);
1689 idx = --pcg->pcg_avail;
1690
1691 KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1692 object = pcg->pcg_objects[idx].pcgo_va;
1693 if (pap != NULL)
1694 *pap = pcg->pcg_objects[idx].pcgo_pa;
1695 pcg->pcg_objects[idx].pcgo_va = NULL;
1696
1697 return (object);
1698 }
1699
1700 static __inline void
1701 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1702 {
1703 u_int idx;
1704
1705 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1706 idx = pcg->pcg_avail++;
1707
1708 KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1709 pcg->pcg_objects[idx].pcgo_va = object;
1710 pcg->pcg_objects[idx].pcgo_pa = pa;
1711 }
1712
1713 /*
1714 * pool_cache_get{,_paddr}:
1715 *
1716 * Get an object from a pool cache (optionally returning
1717 * the physical address of the object).
1718 */
1719 void *
1720 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1721 {
1722 struct pool_cache_group *pcg;
1723 void *object;
1724
1725 #ifdef LOCKDEBUG
1726 if (flags & PR_WAITOK)
1727 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1728 #endif
1729
1730 simple_lock(&pc->pc_slock);
1731
1732 if ((pcg = pc->pc_allocfrom) == NULL) {
1733 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1734 if (pcg->pcg_avail != 0) {
1735 pc->pc_allocfrom = pcg;
1736 goto have_group;
1737 }
1738 }
1739
1740 /*
1741 * No groups with any available objects. Allocate
1742 * a new object, construct it, and return it to
1743 * the caller. We will allocate a group, if necessary,
1744 * when the object is freed back to the cache.
1745 */
1746 pc->pc_misses++;
1747 simple_unlock(&pc->pc_slock);
1748 object = pool_get(pc->pc_pool, flags);
1749 if (object != NULL && pc->pc_ctor != NULL) {
1750 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1751 pool_put(pc->pc_pool, object);
1752 return (NULL);
1753 }
1754 }
1755 if (object != NULL && pap != NULL) {
1756 #ifdef POOL_VTOPHYS
1757 *pap = POOL_VTOPHYS(object);
1758 #else
1759 *pap = POOL_PADDR_INVALID;
1760 #endif
1761 }
1762 return (object);
1763 }
1764
1765 have_group:
1766 pc->pc_hits++;
1767 pc->pc_nitems--;
1768 object = pcg_get(pcg, pap);
1769
1770 if (pcg->pcg_avail == 0)
1771 pc->pc_allocfrom = NULL;
1772
1773 simple_unlock(&pc->pc_slock);
1774
1775 return (object);
1776 }
1777
1778 /*
1779 * pool_cache_put{,_paddr}:
1780 *
1781 * Put an object back to the pool cache (optionally caching the
1782 * physical address of the object).
1783 */
1784 void
1785 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1786 {
1787 struct pool_cache_group *pcg;
1788 int s;
1789
1790 simple_lock(&pc->pc_slock);
1791
1792 if ((pcg = pc->pc_freeto) == NULL) {
1793 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1794 if (pcg->pcg_avail != PCG_NOBJECTS) {
1795 pc->pc_freeto = pcg;
1796 goto have_group;
1797 }
1798 }
1799
1800 /*
1801 * No empty groups to free the object to. Attempt to
1802 * allocate one.
1803 */
1804 simple_unlock(&pc->pc_slock);
1805 s = splvm();
1806 pcg = pool_get(&pcgpool, PR_NOWAIT);
1807 splx(s);
1808 if (pcg != NULL) {
1809 memset(pcg, 0, sizeof(*pcg));
1810 simple_lock(&pc->pc_slock);
1811 pc->pc_ngroups++;
1812 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1813 if (pc->pc_freeto == NULL)
1814 pc->pc_freeto = pcg;
1815 goto have_group;
1816 }
1817
1818 /*
1819 * Unable to allocate a cache group; destruct the object
1820 * and free it back to the pool.
1821 */
1822 pool_cache_destruct_object(pc, object);
1823 return;
1824 }
1825
1826 have_group:
1827 pc->pc_nitems++;
1828 pcg_put(pcg, object, pa);
1829
1830 if (pcg->pcg_avail == PCG_NOBJECTS)
1831 pc->pc_freeto = NULL;
1832
1833 simple_unlock(&pc->pc_slock);
1834 }
1835
1836 /*
1837 * pool_cache_destruct_object:
1838 *
1839 * Force destruction of an object and its release back into
1840 * the pool.
1841 */
1842 void
1843 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1844 {
1845
1846 if (pc->pc_dtor != NULL)
1847 (*pc->pc_dtor)(pc->pc_arg, object);
1848 pool_put(pc->pc_pool, object);
1849 }
1850
1851 /*
1852 * pool_cache_do_invalidate:
1853 *
1854 * This internal function implements pool_cache_invalidate() and
1855 * pool_cache_reclaim().
1856 */
1857 static void
1858 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1859 void (*putit)(struct pool *, void *))
1860 {
1861 struct pool_cache_group *pcg, *npcg;
1862 void *object;
1863 int s;
1864
1865 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1866 pcg = npcg) {
1867 npcg = TAILQ_NEXT(pcg, pcg_list);
1868 while (pcg->pcg_avail != 0) {
1869 pc->pc_nitems--;
1870 object = pcg_get(pcg, NULL);
1871 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1872 pc->pc_allocfrom = NULL;
1873 if (pc->pc_dtor != NULL)
1874 (*pc->pc_dtor)(pc->pc_arg, object);
1875 (*putit)(pc->pc_pool, object);
1876 }
1877 if (free_groups) {
1878 pc->pc_ngroups--;
1879 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1880 if (pc->pc_freeto == pcg)
1881 pc->pc_freeto = NULL;
1882 s = splvm();
1883 pool_put(&pcgpool, pcg);
1884 splx(s);
1885 }
1886 }
1887 }
1888
1889 /*
1890 * pool_cache_invalidate:
1891 *
1892 * Invalidate a pool cache (destruct and release all of the
1893 * cached objects).
1894 */
1895 void
1896 pool_cache_invalidate(struct pool_cache *pc)
1897 {
1898
1899 simple_lock(&pc->pc_slock);
1900 pool_cache_do_invalidate(pc, 0, pool_put);
1901 simple_unlock(&pc->pc_slock);
1902 }
1903
1904 /*
1905 * pool_cache_reclaim:
1906 *
1907 * Reclaim a pool cache for pool_reclaim().
1908 */
1909 static void
1910 pool_cache_reclaim(struct pool_cache *pc)
1911 {
1912
1913 simple_lock(&pc->pc_slock);
1914 pool_cache_do_invalidate(pc, 1, pool_do_put);
1915 simple_unlock(&pc->pc_slock);
1916 }
1917
1918 /*
1919 * Pool backend allocators.
1920 *
1921 * Each pool has a backend allocator that handles allocation, deallocation,
1922 * and any additional draining that might be needed.
1923 *
1924 * We provide two standard allocators:
1925 *
1926 * pool_allocator_kmem - the default when no allocator is specified
1927 *
1928 * pool_allocator_nointr - used for pools that will not be accessed
1929 * in interrupt context.
1930 */
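/*
 * Illustrative sketch of a custom back-end allocator (not compiled
 * here; the "example_*" names are hypothetical). Every page request
 * for a pool using it goes through pa_alloc/pa_free, and a pa_pagesz
 * of 0 means "use the default page size"; this mirrors the standard
 * pool_page_alloc()/pool_page_free() pair defined below:
 */
#if 0
static void *
example_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

static void
example_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage((vaddr_t)v);
}

struct pool_allocator example_allocator = {
	example_page_alloc, example_page_free, 0,
};

/*
 * A pool opts in by passing the allocator to pool_init():
 *
 *	pool_init(&somepool, size, 0, 0, 0, "somepool", &example_allocator);
 */
#endif /* example */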
1931 void *pool_page_alloc(struct pool *, int);
1932 void pool_page_free(struct pool *, void *);
1933
1934 struct pool_allocator pool_allocator_kmem = {
1935 pool_page_alloc, pool_page_free, 0,
1936 };
1937
1938 void *pool_page_alloc_nointr(struct pool *, int);
1939 void pool_page_free_nointr(struct pool *, void *);
1940
1941 struct pool_allocator pool_allocator_nointr = {
1942 pool_page_alloc_nointr, pool_page_free_nointr, 0,
1943 };
1944
1945 #ifdef POOL_SUBPAGE
1946 void *pool_subpage_alloc(struct pool *, int);
1947 void pool_subpage_free(struct pool *, void *);
1948
1949 struct pool_allocator pool_allocator_kmem_subpage = {
1950 pool_subpage_alloc, pool_subpage_free, 0,
1951 };
1952 #endif /* POOL_SUBPAGE */
1953
1954 /*
1955 * We have at least three different resources for the same allocation and
1956 * each resource can be depleted. First, we have the ready elements in the
1957 * pool. Then we have the resource (typically a vm_map) for this allocator.
1958 * Finally, we have physical memory. Waiting for any of these can be
1959 * unnecessary when any other is freed, but the kernel doesn't support
1960 * sleeping on multiple wait channels, so we have to employ another strategy.
1961 *
1962 * The caller sleeps on the pool (so that it can be awakened when an item
1963 * is returned to the pool), but we set PA_WANT on the allocator. When a
1964 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1965 * will wake up all sleeping pools belonging to this allocator.
1966 *
1967 * XXX Thundering herd.
1968 */
1969 void *
1970 pool_allocator_alloc(struct pool *org, int flags)
1971 {
1972 struct pool_allocator *pa = org->pr_alloc;
1973 struct pool *pp, *start;
1974 int s, freed;
1975 void *res;
1976
1977 LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
1978
1979 do {
1980 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1981 return (res);
1982 if ((flags & PR_WAITOK) == 0) {
1983 /*
1984 * We only run the drain hook here if PR_NOWAIT.
1985 * In other cases, the hook will be run in
1986 * pool_reclaim().
1987 */
1988 if (org->pr_drain_hook != NULL) {
1989 (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1990 flags);
1991 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1992 return (res);
1993 }
1994 break;
1995 }
1996
1997 /*
1998 * Drain all pools, except "org", that use this
1999 * allocator. We do this to reclaim VA space.
2000 * pa_alloc is responsible for waiting for
2001 * physical memory.
2002 *
2003 * XXX We risk looping forever if someone
2004 * calls pool_destroy on "start". But there is no
2005 * other way to have potentially sleeping pool_reclaim,
2006 * non-sleeping locks on pool_allocator, and some
2007 * stirring of drained pools in the allocator.
2008 *
2009 * XXX Maybe we should use pool_head_slock for locking
2010 * the allocators?
2011 */
2012 freed = 0;
2013
2014 s = splvm();
2015 simple_lock(&pa->pa_slock);
2016 pp = start = TAILQ_FIRST(&pa->pa_list);
2017 do {
2018 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2019 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2020 if (pp == org)
2021 continue;
2022 simple_unlock(&pa->pa_slock);
2023 freed = pool_reclaim(pp);
2024 simple_lock(&pa->pa_slock);
2025 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2026 freed == 0);
2027
2028 if (freed == 0) {
2029 /*
2030 * We set PA_WANT here; the caller will most likely
2031 * sleep waiting for pages (if not, this won't hurt
2032 * that much), and there is no way to set this in
2033 * the caller without violating locking order.
2034 */
2035 pa->pa_flags |= PA_WANT;
2036 }
2037 simple_unlock(&pa->pa_slock);
2038 splx(s);
2039 } while (freed);
2040 return (NULL);
2041 }
2042
2043 void
2044 pool_allocator_free(struct pool *pp, void *v)
2045 {
2046 struct pool_allocator *pa = pp->pr_alloc;
2047 int s;
2048
2049 LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2050
2051 (*pa->pa_free)(pp, v);
2052
2053 s = splvm();
2054 simple_lock(&pa->pa_slock);
2055 if ((pa->pa_flags & PA_WANT) == 0) {
2056 simple_unlock(&pa->pa_slock);
2057 splx(s);
2058 return;
2059 }
2060
2061 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2062 simple_lock(&pp->pr_slock);
2063 if ((pp->pr_flags & PR_WANTED) != 0) {
2064 pp->pr_flags &= ~PR_WANTED;
2065 wakeup(pp);
2066 }
2067 simple_unlock(&pp->pr_slock);
2068 }
2069 pa->pa_flags &= ~PA_WANT;
2070 simple_unlock(&pa->pa_slock);
2071 splx(s);
2072 }
2073
2074 void *
2075 pool_page_alloc(struct pool *pp, int flags)
2076 {
2077 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2078
2079 return ((void *) uvm_km_alloc_poolpage(waitok));
2080 }
2081
2082 void
2083 pool_page_free(struct pool *pp, void *v)
2084 {
2085
2086 uvm_km_free_poolpage((vaddr_t) v);
2087 }
2088
2089 #ifdef POOL_SUBPAGE
2090 /* Sub-page allocator, for machines with large hardware pages. */
2091 void *
2092 pool_subpage_alloc(struct pool *pp, int flags)
2093 {
2094
2095 return (pool_get(&psppool, flags));
2096 }
2097
2098 void
2099 pool_subpage_free(struct pool *pp, void *v)
2100 {
2101
2102 pool_put(&psppool, v);
2103 }
2104
2105 /* We don't provide a real nointr allocator. Maybe later. */
2106 void *
2107 pool_page_alloc_nointr(struct pool *pp, int flags)
2108 {
2109
2110 return (pool_subpage_alloc(pp, flags));
2111 }
2112
2113 void
2114 pool_page_free_nointr(struct pool *pp, void *v)
2115 {
2116
2117 pool_subpage_free(pp, v);
2118 }
2119 #else
2120 void *
2121 pool_page_alloc_nointr(struct pool *pp, int flags)
2122 {
2123 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2124
2125 return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2126 uvm.kernel_object, waitok));
2127 }
2128
2129 void
2130 pool_page_free_nointr(struct pool *pp, void *v)
2131 {
2132
2133 uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2134 }
2135 #endif /* POOL_SUBPAGE */
2136