1 /*	$NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $	*/
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according to
63 * the pool item size. Each page is kept on one of three lists in the
64 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65 * for empty, full and partially-full pages respectively. The individual
66 * pool items are on a linked list headed by `ph_itemlist' in each page
67 * header. The memory for building the page list is either taken from
68 * the allocated pages themselves (for small pool items) or taken from
69 * an internal pool of page headers (`phpool').
70 */
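/*
 * Illustrative sketch (not part of the implementation, kept under #if 0):
 * how a typical client of this interface creates a pool for a fixed-size
 * object and then gets and puts items.  The structure, pool and wait
 * channel names below are hypothetical; pool_init(), pool_get() and
 * pool_put() are the real interfaces declared in <sys/pool.h>.
 */
#if 0
struct example_item {
	int	ei_state;
};

static struct pool example_pool;

static void
example_pool_usage(void)
{
	struct example_item *ei;

	/* One-time setup: default alignment, flags and page allocator. */
	pool_init(&example_pool, sizeof(struct example_item), 0, 0, 0,
	    "examplpl", NULL);

	/* Allocate an item, sleeping if the pool has to grow. */
	ei = pool_get(&example_pool, PR_WAITOK);
	ei->ei_state = 0;

	/* ... use the item ... */

	/* Return the item to the pool for reuse. */
	pool_put(&example_pool, ei);
}
#endif /* 0 */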
71
72 /* List of all pools */
73 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
74
75 /* Private pool for page header structures */
76 #define PHPOOL_MAX 8
77 static struct pool phpool[PHPOOL_MAX];
78 #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
79
80 #ifdef POOL_SUBPAGE
81 /* Pool of subpages for use by normal pools. */
82 static struct pool psppool;
83 #endif
84
85 static void *pool_page_alloc_meta(struct pool *, int);
86 static void pool_page_free_meta(struct pool *, void *);
87
88 /* allocator for pool metadata */
89 static struct pool_allocator pool_allocator_meta = {
90 pool_page_alloc_meta, pool_page_free_meta
91 };
92
93 /* # of seconds to retain page after last use */
94 int pool_inactive_time = 10;
95
96 /* Next candidate for drainage (see pool_drain()) */
97 static struct pool *drainpp;
98
99 /* This spin lock protects both pool_head and drainpp. */
100 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
101
102 struct pool_item_header {
103 /* Page headers */
104 LIST_ENTRY(pool_item_header)
105 ph_pagelist; /* pool page list */
106 SPLAY_ENTRY(pool_item_header)
107 ph_node; /* Off-page page headers */
108 caddr_t ph_page; /* this page's address */
109 struct timeval ph_time; /* last referenced */
110 union {
111 /* !PR_NOTOUCH */
112 struct {
113 TAILQ_HEAD(, pool_item)
114 phu_itemlist; /* chunk list for this page */
115 } phu_normal;
116 /* PR_NOTOUCH */
117 struct {
118 uint16_t
119 phu_off; /* start offset in page */
120 uint16_t
121 phu_firstfree; /* first free item */
122 } phu_notouch;
123 } ph_u;
124 uint16_t ph_nmissing; /* # of chunks in use */
125 };
126 #define ph_itemlist ph_u.phu_normal.phu_itemlist
127 #define ph_off ph_u.phu_notouch.phu_off
128 #define ph_firstfree ph_u.phu_notouch.phu_firstfree
129
130 struct pool_item {
131 #ifdef DIAGNOSTIC
132 u_int pi_magic;
133 #endif
134 #define PI_MAGIC 0xdeadbeefU
135 /* Other entries use only this list entry */
136 TAILQ_ENTRY(pool_item) pi_list;
137 };
138
139 #define POOL_NEEDS_CATCHUP(pp) \
140 ((pp)->pr_nitems < (pp)->pr_minitems)
141
142 /*
143 * Pool cache management.
144 *
145 * Pool caches provide a way for constructed objects to be cached by the
146  * pool subsystem.  This can improve performance by avoiding needless
147  * object construction/destruction, which is deferred until absolutely
148  * necessary.
149 *
150 * Caches are grouped into cache groups. Each cache group references
151 * up to 16 constructed objects. When a cache allocates an object
152 * from the pool, it calls the object's constructor and places it into
153 * a cache group. When a cache group frees an object back to the pool,
154 * it first calls the object's destructor. This allows the object to
155 * persist in constructed form while freed to the cache.
156 *
157 * Multiple caches may exist for each pool. This allows a single
158 * object type to have multiple constructed forms. The pool references
159 * each cache, so that when a pool is drained by the pagedaemon, it can
160 * drain each individual cache as well. Each time a cache is drained,
161 * the most idle cache group is freed to the pool in its entirety.
162 *
163  * Pool caches are laid on top of pools.  By layering them, we can avoid
164 * the complexity of cache management for pools which would not benefit
165 * from it.
166 */
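/*
 * Illustrative sketch (not compiled): layering a pool_cache on top of a
 * pool so objects keep their constructed form between uses.  The names
 * and the constructor/destructor bodies are hypothetical; the pool_cache
 * interfaces used are the ones implemented later in this file.
 */
#if 0
struct excache_obj {
	int	eo_refs;
};

static struct pool excache_pool;
static struct pool_cache excache_cache;

static int
excache_ctor(void *arg, void *obj, int flags)
{
	struct excache_obj *eo = obj;

	/* Bring the object into its constructed state; 0 means success. */
	eo->eo_refs = 0;
	return 0;
}

static void
excache_dtor(void *arg, void *obj)
{

	/* Undo whatever the constructor set up. */
}

static void
excache_usage(void)
{
	void *obj;

	pool_init(&excache_pool, sizeof(struct excache_obj), 0, 0, 0,
	    "excache", NULL);
	pool_cache_init(&excache_cache, &excache_pool,
	    excache_ctor, excache_dtor, NULL);

	/* Prefer a cached, already-constructed object over a bare pool_get(). */
	obj = pool_cache_get_paddr(&excache_cache, PR_WAITOK, NULL);

	/* ... use the object ... */

	/*
	 * Return it still constructed; the destructor runs only when
	 * the object eventually goes back to the underlying pool.
	 */
	pool_cache_put_paddr(&excache_cache, obj, POOL_PADDR_INVALID);
}
#endif /* 0 */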
167
168 /* The cache group pool. */
169 static struct pool pcgpool;
170
171 static void pool_cache_reclaim(struct pool_cache *);
172
173 static int pool_catchup(struct pool *);
174 static void pool_prime_page(struct pool *, caddr_t,
175 struct pool_item_header *);
176 static void pool_update_curpage(struct pool *);
177
178 void *pool_allocator_alloc(struct pool *, int);
179 void pool_allocator_free(struct pool *, void *);
180
181 static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
182 void (*)(const char *, ...));
183 static void pool_print1(struct pool *, const char *,
184 void (*)(const char *, ...));
185
186 static int pool_chk_page(struct pool *, const char *,
187 struct pool_item_header *);
188
189 /*
190 * Pool log entry. An array of these is allocated in pool_init().
191 */
192 struct pool_log {
193 const char *pl_file;
194 long pl_line;
195 int pl_action;
196 #define PRLOG_GET 1
197 #define PRLOG_PUT 2
198 void *pl_addr;
199 };
200
201 #ifdef POOL_DIAGNOSTIC
202 /* Number of entries in pool log buffers */
203 #ifndef POOL_LOGSIZE
204 #define POOL_LOGSIZE 10
205 #endif
206
207 int pool_logsize = POOL_LOGSIZE;
208
209 static __inline void
210 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
211 {
212 int n = pp->pr_curlogentry;
213 struct pool_log *pl;
214
215 if ((pp->pr_roflags & PR_LOGGING) == 0)
216 return;
217
218 /*
219 * Fill in the current entry. Wrap around and overwrite
220 * the oldest entry if necessary.
221 */
222 pl = &pp->pr_log[n];
223 pl->pl_file = file;
224 pl->pl_line = line;
225 pl->pl_action = action;
226 pl->pl_addr = v;
227 if (++n >= pp->pr_logsize)
228 n = 0;
229 pp->pr_curlogentry = n;
230 }
231
232 static void
233 pr_printlog(struct pool *pp, struct pool_item *pi,
234 void (*pr)(const char *, ...))
235 {
236 int i = pp->pr_logsize;
237 int n = pp->pr_curlogentry;
238
239 if ((pp->pr_roflags & PR_LOGGING) == 0)
240 return;
241
242 /*
243 * Print all entries in this pool's log.
244 */
245 while (i-- > 0) {
246 struct pool_log *pl = &pp->pr_log[n];
247 if (pl->pl_action != 0) {
248 if (pi == NULL || pi == pl->pl_addr) {
249 (*pr)("\tlog entry %d:\n", i);
250 (*pr)("\t\taction = %s, addr = %p\n",
251 pl->pl_action == PRLOG_GET ? "get" : "put",
252 pl->pl_addr);
253 (*pr)("\t\tfile: %s at line %lu\n",
254 pl->pl_file, pl->pl_line);
255 }
256 }
257 if (++n >= pp->pr_logsize)
258 n = 0;
259 }
260 }
261
262 static __inline void
263 pr_enter(struct pool *pp, const char *file, long line)
264 {
265
266 if (__predict_false(pp->pr_entered_file != NULL)) {
267 printf("pool %s: reentrancy at file %s line %ld\n",
268 pp->pr_wchan, file, line);
269 printf(" previous entry at file %s line %ld\n",
270 pp->pr_entered_file, pp->pr_entered_line);
271 panic("pr_enter");
272 }
273
274 pp->pr_entered_file = file;
275 pp->pr_entered_line = line;
276 }
277
278 static __inline void
279 pr_leave(struct pool *pp)
280 {
281
282 if (__predict_false(pp->pr_entered_file == NULL)) {
283 printf("pool %s not entered?\n", pp->pr_wchan);
284 panic("pr_leave");
285 }
286
287 pp->pr_entered_file = NULL;
288 pp->pr_entered_line = 0;
289 }
290
291 static __inline void
292 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
293 {
294
295 if (pp->pr_entered_file != NULL)
296 (*pr)("\n\tcurrently entered from file %s line %ld\n",
297 pp->pr_entered_file, pp->pr_entered_line);
298 }
299 #else
300 #define pr_log(pp, v, action, file, line)
301 #define pr_printlog(pp, pi, pr)
302 #define pr_enter(pp, file, line)
303 #define pr_leave(pp)
304 #define pr_enter_check(pp, pr)
305 #endif /* POOL_DIAGNOSTIC */
306
307 static __inline int
308 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
309 const void *v)
310 {
311 const char *cp = v;
312 int idx;
313
314 KASSERT(pp->pr_roflags & PR_NOTOUCH);
315 idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
316 KASSERT(idx < pp->pr_itemsperpage);
317 return idx;
318 }
319
320 #define PR_FREELIST_ALIGN(p) roundup((uintptr_t)(p), sizeof(uint16_t))
321 #define PR_FREELIST(ph) ((uint16_t *)PR_FREELIST_ALIGN((ph) + 1))
322 #define PR_INDEX_USED ((uint16_t)-1)
323 #define PR_INDEX_EOL ((uint16_t)-2)
324
325 static __inline void
326 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
327 void *obj)
328 {
329 int idx = pr_item_notouch_index(pp, ph, obj);
330 uint16_t *freelist = PR_FREELIST(ph);
331
332 KASSERT(freelist[idx] == PR_INDEX_USED);
333 freelist[idx] = ph->ph_firstfree;
334 ph->ph_firstfree = idx;
335 }
336
337 static __inline void *
338 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
339 {
340 int idx = ph->ph_firstfree;
341 uint16_t *freelist = PR_FREELIST(ph);
342
343 KASSERT(freelist[idx] != PR_INDEX_USED);
344 ph->ph_firstfree = freelist[idx];
345 freelist[idx] = PR_INDEX_USED;
346
347 return ph->ph_page + ph->ph_off + idx * pp->pr_size;
348 }
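/*
 * Worked example of the PR_NOTOUCH freelist encoding above (hypothetical
 * 4-item page): pool_prime_page() initializes the freelist to
 * { 1, 2, 3, PR_INDEX_EOL } with ph_firstfree = 0.  pr_item_notouch_get()
 * then hands out item 0, leaving { PR_INDEX_USED, 2, 3, PR_INDEX_EOL }
 * and ph_firstfree = 1; a later pr_item_notouch_put() of item 0 sets
 * freelist[0] = 1 and ph_firstfree = 0 again.  The items themselves are
 * never written, which is the point of PR_NOTOUCH.
 */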
349
350 static __inline int
351 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
352 {
353 if (a->ph_page < b->ph_page)
354 return (-1);
355 else if (a->ph_page > b->ph_page)
356 return (1);
357 else
358 return (0);
359 }
360
361 SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
362 SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
363
364 /*
365 * Return the pool page header based on page address.
366 */
367 static __inline struct pool_item_header *
368 pr_find_pagehead(struct pool *pp, caddr_t page)
369 {
370 struct pool_item_header *ph, tmp;
371
372 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
373 return ((struct pool_item_header *)(page + pp->pr_phoffset));
374
375 tmp.ph_page = page;
376 ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
377 return ph;
378 }
379
380 /*
381 * Remove a page from the pool.
382 */
383 static __inline void
384 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
385 struct pool_pagelist *pq)
386 {
387 int s;
388
389 LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);
390
391 /*
392 * If the page was idle, decrement the idle page count.
393 */
394 if (ph->ph_nmissing == 0) {
395 #ifdef DIAGNOSTIC
396 if (pp->pr_nidle == 0)
397 panic("pr_rmpage: nidle inconsistent");
398 if (pp->pr_nitems < pp->pr_itemsperpage)
399 panic("pr_rmpage: nitems inconsistent");
400 #endif
401 pp->pr_nidle--;
402 }
403
404 pp->pr_nitems -= pp->pr_itemsperpage;
405
406 /*
407 * Unlink a page from the pool and release it (or queue it for release).
408 */
409 LIST_REMOVE(ph, ph_pagelist);
410 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
411 SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
412 if (pq) {
413 LIST_INSERT_HEAD(pq, ph, ph_pagelist);
414 } else {
415 pool_allocator_free(pp, ph->ph_page);
416 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
417 s = splvm();
418 pool_put(pp->pr_phpool, ph);
419 splx(s);
420 }
421 }
422 pp->pr_npages--;
423 pp->pr_npagefree++;
424
425 pool_update_curpage(pp);
426 }
427
428 /*
429 * Initialize all the pools listed in the "pools" link set.
430 */
431 void
432 link_pool_init(void)
433 {
434 __link_set_decl(pools, struct link_pool_init);
435 struct link_pool_init * const *pi;
436
437 __link_set_foreach(pi, pools)
438 pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
439 (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
440 (*pi)->palloc);
441 }
442
443 /*
444 * Initialize the given pool resource structure.
445 *
446 * We export this routine to allow other kernel parts to declare
447 * static pools that must be initialized before malloc() is available.
448 */
449 void
450 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
451 const char *wchan, struct pool_allocator *palloc)
452 {
453 int off, slack;
454 size_t trysize, phsize;
455 int s;
456
457 #ifdef POOL_DIAGNOSTIC
458 /*
459 * Always log if POOL_DIAGNOSTIC is defined.
460 */
461 if (pool_logsize != 0)
462 flags |= PR_LOGGING;
463 #endif
464
465 #ifdef POOL_SUBPAGE
466 /*
467 * XXX We don't provide a real `nointr' back-end
468 * yet; all sub-pages come from a kmem back-end.
469  * Maybe some day...
470 */
471 if (palloc == NULL) {
472 extern struct pool_allocator pool_allocator_kmem_subpage;
473 palloc = &pool_allocator_kmem_subpage;
474 }
475 /*
476 * We'll assume any user-specified back-end allocator
477  * will deal with sub-pages, or simply doesn't care.
478 */
479 #else
480 if (palloc == NULL)
481 palloc = &pool_allocator_kmem;
482 #endif /* POOL_SUBPAGE */
483 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
484 if (palloc->pa_pagesz == 0) {
485 #ifdef POOL_SUBPAGE
486 if (palloc == &pool_allocator_kmem)
487 palloc->pa_pagesz = PAGE_SIZE;
488 else
489 palloc->pa_pagesz = POOL_SUBPAGE;
490 #else
491 palloc->pa_pagesz = PAGE_SIZE;
492 #endif /* POOL_SUBPAGE */
493 }
494
495 TAILQ_INIT(&palloc->pa_list);
496
497 simple_lock_init(&palloc->pa_slock);
498 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
499 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
500 palloc->pa_flags |= PA_INITIALIZED;
501 }
502
503 if (align == 0)
504 align = ALIGN(1);
505
506 if (size < sizeof(struct pool_item))
507 size = sizeof(struct pool_item);
508
509 size = roundup(size, align);
510 #ifdef DIAGNOSTIC
511 if (size > palloc->pa_pagesz)
512 panic("pool_init: pool item size (%lu) too large",
513 (u_long)size);
514 #endif
515
516 /*
517 * Initialize the pool structure.
518 */
519 LIST_INIT(&pp->pr_emptypages);
520 LIST_INIT(&pp->pr_fullpages);
521 LIST_INIT(&pp->pr_partpages);
522 TAILQ_INIT(&pp->pr_cachelist);
523 pp->pr_curpage = NULL;
524 pp->pr_npages = 0;
525 pp->pr_minitems = 0;
526 pp->pr_minpages = 0;
527 pp->pr_maxpages = UINT_MAX;
528 pp->pr_roflags = flags;
529 pp->pr_flags = 0;
530 pp->pr_size = size;
531 pp->pr_align = align;
532 pp->pr_wchan = wchan;
533 pp->pr_alloc = palloc;
534 pp->pr_nitems = 0;
535 pp->pr_nout = 0;
536 pp->pr_hardlimit = UINT_MAX;
537 pp->pr_hardlimit_warning = NULL;
538 pp->pr_hardlimit_ratecap.tv_sec = 0;
539 pp->pr_hardlimit_ratecap.tv_usec = 0;
540 pp->pr_hardlimit_warning_last.tv_sec = 0;
541 pp->pr_hardlimit_warning_last.tv_usec = 0;
542 pp->pr_drain_hook = NULL;
543 pp->pr_drain_hook_arg = NULL;
544
545 /*
546 	 * Decide whether to put the page header off-page, to avoid wasting
547 	 * too large a part of the page or when the item is too big.
548 	 * Off-page page headers go into a splay tree, so we can match
549 	 * a returned item with its header based on the page address.
550 	 * We use 1/16 of the page size and about 8 times the page header
551 	 * size as the threshold (XXX: tune).
552 *
553 * However, we'll put the header into the page if we can put
554 * it without wasting any items.
555 *
556 * Silently enforce `0 <= ioff < align'.
557 */
558 pp->pr_itemoffset = ioff %= align;
559 /* See the comment below about reserved bytes. */
560 trysize = palloc->pa_pagesz - ((align - ioff) % align);
561 phsize = ALIGN(sizeof(struct pool_item_header));
562 if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
563 (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
564 trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
565 /* Use the end of the page for the page header */
566 pp->pr_roflags |= PR_PHINPAGE;
567 pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
568 } else {
569 /* The page header will be taken from our page header pool */
570 pp->pr_phoffset = 0;
571 off = palloc->pa_pagesz;
572 SPLAY_INIT(&pp->pr_phtree);
573 }
574
575 /*
576 * Alignment is to take place at `ioff' within the item. This means
577 * we must reserve up to `align - 1' bytes on the page to allow
578 * appropriate positioning of each item.
579 */
580 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
581 KASSERT(pp->pr_itemsperpage != 0);
582 if ((pp->pr_roflags & PR_NOTOUCH)) {
583 int idx;
584
585 for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
586 idx++) {
587 /* nothing */
588 }
589 if (idx >= PHPOOL_MAX) {
590 /*
591 			 * if you see this panic, consider tweaking
592 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
593 */
594 panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
595 pp->pr_wchan, pp->pr_itemsperpage);
596 }
597 pp->pr_phpool = &phpool[idx];
598 } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
599 pp->pr_phpool = &phpool[0];
600 }
601 #if defined(DIAGNOSTIC)
602 else {
603 pp->pr_phpool = NULL;
604 }
605 #endif
606
607 /*
608 * Use the slack between the chunks and the page header
609 * for "cache coloring".
610 */
611 slack = off - pp->pr_itemsperpage * pp->pr_size;
612 pp->pr_maxcolor = (slack / align) * align;
613 pp->pr_curcolor = 0;
614
615 pp->pr_nget = 0;
616 pp->pr_nfail = 0;
617 pp->pr_nput = 0;
618 pp->pr_npagealloc = 0;
619 pp->pr_npagefree = 0;
620 pp->pr_hiwat = 0;
621 pp->pr_nidle = 0;
622
623 #ifdef POOL_DIAGNOSTIC
624 if (flags & PR_LOGGING) {
625 if (kmem_map == NULL ||
626 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
627 M_TEMP, M_NOWAIT)) == NULL)
628 pp->pr_roflags &= ~PR_LOGGING;
629 pp->pr_curlogentry = 0;
630 pp->pr_logsize = pool_logsize;
631 }
632 #endif
633
634 pp->pr_entered_file = NULL;
635 pp->pr_entered_line = 0;
636
637 simple_lock_init(&pp->pr_slock);
638
639 /*
640 * Initialize private page header pool and cache magazine pool if we
641 * haven't done so yet.
642 * XXX LOCKING.
643 */
644 if (phpool[0].pr_size == 0) {
645 int idx;
646 for (idx = 0; idx < PHPOOL_MAX; idx++) {
647 static char phpool_names[PHPOOL_MAX][6+1+6+1];
648 int nelem;
649 size_t sz;
650
651 nelem = PHPOOL_FREELIST_NELEM(idx);
652 snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
653 "phpool-%d", nelem);
654 sz = sizeof(struct pool_item_header);
655 if (nelem) {
656 sz = PR_FREELIST_ALIGN(sz)
657 + nelem * sizeof(uint16_t);
658 }
659 pool_init(&phpool[idx], sz, 0, 0, 0,
660 phpool_names[idx], &pool_allocator_meta);
661 }
662 #ifdef POOL_SUBPAGE
663 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
664 PR_RECURSIVE, "psppool", &pool_allocator_meta);
665 #endif
666 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
667 0, "pcgpool", &pool_allocator_meta);
668 }
669
670 /* Insert into the list of all pools. */
671 simple_lock(&pool_head_slock);
672 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
673 simple_unlock(&pool_head_slock);
674
675 /* Insert this into the list of pools using this allocator. */
676 s = splvm();
677 simple_lock(&palloc->pa_slock);
678 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
679 simple_unlock(&palloc->pa_slock);
680 splx(s);
681 }
682
683 /*
684  * De-commission a pool resource.
685 */
686 void
687 pool_destroy(struct pool *pp)
688 {
689 struct pool_item_header *ph;
690 struct pool_cache *pc;
691 int s;
692
693 /* Locking order: pool_allocator -> pool */
694 s = splvm();
695 simple_lock(&pp->pr_alloc->pa_slock);
696 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
697 simple_unlock(&pp->pr_alloc->pa_slock);
698 splx(s);
699
700 /* Destroy all caches for this pool. */
701 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
702 pool_cache_destroy(pc);
703
704 #ifdef DIAGNOSTIC
705 if (pp->pr_nout != 0) {
706 pr_printlog(pp, NULL, printf);
707 panic("pool_destroy: pool busy: still out: %u",
708 pp->pr_nout);
709 }
710 #endif
711
712 /* Remove all pages */
713 while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
714 pr_rmpage(pp, ph, NULL);
715 KASSERT(LIST_EMPTY(&pp->pr_fullpages));
716 KASSERT(LIST_EMPTY(&pp->pr_partpages));
717
718 /* Remove from global pool list */
719 simple_lock(&pool_head_slock);
720 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
721 if (drainpp == pp) {
722 drainpp = NULL;
723 }
724 simple_unlock(&pool_head_slock);
725
726 #ifdef POOL_DIAGNOSTIC
727 if ((pp->pr_roflags & PR_LOGGING) != 0)
728 free(pp->pr_log, M_TEMP);
729 #endif
730 }
731
732 void
733 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
734 {
735
736 /* XXX no locking -- must be used just after pool_init() */
737 #ifdef DIAGNOSTIC
738 if (pp->pr_drain_hook != NULL)
739 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
740 #endif
741 pp->pr_drain_hook = fn;
742 pp->pr_drain_hook_arg = arg;
743 }
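/*
 * Illustrative sketch (not compiled): a subsystem that hoards pool items
 * in a private cache can register a drain hook right after pool_init(),
 * so the pool can ask for those items back under memory pressure.  The
 * names below are hypothetical.
 */
#if 0
static struct pool exdrain_pool;

static void
exdrain_hook(void *arg, int flags)
{

	/*
	 * Called with the pool unlocked; release privately held items
	 * with pool_put().  `flags' carries the PR_WAITOK/PR_NOWAIT
	 * value of the allocation that triggered the drain.
	 */
}

static void
exdrain_attach(void)
{

	pool_init(&exdrain_pool, 64, 0, 0, 0, "exdrain", NULL);
	pool_set_drain_hook(&exdrain_pool, exdrain_hook, NULL);
}
#endif /* 0 */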
744
745 static struct pool_item_header *
746 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
747 {
748 struct pool_item_header *ph;
749 int s;
750
751 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
752
753 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
754 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
755 else {
756 s = splvm();
757 ph = pool_get(pp->pr_phpool, flags);
758 splx(s);
759 }
760
761 return (ph);
762 }
763
764 /*
765 * Grab an item from the pool; must be called at appropriate spl level
766 */
767 void *
768 #ifdef POOL_DIAGNOSTIC
769 _pool_get(struct pool *pp, int flags, const char *file, long line)
770 #else
771 pool_get(struct pool *pp, int flags)
772 #endif
773 {
774 struct pool_item *pi;
775 struct pool_item_header *ph;
776 void *v;
777
778 #ifdef DIAGNOSTIC
779 if (__predict_false(pp->pr_itemsperpage == 0))
780 panic("pool_get: pool %p: pr_itemsperpage is zero, "
781 "pool not initialized?", pp);
782 if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
783 (flags & PR_WAITOK) != 0))
784 panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
785
786 #ifdef LOCKDEBUG
787 if (flags & PR_WAITOK)
788 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
789 #endif
790 #endif /* DIAGNOSTIC */
791
792 simple_lock(&pp->pr_slock);
793 pr_enter(pp, file, line);
794
795 startover:
796 /*
797 * Check to see if we've reached the hard limit. If we have,
798 * and we can wait, then wait until an item has been returned to
799 * the pool.
800 */
801 #ifdef DIAGNOSTIC
802 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
803 pr_leave(pp);
804 simple_unlock(&pp->pr_slock);
805 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
806 }
807 #endif
808 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
809 if (pp->pr_drain_hook != NULL) {
810 /*
811 * Since the drain hook is going to free things
812 * back to the pool, unlock, call the hook, re-lock,
813 * and check the hardlimit condition again.
814 */
815 pr_leave(pp);
816 simple_unlock(&pp->pr_slock);
817 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
818 simple_lock(&pp->pr_slock);
819 pr_enter(pp, file, line);
820 if (pp->pr_nout < pp->pr_hardlimit)
821 goto startover;
822 }
823
824 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
825 /*
826 * XXX: A warning isn't logged in this case. Should
827 * it be?
828 */
829 pp->pr_flags |= PR_WANTED;
830 pr_leave(pp);
831 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
832 pr_enter(pp, file, line);
833 goto startover;
834 }
835
836 /*
837 * Log a message that the hard limit has been hit.
838 */
839 if (pp->pr_hardlimit_warning != NULL &&
840 ratecheck(&pp->pr_hardlimit_warning_last,
841 &pp->pr_hardlimit_ratecap))
842 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
843
844 pp->pr_nfail++;
845
846 pr_leave(pp);
847 simple_unlock(&pp->pr_slock);
848 return (NULL);
849 }
850
851 /*
852 * The convention we use is that if `curpage' is not NULL, then
853 * it points at a non-empty bucket. In particular, `curpage'
854 * never points at a page header which has PR_PHINPAGE set and
855 * has no items in its bucket.
856 */
857 if ((ph = pp->pr_curpage) == NULL) {
858 #ifdef DIAGNOSTIC
859 if (pp->pr_nitems != 0) {
860 simple_unlock(&pp->pr_slock);
861 printf("pool_get: %s: curpage NULL, nitems %u\n",
862 pp->pr_wchan, pp->pr_nitems);
863 panic("pool_get: nitems inconsistent");
864 }
865 #endif
866
867 /*
868 * Call the back-end page allocator for more memory.
869 * Release the pool lock, as the back-end page allocator
870 * may block.
871 */
872 pr_leave(pp);
873 simple_unlock(&pp->pr_slock);
874 v = pool_allocator_alloc(pp, flags);
875 if (__predict_true(v != NULL))
876 ph = pool_alloc_item_header(pp, v, flags);
877
878 if (__predict_false(v == NULL || ph == NULL)) {
879 if (v != NULL)
880 pool_allocator_free(pp, v);
881
882 simple_lock(&pp->pr_slock);
883 pr_enter(pp, file, line);
884
885 /*
886 * We were unable to allocate a page or item
887 * header, but we released the lock during
888 * allocation, so perhaps items were freed
889 * back to the pool. Check for this case.
890 */
891 if (pp->pr_curpage != NULL)
892 goto startover;
893
894 if ((flags & PR_WAITOK) == 0) {
895 pp->pr_nfail++;
896 pr_leave(pp);
897 simple_unlock(&pp->pr_slock);
898 return (NULL);
899 }
900
901 /*
902 * Wait for items to be returned to this pool.
903 *
904 * XXX: maybe we should wake up once a second and
905 * try again?
906 */
907 pp->pr_flags |= PR_WANTED;
908 /* PA_WANTED is already set on the allocator. */
909 pr_leave(pp);
910 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
911 pr_enter(pp, file, line);
912 goto startover;
913 }
914
915 /* We have more memory; add it to the pool */
916 simple_lock(&pp->pr_slock);
917 pr_enter(pp, file, line);
918 pool_prime_page(pp, v, ph);
919 pp->pr_npagealloc++;
920
921 /* Start the allocation process over. */
922 goto startover;
923 }
924 if (pp->pr_roflags & PR_NOTOUCH) {
925 #ifdef DIAGNOSTIC
926 if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
927 pr_leave(pp);
928 simple_unlock(&pp->pr_slock);
929 panic("pool_get: %s: page empty", pp->pr_wchan);
930 }
931 #endif
932 v = pr_item_notouch_get(pp, ph);
933 #ifdef POOL_DIAGNOSTIC
934 pr_log(pp, v, PRLOG_GET, file, line);
935 #endif
936 } else {
937 v = pi = TAILQ_FIRST(&ph->ph_itemlist);
938 if (__predict_false(v == NULL)) {
939 pr_leave(pp);
940 simple_unlock(&pp->pr_slock);
941 panic("pool_get: %s: page empty", pp->pr_wchan);
942 }
943 #ifdef DIAGNOSTIC
944 if (__predict_false(pp->pr_nitems == 0)) {
945 pr_leave(pp);
946 simple_unlock(&pp->pr_slock);
947 printf("pool_get: %s: items on itemlist, nitems %u\n",
948 pp->pr_wchan, pp->pr_nitems);
949 panic("pool_get: nitems inconsistent");
950 }
951 #endif
952
953 #ifdef POOL_DIAGNOSTIC
954 pr_log(pp, v, PRLOG_GET, file, line);
955 #endif
956
957 #ifdef DIAGNOSTIC
958 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
959 pr_printlog(pp, pi, printf);
960 panic("pool_get(%s): free list modified: "
961 "magic=%x; page %p; item addr %p\n",
962 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
963 }
964 #endif
965
966 /*
967 * Remove from item list.
968 */
969 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
970 }
971 pp->pr_nitems--;
972 pp->pr_nout++;
973 if (ph->ph_nmissing == 0) {
974 #ifdef DIAGNOSTIC
975 if (__predict_false(pp->pr_nidle == 0))
976 panic("pool_get: nidle inconsistent");
977 #endif
978 pp->pr_nidle--;
979
980 /*
981 * This page was previously empty. Move it to the list of
982 * partially-full pages. This page is already curpage.
983 */
984 LIST_REMOVE(ph, ph_pagelist);
985 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
986 }
987 ph->ph_nmissing++;
988 if (ph->ph_nmissing == pp->pr_itemsperpage) {
989 #ifdef DIAGNOSTIC
990 if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
991 !TAILQ_EMPTY(&ph->ph_itemlist))) {
992 pr_leave(pp);
993 simple_unlock(&pp->pr_slock);
994 panic("pool_get: %s: nmissing inconsistent",
995 pp->pr_wchan);
996 }
997 #endif
998 /*
999 * This page is now full. Move it to the full list
1000 * and select a new current page.
1001 */
1002 LIST_REMOVE(ph, ph_pagelist);
1003 LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1004 pool_update_curpage(pp);
1005 }
1006
1007 pp->pr_nget++;
1008
1009 /*
1010 * If we have a low water mark and we are now below that low
1011 * water mark, add more items to the pool.
1012 */
1013 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1014 /*
1015 * XXX: Should we log a warning? Should we set up a timeout
1016 * to try again in a second or so? The latter could break
1017 * a caller's assumptions about interrupt protection, etc.
1018 */
1019 }
1020
1021 pr_leave(pp);
1022 simple_unlock(&pp->pr_slock);
1023 return (v);
1024 }
1025
1026 /*
1027 * Internal version of pool_put(). Pool is already locked/entered.
1028 */
1029 static void
1030 pool_do_put(struct pool *pp, void *v)
1031 {
1032 struct pool_item *pi = v;
1033 struct pool_item_header *ph;
1034 caddr_t page;
1035 int s;
1036
1037 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1038
1039 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1040
1041 #ifdef DIAGNOSTIC
1042 if (__predict_false(pp->pr_nout == 0)) {
1043 printf("pool %s: putting with none out\n",
1044 pp->pr_wchan);
1045 panic("pool_put");
1046 }
1047 #endif
1048
1049 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1050 pr_printlog(pp, NULL, printf);
1051 panic("pool_put: %s: page header missing", pp->pr_wchan);
1052 }
1053
1054 #ifdef LOCKDEBUG
1055 /*
1056 * Check if we're freeing a locked simple lock.
1057 */
1058 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1059 #endif
1060
1061 /*
1062 * Return to item list.
1063 */
1064 if (pp->pr_roflags & PR_NOTOUCH) {
1065 pr_item_notouch_put(pp, ph, v);
1066 } else {
1067 #ifdef DIAGNOSTIC
1068 pi->pi_magic = PI_MAGIC;
1069 #endif
1070 #ifdef DEBUG
1071 {
1072 int i, *ip = v;
1073
1074 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1075 *ip++ = PI_MAGIC;
1076 }
1077 }
1078 #endif
1079
1080 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1081 }
1082 KDASSERT(ph->ph_nmissing != 0);
1083 ph->ph_nmissing--;
1084 pp->pr_nput++;
1085 pp->pr_nitems++;
1086 pp->pr_nout--;
1087
1088 /* Cancel "pool empty" condition if it exists */
1089 if (pp->pr_curpage == NULL)
1090 pp->pr_curpage = ph;
1091
1092 if (pp->pr_flags & PR_WANTED) {
1093 pp->pr_flags &= ~PR_WANTED;
1094 if (ph->ph_nmissing == 0)
1095 pp->pr_nidle++;
1096 wakeup((caddr_t)pp);
1097 return;
1098 }
1099
1100 /*
1101 * If this page is now empty, do one of two things:
1102 *
1103 * (1) If we have more pages than the page high water mark,
1104 * free the page back to the system. ONLY CONSIDER
1105 * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1106 * CLAIM.
1107 *
1108 * (2) Otherwise, move the page to the empty page list.
1109 *
1110 * Either way, select a new current page (so we use a partially-full
1111 * page if one is available).
1112 */
1113 if (ph->ph_nmissing == 0) {
1114 pp->pr_nidle++;
1115 if (pp->pr_npages > pp->pr_minpages &&
1116 (pp->pr_npages > pp->pr_maxpages ||
1117 (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1118 simple_unlock(&pp->pr_slock);
1119 pr_rmpage(pp, ph, NULL);
1120 simple_lock(&pp->pr_slock);
1121 } else {
1122 LIST_REMOVE(ph, ph_pagelist);
1123 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1124
1125 /*
1126 * Update the timestamp on the page. A page must
1127 * be idle for some period of time before it can
1128 * be reclaimed by the pagedaemon. This minimizes
1129 * ping-pong'ing for memory.
1130 */
1131 s = splclock();
1132 ph->ph_time = mono_time;
1133 splx(s);
1134 }
1135 pool_update_curpage(pp);
1136 }
1137
1138 /*
1139 * If the page was previously completely full, move it to the
1140 * partially-full list and make it the current page. The next
1141 * allocation will get the item from this page, instead of
1142 * further fragmenting the pool.
1143 */
1144 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1145 LIST_REMOVE(ph, ph_pagelist);
1146 LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1147 pp->pr_curpage = ph;
1148 }
1149 }
1150
1151 /*
1152 * Return resource to the pool; must be called at appropriate spl level
1153 */
1154 #ifdef POOL_DIAGNOSTIC
1155 void
1156 _pool_put(struct pool *pp, void *v, const char *file, long line)
1157 {
1158
1159 simple_lock(&pp->pr_slock);
1160 pr_enter(pp, file, line);
1161
1162 pr_log(pp, v, PRLOG_PUT, file, line);
1163
1164 pool_do_put(pp, v);
1165
1166 pr_leave(pp);
1167 simple_unlock(&pp->pr_slock);
1168 }
1169 #undef pool_put
1170 #endif /* POOL_DIAGNOSTIC */
1171
1172 void
1173 pool_put(struct pool *pp, void *v)
1174 {
1175
1176 simple_lock(&pp->pr_slock);
1177
1178 pool_do_put(pp, v);
1179
1180 simple_unlock(&pp->pr_slock);
1181 }
1182
1183 #ifdef POOL_DIAGNOSTIC
1184 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1185 #endif
1186
1187 /*
1188 * Add N items to the pool.
1189 */
1190 int
1191 pool_prime(struct pool *pp, int n)
1192 {
1193 struct pool_item_header *ph = NULL;
1194 caddr_t cp;
1195 int newpages;
1196
1197 simple_lock(&pp->pr_slock);
1198
1199 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1200
1201 while (newpages-- > 0) {
1202 simple_unlock(&pp->pr_slock);
1203 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1204 if (__predict_true(cp != NULL))
1205 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1206
1207 if (__predict_false(cp == NULL || ph == NULL)) {
1208 if (cp != NULL)
1209 pool_allocator_free(pp, cp);
1210 simple_lock(&pp->pr_slock);
1211 break;
1212 }
1213
1214 simple_lock(&pp->pr_slock);
1215 pool_prime_page(pp, cp, ph);
1216 pp->pr_npagealloc++;
1217 pp->pr_minpages++;
1218 }
1219
1220 if (pp->pr_minpages >= pp->pr_maxpages)
1221 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1222
1223 simple_unlock(&pp->pr_slock);
1224 return (0);
1225 }
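/*
 * Illustrative sketch (not compiled): a driver that must allocate from
 * interrupt context can prime its pool at attach time so that early
 * pool_get(..., PR_NOWAIT) calls do not fail.  The names and counts
 * below are hypothetical.
 */
#if 0
static struct pool exprime_pool;

static void
exprime_attach(void)
{

	pool_init(&exprime_pool, 64, 0, 0, 0, "exprime", NULL);

	/* Pre-allocate enough pages to hold at least 32 items. */
	(void) pool_prime(&exprime_pool, 32);
}
#endif /* 0 */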
1226
1227 /*
1228 * Add a page worth of items to the pool.
1229 *
1230 * Note, we must be called with the pool descriptor LOCKED.
1231 */
1232 static void
1233 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1234 {
1235 struct pool_item *pi;
1236 caddr_t cp = storage;
1237 unsigned int align = pp->pr_align;
1238 unsigned int ioff = pp->pr_itemoffset;
1239 int n;
1240 int s;
1241
1242 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1243
1244 #ifdef DIAGNOSTIC
1245 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1246 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1247 #endif
1248
1249 /*
1250 * Insert page header.
1251 */
1252 LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1253 TAILQ_INIT(&ph->ph_itemlist);
1254 ph->ph_page = storage;
1255 ph->ph_nmissing = 0;
1256 s = splclock();
1257 ph->ph_time = mono_time;
1258 splx(s);
1259 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1260 SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1261
1262 pp->pr_nidle++;
1263
1264 /*
1265 * Color this page.
1266 */
1267 cp = (caddr_t)(cp + pp->pr_curcolor);
1268 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1269 pp->pr_curcolor = 0;
1270
1271 /*
1272 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1273 */
1274 if (ioff != 0)
1275 cp = (caddr_t)(cp + (align - ioff));
1276
1277 /*
1278 * Insert remaining chunks on the bucket list.
1279 */
1280 n = pp->pr_itemsperpage;
1281 pp->pr_nitems += n;
1282
1283 ph->ph_off = cp - storage;
1284
1285 if (pp->pr_roflags & PR_NOTOUCH) {
1286 uint16_t *freelist = PR_FREELIST(ph);
1287 int i;
1288
1289 ph->ph_firstfree = 0;
1290 for (i = 0; i < n - 1; i++)
1291 freelist[i] = i + 1;
1292 freelist[n - 1] = PR_INDEX_EOL;
1293 } else {
1294 while (n--) {
1295 pi = (struct pool_item *)cp;
1296
1297 KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1298
1299 /* Insert on page list */
1300 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1301 #ifdef DIAGNOSTIC
1302 pi->pi_magic = PI_MAGIC;
1303 #endif
1304 cp = (caddr_t)(cp + pp->pr_size);
1305 }
1306 }
1307
1308 /*
1309 * If the pool was depleted, point at the new page.
1310 */
1311 if (pp->pr_curpage == NULL)
1312 pp->pr_curpage = ph;
1313
1314 if (++pp->pr_npages > pp->pr_hiwat)
1315 pp->pr_hiwat = pp->pr_npages;
1316 }
1317
1318 /*
1319  * Used by pool_get() when nitems drops below the low water mark, to
1320  * bring pr_nitems back up to the low water mark.
1321 *
1322 * Note 1, we never wait for memory here, we let the caller decide what to do.
1323 *
1324 * Note 2, we must be called with the pool already locked, and we return
1325 * with it locked.
1326 */
1327 static int
1328 pool_catchup(struct pool *pp)
1329 {
1330 struct pool_item_header *ph = NULL;
1331 caddr_t cp;
1332 int error = 0;
1333
1334 while (POOL_NEEDS_CATCHUP(pp)) {
1335 /*
1336 * Call the page back-end allocator for more memory.
1337 *
1338 * XXX: We never wait, so should we bother unlocking
1339 * the pool descriptor?
1340 */
1341 simple_unlock(&pp->pr_slock);
1342 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1343 if (__predict_true(cp != NULL))
1344 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1345 if (__predict_false(cp == NULL || ph == NULL)) {
1346 if (cp != NULL)
1347 pool_allocator_free(pp, cp);
1348 error = ENOMEM;
1349 simple_lock(&pp->pr_slock);
1350 break;
1351 }
1352 simple_lock(&pp->pr_slock);
1353 pool_prime_page(pp, cp, ph);
1354 pp->pr_npagealloc++;
1355 }
1356
1357 return (error);
1358 }
1359
1360 static void
1361 pool_update_curpage(struct pool *pp)
1362 {
1363
1364 pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1365 if (pp->pr_curpage == NULL) {
1366 pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1367 }
1368 }
1369
1370 void
1371 pool_setlowat(struct pool *pp, int n)
1372 {
1373
1374 simple_lock(&pp->pr_slock);
1375
1376 pp->pr_minitems = n;
1377 pp->pr_minpages = (n == 0)
1378 ? 0
1379 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1380
1381 /* Make sure we're caught up with the newly-set low water mark. */
1382 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1383 /*
1384 * XXX: Should we log a warning? Should we set up a timeout
1385 * to try again in a second or so? The latter could break
1386 * a caller's assumptions about interrupt protection, etc.
1387 */
1388 }
1389
1390 simple_unlock(&pp->pr_slock);
1391 }
1392
1393 void
1394 pool_sethiwat(struct pool *pp, int n)
1395 {
1396
1397 simple_lock(&pp->pr_slock);
1398
1399 pp->pr_maxpages = (n == 0)
1400 ? 0
1401 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1402
1403 simple_unlock(&pp->pr_slock);
1404 }
1405
1406 void
1407 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1408 {
1409
1410 simple_lock(&pp->pr_slock);
1411
1412 pp->pr_hardlimit = n;
1413 pp->pr_hardlimit_warning = warnmess;
1414 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1415 pp->pr_hardlimit_warning_last.tv_sec = 0;
1416 pp->pr_hardlimit_warning_last.tv_usec = 0;
1417
1418 /*
1419 * In-line version of pool_sethiwat(), because we don't want to
1420 * release the lock.
1421 */
1422 pp->pr_maxpages = (n == 0)
1423 ? 0
1424 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1425
1426 simple_unlock(&pp->pr_slock);
1427 }
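/*
 * Illustrative sketch (not compiled): tuning a pool with the water mark
 * and hard limit knobs above.  The numbers and names are hypothetical;
 * the calls are the interfaces defined in this file.
 */
#if 0
static struct pool extune_pool;

static void
extune_setup(void)
{

	pool_init(&extune_pool, 128, 0, 0, 0, "extune", NULL);

	/* Keep at least 16 items on hand at all times... */
	pool_setlowat(&extune_pool, 16);

	/* ...let pool_reclaim() trim back toward roughly 256 items... */
	pool_sethiwat(&extune_pool, 256);

	/* ...and never hand out more than 1024, warning at most once a minute. */
	pool_sethardlimit(&extune_pool, 1024,
	    "WARNING: extune_pool limit reached", 60);
}
#endif /* 0 */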
1428
1429 /*
1430 * Release all complete pages that have not been used recently.
1431 */
1432 int
1433 #ifdef POOL_DIAGNOSTIC
1434 _pool_reclaim(struct pool *pp, const char *file, long line)
1435 #else
1436 pool_reclaim(struct pool *pp)
1437 #endif
1438 {
1439 struct pool_item_header *ph, *phnext;
1440 struct pool_cache *pc;
1441 struct timeval curtime;
1442 struct pool_pagelist pq;
1443 struct timeval diff;
1444 int s;
1445
1446 if (pp->pr_drain_hook != NULL) {
1447 /*
1448 * The drain hook must be called with the pool unlocked.
1449 */
1450 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1451 }
1452
1453 if (simple_lock_try(&pp->pr_slock) == 0)
1454 return (0);
1455 pr_enter(pp, file, line);
1456
1457 LIST_INIT(&pq);
1458
1459 /*
1460 * Reclaim items from the pool's caches.
1461 */
1462 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1463 pool_cache_reclaim(pc);
1464
1465 s = splclock();
1466 curtime = mono_time;
1467 splx(s);
1468
1469 for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1470 phnext = LIST_NEXT(ph, ph_pagelist);
1471
1472 /* Check our minimum page claim */
1473 if (pp->pr_npages <= pp->pr_minpages)
1474 break;
1475
1476 KASSERT(ph->ph_nmissing == 0);
1477 timersub(&curtime, &ph->ph_time, &diff);
1478 if (diff.tv_sec < pool_inactive_time)
1479 continue;
1480
1481 /*
1482 * If freeing this page would put us below
1483 * the low water mark, stop now.
1484 */
1485 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1486 pp->pr_minitems)
1487 break;
1488
1489 pr_rmpage(pp, ph, &pq);
1490 }
1491
1492 pr_leave(pp);
1493 simple_unlock(&pp->pr_slock);
1494 if (LIST_EMPTY(&pq))
1495 return (0);
1496
1497 while ((ph = LIST_FIRST(&pq)) != NULL) {
1498 LIST_REMOVE(ph, ph_pagelist);
1499 pool_allocator_free(pp, ph->ph_page);
1500 if (pp->pr_roflags & PR_PHINPAGE) {
1501 continue;
1502 }
1503 s = splvm();
1504 pool_put(pp->pr_phpool, ph);
1505 splx(s);
1506 }
1507
1508 return (1);
1509 }
1510
1511 /*
1512 * Drain pools, one at a time.
1513 *
1514 * Note, we must never be called from an interrupt context.
1515 */
1516 void
1517 pool_drain(void *arg)
1518 {
1519 struct pool *pp;
1520 int s;
1521
1522 pp = NULL;
1523 s = splvm();
1524 simple_lock(&pool_head_slock);
1525 if (drainpp == NULL) {
1526 drainpp = TAILQ_FIRST(&pool_head);
1527 }
1528 if (drainpp) {
1529 pp = drainpp;
1530 drainpp = TAILQ_NEXT(pp, pr_poollist);
1531 }
1532 simple_unlock(&pool_head_slock);
1533 pool_reclaim(pp);
1534 splx(s);
1535 }
1536
1537 /*
1538 * Diagnostic helpers.
1539 */
1540 void
1541 pool_print(struct pool *pp, const char *modif)
1542 {
1543 int s;
1544
1545 s = splvm();
1546 if (simple_lock_try(&pp->pr_slock) == 0) {
1547 printf("pool %s is locked; try again later\n",
1548 pp->pr_wchan);
1549 splx(s);
1550 return;
1551 }
1552 pool_print1(pp, modif, printf);
1553 simple_unlock(&pp->pr_slock);
1554 splx(s);
1555 }
1556
1557 void
1558 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1559 {
1560 int didlock = 0;
1561
1562 if (pp == NULL) {
1563 (*pr)("Must specify a pool to print.\n");
1564 return;
1565 }
1566
1567 /*
1568 * Called from DDB; interrupts should be blocked, and all
1569 * other processors should be paused. We can skip locking
1570 * the pool in this case.
1571 *
1572 * We do a simple_lock_try() just to print the lock
1573 * status, however.
1574 */
1575
1576 if (simple_lock_try(&pp->pr_slock) == 0)
1577 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1578 else
1579 didlock = 1;
1580
1581 pool_print1(pp, modif, pr);
1582
1583 if (didlock)
1584 simple_unlock(&pp->pr_slock);
1585 }
1586
1587 static void
1588 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1589 void (*pr)(const char *, ...))
1590 {
1591 struct pool_item_header *ph;
1592 #ifdef DIAGNOSTIC
1593 struct pool_item *pi;
1594 #endif
1595
1596 LIST_FOREACH(ph, pl, ph_pagelist) {
1597 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1598 ph->ph_page, ph->ph_nmissing,
1599 (u_long)ph->ph_time.tv_sec,
1600 (u_long)ph->ph_time.tv_usec);
1601 #ifdef DIAGNOSTIC
1602 if (!(pp->pr_roflags & PR_NOTOUCH)) {
1603 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1604 if (pi->pi_magic != PI_MAGIC) {
1605 (*pr)("\t\t\titem %p, magic 0x%x\n",
1606 pi, pi->pi_magic);
1607 }
1608 }
1609 }
1610 #endif
1611 }
1612 }
1613
1614 static void
1615 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1616 {
1617 struct pool_item_header *ph;
1618 struct pool_cache *pc;
1619 struct pool_cache_group *pcg;
1620 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1621 char c;
1622
1623 while ((c = *modif++) != '\0') {
1624 if (c == 'l')
1625 print_log = 1;
1626 if (c == 'p')
1627 print_pagelist = 1;
1628 if (c == 'c')
1629 print_cache = 1;
1630 }
1631
1632 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1633 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1634 pp->pr_roflags);
1635 (*pr)("\talloc %p\n", pp->pr_alloc);
1636 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1637 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1638 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1639 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1640
1641 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1642 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1643 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1644 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1645
1646 if (print_pagelist == 0)
1647 goto skip_pagelist;
1648
1649 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1650 (*pr)("\n\tempty page list:\n");
1651 pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1652 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1653 (*pr)("\n\tfull page list:\n");
1654 pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1655 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1656 (*pr)("\n\tpartial-page list:\n");
1657 pool_print_pagelist(pp, &pp->pr_partpages, pr);
1658
1659 if (pp->pr_curpage == NULL)
1660 (*pr)("\tno current page\n");
1661 else
1662 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1663
1664 skip_pagelist:
1665 if (print_log == 0)
1666 goto skip_log;
1667
1668 (*pr)("\n");
1669 if ((pp->pr_roflags & PR_LOGGING) == 0)
1670 (*pr)("\tno log\n");
1671 else
1672 pr_printlog(pp, NULL, pr);
1673
1674 skip_log:
1675 if (print_cache == 0)
1676 goto skip_cache;
1677
1678 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1679 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1680 pc->pc_allocfrom, pc->pc_freeto);
1681 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1682 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1683 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1684 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1685 for (i = 0; i < PCG_NOBJECTS; i++) {
1686 if (pcg->pcg_objects[i].pcgo_pa !=
1687 POOL_PADDR_INVALID) {
1688 (*pr)("\t\t\t%p, 0x%llx\n",
1689 pcg->pcg_objects[i].pcgo_va,
1690 (unsigned long long)
1691 pcg->pcg_objects[i].pcgo_pa);
1692 } else {
1693 (*pr)("\t\t\t%p\n",
1694 pcg->pcg_objects[i].pcgo_va);
1695 }
1696 }
1697 }
1698 }
1699
1700 skip_cache:
1701 pr_enter_check(pp, pr);
1702 }
1703
1704 static int
1705 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1706 {
1707 struct pool_item *pi;
1708 caddr_t page;
1709 int n;
1710
1711 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1712 if (page != ph->ph_page &&
1713 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1714 if (label != NULL)
1715 printf("%s: ", label);
1716 printf("pool(%p:%s): page inconsistency: page %p;"
1717 " at page head addr %p (p %p)\n", pp,
1718 pp->pr_wchan, ph->ph_page,
1719 ph, page);
1720 return 1;
1721 }
1722
1723 if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1724 return 0;
1725
1726 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1727 pi != NULL;
1728 pi = TAILQ_NEXT(pi,pi_list), n++) {
1729
1730 #ifdef DIAGNOSTIC
1731 if (pi->pi_magic != PI_MAGIC) {
1732 if (label != NULL)
1733 printf("%s: ", label);
1734 printf("pool(%s): free list modified: magic=%x;"
1735 " page %p; item ordinal %d;"
1736 " addr %p (p %p)\n",
1737 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1738 n, pi, page);
1739 panic("pool");
1740 }
1741 #endif
1742 page =
1743 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1744 if (page == ph->ph_page)
1745 continue;
1746
1747 if (label != NULL)
1748 printf("%s: ", label);
1749 printf("pool(%p:%s): page inconsistency: page %p;"
1750 " item ordinal %d; addr %p (p %p)\n", pp,
1751 pp->pr_wchan, ph->ph_page,
1752 n, pi, page);
1753 return 1;
1754 }
1755 return 0;
1756 }
1757
1758
1759 int
1760 pool_chk(struct pool *pp, const char *label)
1761 {
1762 struct pool_item_header *ph;
1763 int r = 0;
1764
1765 simple_lock(&pp->pr_slock);
1766 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1767 r = pool_chk_page(pp, label, ph);
1768 if (r) {
1769 goto out;
1770 }
1771 }
1772 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1773 r = pool_chk_page(pp, label, ph);
1774 if (r) {
1775 goto out;
1776 }
1777 }
1778 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1779 r = pool_chk_page(pp, label, ph);
1780 if (r) {
1781 goto out;
1782 }
1783 }
1784
1785 out:
1786 simple_unlock(&pp->pr_slock);
1787 return (r);
1788 }
1789
1790 /*
1791 * pool_cache_init:
1792 *
1793 * Initialize a pool cache.
1794 *
1795 * NOTE: If the pool must be protected from interrupts, we expect
1796 * to be called at the appropriate interrupt priority level.
1797 */
1798 void
1799 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1800 int (*ctor)(void *, void *, int),
1801 void (*dtor)(void *, void *),
1802 void *arg)
1803 {
1804
1805 TAILQ_INIT(&pc->pc_grouplist);
1806 simple_lock_init(&pc->pc_slock);
1807
1808 pc->pc_allocfrom = NULL;
1809 pc->pc_freeto = NULL;
1810 pc->pc_pool = pp;
1811
1812 pc->pc_ctor = ctor;
1813 pc->pc_dtor = dtor;
1814 pc->pc_arg = arg;
1815
1816 pc->pc_hits = 0;
1817 pc->pc_misses = 0;
1818
1819 pc->pc_ngroups = 0;
1820
1821 pc->pc_nitems = 0;
1822
1823 simple_lock(&pp->pr_slock);
1824 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1825 simple_unlock(&pp->pr_slock);
1826 }
1827
1828 /*
1829 * pool_cache_destroy:
1830 *
1831 * Destroy a pool cache.
1832 */
1833 void
1834 pool_cache_destroy(struct pool_cache *pc)
1835 {
1836 struct pool *pp = pc->pc_pool;
1837
1838 /* First, invalidate the entire cache. */
1839 pool_cache_invalidate(pc);
1840
1841 /* ...and remove it from the pool's cache list. */
1842 simple_lock(&pp->pr_slock);
1843 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1844 simple_unlock(&pp->pr_slock);
1845 }
1846
1847 static __inline void *
1848 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1849 {
1850 void *object;
1851 u_int idx;
1852
1853 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1854 KASSERT(pcg->pcg_avail != 0);
1855 idx = --pcg->pcg_avail;
1856
1857 KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1858 object = pcg->pcg_objects[idx].pcgo_va;
1859 if (pap != NULL)
1860 *pap = pcg->pcg_objects[idx].pcgo_pa;
1861 pcg->pcg_objects[idx].pcgo_va = NULL;
1862
1863 return (object);
1864 }
1865
1866 static __inline void
1867 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1868 {
1869 u_int idx;
1870
1871 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1872 idx = pcg->pcg_avail++;
1873
1874 KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1875 pcg->pcg_objects[idx].pcgo_va = object;
1876 pcg->pcg_objects[idx].pcgo_pa = pa;
1877 }
1878
1879 /*
1880 * pool_cache_get{,_paddr}:
1881 *
1882 * Get an object from a pool cache (optionally returning
1883 * the physical address of the object).
1884 */
1885 void *
1886 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1887 {
1888 struct pool_cache_group *pcg;
1889 void *object;
1890
1891 #ifdef LOCKDEBUG
1892 if (flags & PR_WAITOK)
1893 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1894 #endif
1895
1896 simple_lock(&pc->pc_slock);
1897
1898 if ((pcg = pc->pc_allocfrom) == NULL) {
1899 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1900 if (pcg->pcg_avail != 0) {
1901 pc->pc_allocfrom = pcg;
1902 goto have_group;
1903 }
1904 }
1905
1906 /*
1907 * No groups with any available objects. Allocate
1908 * a new object, construct it, and return it to
1909 * the caller. We will allocate a group, if necessary,
1910 * when the object is freed back to the cache.
1911 */
1912 pc->pc_misses++;
1913 simple_unlock(&pc->pc_slock);
1914 object = pool_get(pc->pc_pool, flags);
1915 if (object != NULL && pc->pc_ctor != NULL) {
1916 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1917 pool_put(pc->pc_pool, object);
1918 return (NULL);
1919 }
1920 }
1921 if (object != NULL && pap != NULL) {
1922 #ifdef POOL_VTOPHYS
1923 *pap = POOL_VTOPHYS(object);
1924 #else
1925 *pap = POOL_PADDR_INVALID;
1926 #endif
1927 }
1928 return (object);
1929 }
1930
1931 have_group:
1932 pc->pc_hits++;
1933 pc->pc_nitems--;
1934 object = pcg_get(pcg, pap);
1935
1936 if (pcg->pcg_avail == 0)
1937 pc->pc_allocfrom = NULL;
1938
1939 simple_unlock(&pc->pc_slock);
1940
1941 return (object);
1942 }
1943
1944 /*
1945 * pool_cache_put{,_paddr}:
1946 *
1947 * Put an object back to the pool cache (optionally caching the
1948 * physical address of the object).
1949 */
1950 void
1951 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1952 {
1953 struct pool_cache_group *pcg;
1954 int s;
1955
1956 simple_lock(&pc->pc_slock);
1957
1958 if ((pcg = pc->pc_freeto) == NULL) {
1959 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1960 if (pcg->pcg_avail != PCG_NOBJECTS) {
1961 pc->pc_freeto = pcg;
1962 goto have_group;
1963 }
1964 }
1965
1966 /*
1967 		 * No group has room to take the object.  Attempt to
1968 		 * allocate a new group.
1969 */
1970 simple_unlock(&pc->pc_slock);
1971 s = splvm();
1972 pcg = pool_get(&pcgpool, PR_NOWAIT);
1973 splx(s);
1974 if (pcg != NULL) {
1975 memset(pcg, 0, sizeof(*pcg));
1976 simple_lock(&pc->pc_slock);
1977 pc->pc_ngroups++;
1978 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1979 if (pc->pc_freeto == NULL)
1980 pc->pc_freeto = pcg;
1981 goto have_group;
1982 }
1983
1984 /*
1985 * Unable to allocate a cache group; destruct the object
1986 * and free it back to the pool.
1987 */
1988 pool_cache_destruct_object(pc, object);
1989 return;
1990 }
1991
1992 have_group:
1993 pc->pc_nitems++;
1994 pcg_put(pcg, object, pa);
1995
1996 if (pcg->pcg_avail == PCG_NOBJECTS)
1997 pc->pc_freeto = NULL;
1998
1999 simple_unlock(&pc->pc_slock);
2000 }
2001
2002 /*
2003 * pool_cache_destruct_object:
2004 *
2005 * Force destruction of an object and its release back into
2006 * the pool.
2007 */
2008 void
2009 pool_cache_destruct_object(struct pool_cache *pc, void *object)
2010 {
2011
2012 if (pc->pc_dtor != NULL)
2013 (*pc->pc_dtor)(pc->pc_arg, object);
2014 pool_put(pc->pc_pool, object);
2015 }
2016
2017 /*
2018 * pool_cache_do_invalidate:
2019 *
2020 * This internal function implements pool_cache_invalidate() and
2021 * pool_cache_reclaim().
2022 */
2023 static void
2024 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
2025 void (*putit)(struct pool *, void *))
2026 {
2027 struct pool_cache_group *pcg, *npcg;
2028 void *object;
2029 int s;
2030
2031 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
2032 pcg = npcg) {
2033 npcg = TAILQ_NEXT(pcg, pcg_list);
2034 while (pcg->pcg_avail != 0) {
2035 pc->pc_nitems--;
2036 object = pcg_get(pcg, NULL);
2037 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
2038 pc->pc_allocfrom = NULL;
2039 if (pc->pc_dtor != NULL)
2040 (*pc->pc_dtor)(pc->pc_arg, object);
2041 (*putit)(pc->pc_pool, object);
2042 }
2043 if (free_groups) {
2044 pc->pc_ngroups--;
2045 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
2046 if (pc->pc_freeto == pcg)
2047 pc->pc_freeto = NULL;
2048 s = splvm();
2049 pool_put(&pcgpool, pcg);
2050 splx(s);
2051 }
2052 }
2053 }
2054
2055 /*
2056 * pool_cache_invalidate:
2057 *
2058 * Invalidate a pool cache (destruct and release all of the
2059 * cached objects).
2060 */
2061 void
2062 pool_cache_invalidate(struct pool_cache *pc)
2063 {
2064
2065 simple_lock(&pc->pc_slock);
2066 pool_cache_do_invalidate(pc, 0, pool_put);
2067 simple_unlock(&pc->pc_slock);
2068 }
2069
2070 /*
2071 * pool_cache_reclaim:
2072 *
2073 * Reclaim a pool cache for pool_reclaim().
2074 */
2075 static void
2076 pool_cache_reclaim(struct pool_cache *pc)
2077 {
2078
2079 simple_lock(&pc->pc_slock);
2080 pool_cache_do_invalidate(pc, 1, pool_do_put);
2081 simple_unlock(&pc->pc_slock);
2082 }
2083
2084 /*
2085 * Pool backend allocators.
2086 *
2087 * Each pool has a backend allocator that handles allocation, deallocation,
2088 * and any additional draining that might be needed.
2089 *
2090 * We provide two standard allocators:
2091 *
2092 * pool_allocator_kmem - the default when no allocator is specified
2093 *
2094 * pool_allocator_nointr - used for pools that will not be accessed
2095 * in interrupt context.
2096 */
2097 void *pool_page_alloc(struct pool *, int);
2098 void pool_page_free(struct pool *, void *);
2099
2100 struct pool_allocator pool_allocator_kmem = {
2101 pool_page_alloc, pool_page_free, 0,
2102 };
2103
2104 void *pool_page_alloc_nointr(struct pool *, int);
2105 void pool_page_free_nointr(struct pool *, void *);
2106
2107 struct pool_allocator pool_allocator_nointr = {
2108 pool_page_alloc_nointr, pool_page_free_nointr, 0,
2109 };
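/*
 * Illustrative sketch (not compiled): a pool client with its own source
 * of backing pages can supply a private allocator instead of the standard
 * ones defined above.  `example_submap' and the function names are
 * hypothetical; leaving pa_pagesz at 0 lets pool_init() default it.
 */
#if 0
static void *
exmap_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	/* Back the pool from a hypothetical private submap. */
	return ((void *) uvm_km_alloc_poolpage1(example_submap, NULL, waitok));
}

static void
exmap_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage1(example_submap, (vaddr_t) v);
}

static struct pool_allocator exmap_allocator = {
	exmap_page_alloc, exmap_page_free, 0,
};
#endif /* 0 */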
2110
2111 #ifdef POOL_SUBPAGE
2112 void *pool_subpage_alloc(struct pool *, int);
2113 void pool_subpage_free(struct pool *, void *);
2114
2115 struct pool_allocator pool_allocator_kmem_subpage = {
2116 pool_subpage_alloc, pool_subpage_free, 0,
2117 };
2118 #endif /* POOL_SUBPAGE */
2119
2120 /*
2121 * We have at least three different resources for the same allocation and
2122 * each resource can be depleted. First, we have the ready elements in the
2123 * pool. Then we have the resource (typically a vm_map) for this allocator.
2124 * Finally, we have physical memory. Waiting for any of these can be
2125 * unnecessary when any other is freed, but the kernel doesn't support
2126 * sleeping on multiple wait channels, so we have to employ another strategy.
2127 *
2128 * The caller sleeps on the pool (so that it can be awakened when an item
2129 * is returned to the pool), but we set PA_WANT on the allocator. When a
2130 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
2131 * will wake up all sleeping pools belonging to this allocator.
2132 *
2133 * XXX Thundering herd.
2134 */
2135 void *
2136 pool_allocator_alloc(struct pool *org, int flags)
2137 {
2138 struct pool_allocator *pa = org->pr_alloc;
2139 struct pool *pp, *start;
2140 int s, freed;
2141 void *res;
2142
2143 LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
2144
2145 do {
2146 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2147 return (res);
2148 if ((flags & PR_WAITOK) == 0) {
2149 /*
2150 			 * We only run the drain hook here if PR_NOWAIT.
2151 * In other cases, the hook will be run in
2152 * pool_reclaim().
2153 */
2154 if (org->pr_drain_hook != NULL) {
2155 (*org->pr_drain_hook)(org->pr_drain_hook_arg,
2156 flags);
2157 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2158 return (res);
2159 }
2160 break;
2161 }
2162
2163 /*
2164 * Drain all pools, except "org", that use this
2165 * allocator. We do this to reclaim VA space.
2166 * pa_alloc is responsible for waiting for
2167 * physical memory.
2168 *
2169 		 * XXX We risk looping forever if someone calls
2170 		 * pool_destroy() on "start".  But there is no
2171 * other way to have potentially sleeping pool_reclaim,
2172 * non-sleeping locks on pool_allocator, and some
2173 * stirring of drained pools in the allocator.
2174 *
2175 * XXX Maybe we should use pool_head_slock for locking
2176 * the allocators?
2177 */
2178 freed = 0;
2179
2180 s = splvm();
2181 simple_lock(&pa->pa_slock);
2182 pp = start = TAILQ_FIRST(&pa->pa_list);
2183 do {
2184 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2185 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2186 if (pp == org)
2187 continue;
2188 simple_unlock(&pa->pa_slock);
2189 freed = pool_reclaim(pp);
2190 simple_lock(&pa->pa_slock);
2191 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2192 freed == 0);
2193
2194 if (freed == 0) {
2195 /*
2196 * We set PA_WANT here, the caller will most likely
2197 * sleep waiting for pages (if not, this won't hurt
2198 * that much), and there is no way to set this in
2199 * the caller without violating locking order.
2200 */
2201 pa->pa_flags |= PA_WANT;
2202 }
2203 simple_unlock(&pa->pa_slock);
2204 splx(s);
2205 } while (freed);
2206 return (NULL);
2207 }
2208
2209 void
2210 pool_allocator_free(struct pool *pp, void *v)
2211 {
2212 struct pool_allocator *pa = pp->pr_alloc;
2213 int s;
2214
2215 LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2216
2217 (*pa->pa_free)(pp, v);
2218
2219 s = splvm();
2220 simple_lock(&pa->pa_slock);
2221 if ((pa->pa_flags & PA_WANT) == 0) {
2222 simple_unlock(&pa->pa_slock);
2223 splx(s);
2224 return;
2225 }
2226
2227 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2228 simple_lock(&pp->pr_slock);
2229 if ((pp->pr_flags & PR_WANTED) != 0) {
2230 pp->pr_flags &= ~PR_WANTED;
2231 wakeup(pp);
2232 }
2233 simple_unlock(&pp->pr_slock);
2234 }
2235 pa->pa_flags &= ~PA_WANT;
2236 simple_unlock(&pa->pa_slock);
2237 splx(s);
2238 }
2239
2240 void *
2241 pool_page_alloc(struct pool *pp, int flags)
2242 {
2243 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2244
2245 return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, NULL, waitok));
2246 }
2247
2248 void
2249 pool_page_free(struct pool *pp, void *v)
2250 {
2251
2252 uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2253 }
2254
2255 static void *
2256 pool_page_alloc_meta(struct pool *pp, int flags)
2257 {
2258 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2259
2260 return ((void *) uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
2261 }
2262
2263 static void
2264 pool_page_free_meta(struct pool *pp, void *v)
2265 {
2266
2267 uvm_km_free_poolpage1(kmem_map, (vaddr_t) v);
2268 }
2269
2270 #ifdef POOL_SUBPAGE
2271 /* Sub-page allocator, for machines with large hardware pages. */
2272 void *
2273 pool_subpage_alloc(struct pool *pp, int flags)
2274 {
2275 void *v;
2276 int s;
2277 s = splvm();
2278 v = pool_get(&psppool, flags);
2279 splx(s);
2280 return v;
2281 }
2282
2283 void
2284 pool_subpage_free(struct pool *pp, void *v)
2285 {
2286 int s;
2287 s = splvm();
2288 pool_put(&psppool, v);
2289 splx(s);
2290 }
2291
2292 /* We don't provide a real nointr allocator. Maybe later. */
2293 void *
2294 pool_page_alloc_nointr(struct pool *pp, int flags)
2295 {
2296
2297 return (pool_subpage_alloc(pp, flags));
2298 }
2299
2300 void
2301 pool_page_free_nointr(struct pool *pp, void *v)
2302 {
2303
2304 pool_subpage_free(pp, v);
2305 }
2306 #else
2307 void *
2308 pool_page_alloc_nointr(struct pool *pp, int flags)
2309 {
2310 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2311
2312 return ((void *) uvm_km_alloc_poolpage_cache(kernel_map,
2313 uvm.kernel_object, waitok));
2314 }
2315
2316 void
2317 pool_page_free_nointr(struct pool *pp, void *v)
2318 {
2319
2320 uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
2321 }
2322 #endif /* POOL_SUBPAGE */
2323