/*	$NetBSD: subr_pool.c,v 1.112 2006/02/24 11:46:20 bjh21 Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.112 2006/02/24 11:46:20 bjh21 Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size.  Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively.  The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header.  The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
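
/*
 * Typical usage, as a minimal illustrative sketch (`struct foo' and
 * the pool and wchan names here are hypothetical, not part of this
 * file):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	(passing a NULL allocator selects pool_allocator_kmem)
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 */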

/* List of all pools */
LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
static struct pool_allocator pool_allocator_meta = {
	pool_page_alloc_meta, pool_page_free_meta
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

typedef uint8_t pool_item_freelist_t;

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			uint16_t
				phu_off;	/* start offset in page */
			pool_item_freelist_t
				phu_firstfree;	/* first free item */
			/*
			 * XXX it might be better to use
			 * a simple bitmap and ffs(3)
			 */
		} phu_notouch;
	} ph_u;
	uint16_t		ph_nmissing;	/* # of chunks in use */
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_off		ph_u.phu_notouch.phu_off
#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeefU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; construction and destruction
 * are deferred until absolutely necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
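
/*
 * A minimal pool cache usage sketch (the names are hypothetical; the
 * constructor/destructor signatures match pool_cache_init() below,
 * and pool_cache_get()/pool_cache_put() are assumed to be the
 * non-paddr wrappers from <sys/pool.h>):
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	obj = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, obj);
 */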

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
				   struct pool_cache_grouplist *);
static void	pcg_grouplist_free(struct pool_cache_grouplist *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry.  An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry.  Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static inline int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

#define	PR_FREELIST_ALIGN(p) \
	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
#define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
#define	PR_INDEX_USED	((pool_item_freelist_t)-1)
#define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
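
/*
 * Example of the PR_NOTOUCH freelist encoding (illustrative values):
 * on a page with four items where items 0 and 2 are free,
 *
 *	ph_firstfree == 0
 *	freelist[]   == { 2, PR_INDEX_USED, PR_INDEX_EOL, PR_INDEX_USED }
 *
 * i.e. each free slot holds the index of the next free item and each
 * allocated slot is marked PR_INDEX_USED, as maintained by
 * pr_item_notouch_get() and pr_item_notouch_put() below.
 */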

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] == PR_INDEX_USED);
	freelist[idx] = ph->ph_firstfree;
	ph->ph_firstfree = idx;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	int idx = ph->ph_firstfree;
	pool_item_freelist_t *freelist = PR_FREELIST(ph);

	KASSERT(freelist[idx] != PR_INDEX_USED);
	ph->ph_firstfree = freelist[idx];
	freelist[idx] = PR_INDEX_USED;

	return ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
	if (a->ph_page < b->ph_page)
		return (-1);
	else if (a->ph_page > b->ph_page)
		return (1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	tmp.ph_page = page;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;
	int s;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
			s = splvm();
			pool_put(pp->pr_phpool, ph);
			splx(s);
		}
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    struct pool_pagelist *pq)
{

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
	__link_set_decl(pools, struct link_pool_init);
	struct link_pool_init * const *pi;

	__link_set_foreach(pi, pools)
		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
		    (*pi)->palloc);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
	int off, slack;
	size_t trysize, phsize;
	int s;

	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		simple_lock_init(&palloc->pa_slock);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
		palloc->pa_flags |= PA_INITIALIZED;
	}

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = roundup(size, align);
#ifdef DIAGNOSTIC
	if (size > palloc->pa_pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);
#endif

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	LIST_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;

	/*
	 * Decide whether to put the page header off-page, to avoid
	 * wasting too large a part of the page or too big an item.
	 * Off-page page headers are kept in a splay tree, so we can
	 * match a returned item with its header based on the page
	 * address.  We use 1/16 of the page size and about 8 times
	 * the item size as the threshold.  (XXX: tune)
	 *
	 * However, we'll put the header into the page if we can put
	 * it there without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}
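
	/*
	 * Worked example of the heuristic above (illustrative numbers
	 * only, assuming a 4096-byte page and a 48-byte aligned page
	 * header): a 64-byte item is below MIN(4096/16, 48*8) == 256,
	 * so its header lives in the page; a 512-byte item is not,
	 * and 4096/512 != (4096-48)/512, so its header is taken from
	 * phpool instead.
	 */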

	/*
	 * Alignment is to take place at `ioff' within the item.  This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * if you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
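
	/*
	 * E.g. (illustrative numbers): with 96 bytes of slack and
	 * align == 32, successive pages start their items at offsets
	 * 0, 32, 64 and 96, spreading items across cache lines rather
	 * than letting every page alias the same ones.
	 */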

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool[0].pr_size == 0) {
		int idx;
		for (idx = 0; idx < PHPOOL_MAX; idx++) {
			static char phpool_names[PHPOOL_MAX][6+1+6+1];
			int nelem;
			size_t sz;

			nelem = PHPOOL_FREELIST_NELEM(idx);
			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
			    "phpool-%d", nelem);
			sz = sizeof(struct pool_item_header);
			if (nelem) {
				sz = PR_FREELIST_ALIGN(sz)
				    + nelem * sizeof(pool_item_freelist_t);
			}
			pool_init(&phpool[idx], sz, 0, 0, 0,
			    phpool_names[idx], &pool_allocator_meta);
		}
#ifdef POOL_SUBPAGE
		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
		    PR_RECURSIVE, "psppool", &pool_allocator_meta);
#endif
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", &pool_allocator_meta);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);

	/* Insert this into the list of pools using this allocator. */
	s = splvm();
	simple_lock(&palloc->pa_slock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&palloc->pa_slock);
	splx(s);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;
	int s;

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	LIST_REMOVE(pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	simple_unlock(&pool_head_slock);

	/* Remove this pool from its allocator's list of pools. */
	s = splvm();
	simple_lock(&pp->pr_alloc->pa_slock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	simple_unlock(&pp->pr_alloc->pa_slock);
	splx(s);

	s = splvm();
	simple_lock(&pp->pr_slock);

	KASSERT(LIST_EMPTY(&pp->pr_cachelist));

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u",
		    pp->pr_nout);
	}
#endif

	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	simple_unlock(&pp->pr_slock);
	splx(s);

	pr_pagelist_free(pp, &pq);

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
	if (pp->pr_drain_hook != NULL)
		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item_header *ph;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
	else {
		s = splvm();
		ph = pool_get(pp->pr_phpool, flags);
		splx(s);
	}

	return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_itemsperpage == 0))
		panic("pool_get: pool %p: pr_itemsperpage is zero, "
		    "pool not initialized?", pp);
	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
	    (flags & PR_WAITOK) != 0))
		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#endif /* DIAGNOSTIC */
#ifdef LOCKDEBUG
	if (flags & PR_WAITOK)
		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
	SCHED_ASSERT_UNLOCKED();
#endif

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket.  In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = pool_allocator_alloc(pp, flags);
		if (__predict_true(v != NULL))
			ph = pool_alloc_item_header(pp, v, flags);

		if (__predict_false(v == NULL || ph == NULL)) {
			if (v != NULL)
				pool_allocator_free(pp, v);

			simple_lock(&pp->pr_slock);
			pr_enter(pp, file, line);

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * Wake up once a second and try again,
			 * as the check in pool_cache_put_paddr() is racy.
			 */
			pp->pr_flags |= PR_WANTED;
			/* PA_WANTED is already set on the allocator. */
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, hz, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);
		pool_prime_page(pp, v, ph);
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#endif
		v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: page empty", pp->pr_wchan);
		}
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nitems == 0)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: items on itemlist, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent");
		}
#endif

#ifdef POOL_DIAGNOSTIC
		pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
		if (__predict_false(pi->pi_magic != PI_MAGIC)) {
			pr_printlog(pp, pi, printf);
			panic("pool_get(%s): free list modified: "
			    "magic=%x; page %p; item addr %p\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
		}
#endif

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;

		/*
		 * This page was previously empty.  Move it to the list of
		 * partially-full pages.  This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
		if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
		    !LIST_EMPTY(&ph->ph_itemlist))) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * This page is now full.  Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;
	pr_leave(pp);

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
	SCHED_ASSERT_UNLOCKED();

	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system.  ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    (pp->pr_npages > pp->pr_maxpages ||
		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page.  The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pr_log(pp, v, PRLOG_PUT, file, line);

	pool_do_put(pp, v, &pq);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);

	pr_pagelist_free(pp, &pq);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	simple_lock(&pp->pr_slock);
	pool_do_put(pp, v, &pq);
	simple_unlock(&pp->pr_slock);

	pr_pagelist_free(pp, &pq);
}

#ifdef POOL_DIAGNOSTIC
#define	pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int newpages;

	simple_lock(&pp->pr_slock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages-- > 0) {
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			simple_lock(&pp->pr_slock);
			break;
		}

		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
		pp->pr_minpages++;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_slock);
	return (0);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;
	int s;

	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	s = splclock();
	ph->ph_time = mono_time;
	splx(s);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_NOTOUCH) {
		pool_item_freelist_t *freelist = PR_FREELIST(ph);
		int i;

		ph->ph_off = cp - storage;
		ph->ph_firstfree = 0;
		for (i = 0; i < n - 1; i++)
			freelist[i] = i + 1;
		freelist[n - 1] = PR_INDEX_EOL;
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (caddr_t)(cp + pp->pr_size);
		}
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	struct pool_item_header *ph = NULL;
	caddr_t cp;
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = pool_allocator_alloc(pp, PR_NOWAIT);
		if (__predict_true(cp != NULL))
			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
		if (__predict_false(cp == NULL || ph == NULL)) {
			if (cp != NULL)
				pool_allocator_free(pp, cp);
			error = ENOMEM;
			simple_lock(&pp->pr_slock);
			break;
		}
		simple_lock(&pp->pr_slock);
		pool_prime_page(pp, cp, ph);
		pp->pr_npagealloc++;
	}

	return (error);
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
}

void
pool_setlowat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

/*
 * Release all complete pages that have not been used recently.
 */
int
#ifdef POOL_DIAGNOSTIC
_pool_reclaim(struct pool *pp, const char *file, long line)
#else
pool_reclaim(struct pool *pp)
#endif
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct pool_pagelist pq;
	struct pool_cache_grouplist pcgl;
	struct timeval curtime, diff;
	int s;

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	if (simple_lock_try(&pp->pr_slock) == 0)
		return (0);
	pr_enter(pp, file, line);

	LIST_INIT(&pq);
	LIST_INIT(&pcgl);

	/*
	 * Reclaim items from the pool's caches.
	 */
	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
		pool_cache_reclaim(pc, &pq, &pcgl);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		timersub(&curtime, &ph->ph_time, &diff);
		if (diff.tv_sec < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
		return 0;

	pr_pagelist_free(pp, &pq);
	pcg_grouplist_free(&pcgl);
	return (1);
}

/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	pp = NULL;
	s = splvm();
	simple_lock(&pool_head_slock);
	if (drainpp == NULL) {
		drainpp = LIST_FIRST(&pool_head);
	}
	if (drainpp) {
		pp = drainpp;
		drainpp = LIST_NEXT(pp, pr_poollist);
	}
	simple_unlock(&pool_head_slock);
	pool_reclaim(pp);
	splx(s);
}

/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printall(const char *modif, void (*pr)(const char *, ...))
{
	struct pool *pp;

	if (simple_lock_try(&pool_head_slock) == 0) {
		(*pr)("WARNING: pool_head_slock is locked\n");
	} else {
		simple_unlock(&pool_head_slock);
	}

	LIST_FOREACH(pp, &pool_head, pr_poollist) {
		pool_printit(pp, modif, pr);
	}
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		simple_unlock(&pp->pr_slock);

	pool_print1(pp, modif, pr);
}

static void
pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
    void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif

	LIST_FOREACH(ph, pl, ph_pagelist) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		if (!(pp->pr_roflags & PR_NOTOUCH)) {
			LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
				if (pi->pi_magic != PI_MAGIC) {
					(*pr)("\t\t\titem %p, magic 0x%x\n",
					    pi, pi->pi_magic);
				}
			}
		}
#endif
	}
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\talloc %p\n", pp->pr_alloc);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		(*pr)("\n\tempty page list:\n");
	pool_print_pagelist(pp, &pp->pr_emptypages, pr);
	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
		(*pr)("\n\tfull page list:\n");
	pool_print_pagelist(pp, &pp->pr_fullpages, pr);
	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
		(*pr)("\n\tpartial-page list:\n");
	pool_print_pagelist(pp, &pp->pr_partpages, pr);

	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:
	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:
	if (print_cache == 0)
		goto skip_cache;

#define PR_GROUPLIST(pcg)						\
	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
	for (i = 0; i < PCG_NOBJECTS; i++) {				\
		if (pcg->pcg_objects[i].pcgo_pa !=			\
		    POOL_PADDR_INVALID) {				\
			(*pr)("\t\t\t%p, 0x%llx\n",			\
			    pcg->pcg_objects[i].pcgo_va,		\
			    (unsigned long long)			\
			    pcg->pcg_objects[i].pcgo_pa);		\
		} else {						\
			(*pr)("\t\t\t%p\n",				\
			    pcg->pcg_objects[i].pcgo_va);		\
		}							\
	}

	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
		(*pr)("\tcache %p\n", pc);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		(*pr)("\t    full groups:\n");
		LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
			PR_GROUPLIST(pcg);
		}
		(*pr)("\t    partial groups:\n");
		LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
			PR_GROUPLIST(pcg);
		}
		(*pr)("\t    empty groups:\n");
		LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
			PR_GROUPLIST(pcg);
		}
	}
#undef PR_GROUPLIST

 skip_cache:
	pr_enter_check(pp, pr);
}

static int
pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
{
	struct pool_item *pi;
	caddr_t page;
	int n;

	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
	if (page != ph->ph_page &&
	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " at page head addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page,
		    ph, page);
		return 1;
	}

	if ((pp->pr_roflags & PR_NOTOUCH) != 0)
		return 0;

	for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = LIST_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
		if (pi->pi_magic != PI_MAGIC) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): free list modified: magic=%x;"
			    " page %p; item ordinal %d;"
			    " addr %p (p %p)\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page,
			    n, pi, page);
			panic("pool");
		}
#endif
		page =
		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
		if (page == ph->ph_page)
			continue;

		if (label != NULL)
			printf("%s: ", label);
		printf("pool(%p:%s): page inconsistency: page %p;"
		    " item ordinal %d; addr %p (p %p)\n", pp,
		    pp->pr_wchan, ph->ph_page,
		    n, pi, page);
		return 1;
	}
	return 0;
}


int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);
	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}
	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
		r = pool_chk_page(pp, label, ph);
		if (r) {
			goto out;
		}
	}

 out:
	simple_unlock(&pp->pr_slock);
	return (r);
}
1823
1824 /*
1825 * pool_cache_init:
1826 *
1827 * Initialize a pool cache.
1828 *
1829 * NOTE: If the pool must be protected from interrupts, we expect
1830 * to be called at the appropriate interrupt priority level.
1831 */
1832 void
1833 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1834 int (*ctor)(void *, void *, int),
1835 void (*dtor)(void *, void *),
1836 void *arg)
1837 {
1838
1839 LIST_INIT(&pc->pc_emptygroups);
1840 LIST_INIT(&pc->pc_fullgroups);
1841 LIST_INIT(&pc->pc_partgroups);
1842 simple_lock_init(&pc->pc_slock);
1843
1844 pc->pc_pool = pp;
1845
1846 pc->pc_ctor = ctor;
1847 pc->pc_dtor = dtor;
1848 pc->pc_arg = arg;
1849
1850 pc->pc_hits = 0;
1851 pc->pc_misses = 0;
1852
1853 pc->pc_ngroups = 0;
1854
1855 pc->pc_nitems = 0;
1856
1857 simple_lock(&pp->pr_slock);
1858 LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1859 simple_unlock(&pp->pr_slock);
1860 }
1861
1862 /*
1863 * pool_cache_destroy:
1864 *
1865 * Destroy a pool cache.
1866 */
1867 void
1868 pool_cache_destroy(struct pool_cache *pc)
1869 {
1870 struct pool *pp = pc->pc_pool;
1871
1872 /* First, invalidate the entire cache. */
1873 pool_cache_invalidate(pc);
1874
1875 /* ...and remove it from the pool's cache list. */
1876 simple_lock(&pp->pr_slock);
1877 LIST_REMOVE(pc, pc_poollist);
1878 simple_unlock(&pp->pr_slock);
1879 }
1880
1881 static inline void *
1882 pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1883 {
1884 void *object;
1885 u_int idx;
1886
1887 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1888 KASSERT(pcg->pcg_avail != 0);
1889 idx = --pcg->pcg_avail;
1890
1891 KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1892 object = pcg->pcg_objects[idx].pcgo_va;
1893 if (pap != NULL)
1894 *pap = pcg->pcg_objects[idx].pcgo_pa;
1895 pcg->pcg_objects[idx].pcgo_va = NULL;
1896
1897 return (object);
1898 }
1899
1900 static inline void
1901 pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1902 {
1903 u_int idx;
1904
1905 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1906 idx = pcg->pcg_avail++;
1907
1908 KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1909 pcg->pcg_objects[idx].pcgo_va = object;
1910 pcg->pcg_objects[idx].pcgo_pa = pa;
1911 }
1912
1913 static void
1914 pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
1915 {
1916 struct pool_cache_group *pcg;
1917 int s;
1918
1919 s = splvm();
1920 while ((pcg = LIST_FIRST(pcgl)) != NULL) {
1921 LIST_REMOVE(pcg, pcg_list);
1922 pool_put(&pcgpool, pcg);
1923 }
1924 splx(s);
1925 }
1926
1927 /*
1928 * pool_cache_get{,_paddr}:
1929 *
1930 * Get an object from a pool cache (optionally returning
1931 * the physical address of the object).
1932 */
1933 void *
1934 pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1935 {
1936 struct pool_cache_group *pcg;
1937 void *object;
1938
1939 #ifdef LOCKDEBUG
1940 if (flags & PR_WAITOK)
1941 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1942 #endif
1943
1944 simple_lock(&pc->pc_slock);
1945
1946 pcg = LIST_FIRST(&pc->pc_partgroups);
1947 if (pcg == NULL) {
1948 pcg = LIST_FIRST(&pc->pc_fullgroups);
1949 if (pcg != NULL) {
1950 LIST_REMOVE(pcg, pcg_list);
1951 LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1952 }
1953 }
1954 if (pcg == NULL) {
1955
1956 /*
1957 * No groups with any available objects. Allocate
1958 * a new object, construct it, and return it to
1959 * the caller. We will allocate a group, if necessary,
1960 * when the object is freed back to the cache.
1961 */
1962 pc->pc_misses++;
1963 simple_unlock(&pc->pc_slock);
1964 object = pool_get(pc->pc_pool, flags);
1965 if (object != NULL && pc->pc_ctor != NULL) {
1966 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1967 pool_put(pc->pc_pool, object);
1968 return (NULL);
1969 }
1970 }
1971 if (object != NULL && pap != NULL) {
1972 #ifdef POOL_VTOPHYS
1973 *pap = POOL_VTOPHYS(object);
1974 #else
1975 *pap = POOL_PADDR_INVALID;
1976 #endif
1977 }
1978 return (object);
1979 }
1980
1981 pc->pc_hits++;
1982 pc->pc_nitems--;
1983 object = pcg_get(pcg, pap);
1984
1985 if (pcg->pcg_avail == 0) {
1986 LIST_REMOVE(pcg, pcg_list);
1987 LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
1988 }
1989 simple_unlock(&pc->pc_slock);
1990
1991 return (object);
1992 }
1993
1994 /*
1995 * pool_cache_put{,_paddr}:
1996 *
1997 * Put an object back to the pool cache (optionally caching the
1998 * physical address of the object).
1999 */
2000 void
2001 pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
2002 {
2003 struct pool_cache_group *pcg;
2004 int s;
2005
2006 if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2007 goto destruct;
2008 }
2009
2010 simple_lock(&pc->pc_slock);
2011
2012 pcg = LIST_FIRST(&pc->pc_partgroups);
2013 if (pcg == NULL) {
2014 pcg = LIST_FIRST(&pc->pc_emptygroups);
2015 if (pcg != NULL) {
2016 LIST_REMOVE(pcg, pcg_list);
2017 LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
2018 }
2019 }
2020 if (pcg == NULL) {
2021
2022 /*
2023 * No empty groups to free the object to. Attempt to
2024 * allocate one.
2025 */
2026 simple_unlock(&pc->pc_slock);
2027 s = splvm();
2028 pcg = pool_get(&pcgpool, PR_NOWAIT);
2029 splx(s);
2030 if (pcg == NULL) {
2031 destruct:
2032
2033 /*
2034 * Unable to allocate a cache group; destruct the object
2035 * and free it back to the pool.
2036 */
2037 pool_cache_destruct_object(pc, object);
2038 return;
2039 }
2040 memset(pcg, 0, sizeof(*pcg));
2041 simple_lock(&pc->pc_slock);
2042 pc->pc_ngroups++;
2043 LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
2044 }
2045
2046 pc->pc_nitems++;
2047 pcg_put(pcg, object, pa);
2048
2049 if (pcg->pcg_avail == PCG_NOBJECTS) {
2050 LIST_REMOVE(pcg, pcg_list);
2051 LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2052 }
2053 simple_unlock(&pc->pc_slock);
2054 }

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(struct pool_cache *pc, void *object)
{

	if (pc->pc_dtor != NULL)
		(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(pc->pc_pool, object);
}
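
/*
 * For example (hypothetical caller), a subsystem tearing down state
 * that must not be re-cached can bypass the cache entirely:
 *
 *	pool_cache_destruct_object(&foo_cache, obj);
 *
 * as opposed to pool_cache_put(), which keeps the constructed object
 * around for reuse.
 */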

static void
pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
    struct pool_cache *pc, struct pool_pagelist *pq,
    struct pool_cache_grouplist *pcgdl)
{
	struct pool_cache_group *pcg, *npcg;
	void *object;

	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
		npcg = LIST_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg, NULL);
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			pool_do_put(pc->pc_pool, object, pq);
		}
		pc->pc_ngroups--;
		LIST_REMOVE(pcg, pcg_list);
		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
	}
}

static void
pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
    struct pool_cache_grouplist *pcgl)
{

	LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
	LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));

	pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
	pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);

	KASSERT(LIST_EMPTY(&pc->pc_partgroups));
	KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
	KASSERT(pc->pc_nitems == 0);
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{
	struct pool_pagelist pq;
	struct pool_cache_grouplist pcgl;

	LIST_INIT(&pq);
	LIST_INIT(&pcgl);

	simple_lock(&pc->pc_slock);
	simple_lock(&pc->pc_pool->pr_slock);

	pool_do_cache_invalidate(pc, &pq, &pcgl);

	simple_unlock(&pc->pc_pool->pr_slock);
	simple_unlock(&pc->pc_slock);

	pr_pagelist_free(pc->pc_pool, &pq);
	pcg_grouplist_free(&pcgl);
}
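
/*
 * Usage note (illustration only): a typical caller invalidates a
 * cache before tearing it down, or whenever the constructed state
 * held by cached objects has gone stale; the cache itself remains
 * usable afterwards:
 *
 *	pool_cache_invalidate(&foo_cache);
 *	pool_cache_destroy(&foo_cache);
 */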

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
    struct pool_cache_grouplist *pcgl)
{

	/*
	 * We're locking in the wrong order (normally pool_cache -> pool,
	 * but the pool is already locked when we get here), so we have
	 * to use trylock.  If we can't lock the pool_cache, it's not
	 * really a big deal here.
	 */
	if (simple_lock_try(&pc->pc_slock) == 0)
		return;

	pool_do_cache_invalidate(pc, pq, pcgl);

	simple_unlock(&pc->pc_slock);
}

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *
 *	pool_allocator_kmem - the default when no allocator is specified
 *
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	in interrupt context.
 */
void	*pool_page_alloc(struct pool *, int);
void	pool_page_free(struct pool *, void *);

#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_kmem_fullpage = {
	pool_page_alloc, pool_page_free, 0,
};
#else
struct pool_allocator pool_allocator_kmem = {
	pool_page_alloc, pool_page_free, 0,
};
#endif

void	*pool_page_alloc_nointr(struct pool *, int);
void	pool_page_free_nointr(struct pool *, void *);

#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_nointr_fullpage = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
};
#else
struct pool_allocator pool_allocator_nointr = {
	pool_page_alloc_nointr, pool_page_free_nointr, 0,
};
#endif

#ifdef POOL_SUBPAGE
void	*pool_subpage_alloc(struct pool *, int);
void	pool_subpage_free(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
};

void	*pool_subpage_alloc_nointr(struct pool *, int);
void	pool_subpage_free_nointr(struct pool *, void *);

struct pool_allocator pool_allocator_nointr = {
	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
};
#endif /* POOL_SUBPAGE */
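
/*
 * Sketch of a client-supplied allocator (everything here is an
 * invented example, not an allocator this file defines).  A pool
 * that wants its pages from a private submap "foo_map" only needs
 * to fill in the pa_alloc/pa_free hooks and a page size (0 means
 * the default, PAGE_SIZE):
 *
 *	static void *
 *	foo_page_alloc(struct pool *pp, int flags)
 *	{
 *		boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
 *
 *		return ((void *)uvm_km_alloc_poolpage(foo_map, waitok));
 *	}
 *
 *	static void
 *	foo_page_free(struct pool *pp, void *v)
 *	{
 *
 *		uvm_km_free_poolpage(foo_map, (vaddr_t)v);
 *	}
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 * The allocator is then passed to pool_init() when the pool is
 * created.
 */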

/*
 * We have at least three different resources for the same allocation and
 * each resource can be depleted.  First, we have the ready elements in the
 * pool.  Then we have the resource (typically a vm_map) for this allocator.
 * Finally, we have physical memory.  Waiting for any of these can be
 * unnecessary when any other is freed, but the kernel doesn't support
 * sleeping on multiple wait channels, so we have to employ another strategy.
 *
 * The caller sleeps on the pool (so that it can be awakened when an item
 * is returned to the pool), but we set PA_WANT on the allocator.  When a
 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
 * will wake up all sleeping pools belonging to this allocator.
 *
 * XXX Thundering herd.
 */
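
/*
 * A condensed sketch of that handshake (illustration only; the sleeping
 * side is pool_get(), which sets PR_WANTED and sleeps on the pool):
 *
 *	waiter (pool_get)		releaser (pool_allocator_free)
 *	-----------------		------------------------------
 *	pp->pr_flags |= PR_WANTED;	(*pa->pa_free)(pp, v);
 *	sleep on pp			if (pa->pa_flags & PA_WANT)
 *					    wakeup(pp) for each pool
 *					    on pa->pa_list
 */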
void *
pool_allocator_alloc(struct pool *org, int flags)
{
	struct pool_allocator *pa = org->pr_alloc;
	struct pool *pp, *start;
	int s, freed;
	void *res;

	LOCK_ASSERT(!simple_lock_held(&org->pr_slock));

	do {
		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
			return (res);
		if ((flags & PR_WAITOK) == 0) {
			/*
			 * We only run the drain hook here if PR_NOWAIT.
			 * In other cases, the hook will be run in
			 * pool_reclaim().
			 */
			if (org->pr_drain_hook != NULL) {
				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
				    flags);
				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
					return (res);
			}
			break;
		}

		/*
		 * Drain all pools that use this allocator.
		 * We do this to reclaim VA space.
		 * pa_alloc is responsible for waiting for
		 * physical memory.
		 *
		 * XXX We risk looping forever if someone calls
		 * pool_destroy on "start".  But there is no other
		 * way to have a potentially sleeping pool_reclaim,
		 * non-sleeping locks on pool_allocator, and some
		 * stirring of drained pools in the allocator.
		 *
		 * XXX Maybe we should use pool_head_slock for locking
		 * the allocators?
		 */
		freed = 0;

		s = splvm();
		simple_lock(&pa->pa_slock);
		pp = start = TAILQ_FIRST(&pa->pa_list);
		do {
			TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
			simple_unlock(&pa->pa_slock);
			freed = pool_reclaim(pp);
			simple_lock(&pa->pa_slock);
		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
		    freed == 0);

		if (freed == 0) {
			/*
			 * We set PA_WANT here: the caller will most likely
			 * sleep waiting for pages (if not, this won't hurt
			 * that much), and there is no way to set this in
			 * the caller without violating the locking order.
			 */
			pa->pa_flags |= PA_WANT;
		}
		simple_unlock(&pa->pa_slock);
		splx(s);
	} while (freed);
	return (NULL);
}

void
pool_allocator_free(struct pool *pp, void *v)
{
	struct pool_allocator *pa = pp->pr_alloc;
	int s;

	LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));

	(*pa->pa_free)(pp, v);

	s = splvm();
	simple_lock(&pa->pa_slock);
	if ((pa->pa_flags & PA_WANT) == 0) {
		simple_unlock(&pa->pa_slock);
		splx(s);
		return;
	}

	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
		simple_lock(&pp->pr_slock);
		if ((pp->pr_flags & PR_WANTED) != 0) {
			pp->pr_flags &= ~PR_WANTED;
			wakeup(pp);
		}
		simple_unlock(&pp->pr_slock);
	}
	pa->pa_flags &= ~PA_WANT;
	simple_unlock(&pa->pa_slock);
	splx(s);
}

void *
pool_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
}

void
pool_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
}

static void *
pool_page_alloc_meta(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
}

static void
pool_page_free_meta(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
}

#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *
pool_subpage_alloc(struct pool *pp, int flags)
{
	void *v;
	int s;

	s = splvm();
	v = pool_get(&psppool, flags);
	splx(s);
	return v;
}

void
pool_subpage_free(struct pool *pp, void *v)
{
	int s;

	s = splvm();
	pool_put(&psppool, v);
	splx(s);
}

/* We don't provide a real nointr allocator.  Maybe later. */
void *
pool_subpage_alloc_nointr(struct pool *pp, int flags)
{

	return (pool_subpage_alloc(pp, flags));
}

void
pool_subpage_free_nointr(struct pool *pp, void *v)
{

	pool_subpage_free(pp, v);
}
#endif /* POOL_SUBPAGE */

void *
pool_page_alloc_nointr(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
}

void
pool_page_free_nointr(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
}