/*	$NetBSD: subr_pool.c,v 1.218 2017/12/04 03:05:24 mrg Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
 * The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
 * Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.218 2017/12/04 03:05:24 mrg Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vmem.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
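
/*
 * Example: a minimal usage sketch of the API described above ("struct foo"
 * and the pool name are hypothetical):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...use f...
 *	pool_put(&foo_pool, f);
 */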

/* List of all pools. Non static as needed by 'vmstat -i' */
TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

#ifdef POOL_REDZONE
# define POOL_REDZONE_SIZE 2
static void pool_redzone_init(struct pool *, size_t);
static void pool_redzone_fill(struct pool *, void *);
static void pool_redzone_check(struct pool *, void *);
#else
# define pool_redzone_init(pp, sz)	/* NOTHING */
# define pool_redzone_fill(pp, ptr)	/* NOTHING */
# define pool_redzone_check(pp, ptr)	/* NOTHING */
#endif

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	.pa_alloc = pool_page_alloc_meta,
	.pa_free = pool_page_free_meta,
	.pa_pagesz = 0
};

#define POOL_ALLOCATOR_BIG_BASE 13
extern struct pool_allocator pool_allocator_big[];
static int pool_bigidx(size_t);

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	SPLAY_ENTRY(pool_item_header)
				ph_node;	/* Off-page page headers */
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_NOTOUCH */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_NOTOUCH */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u;
};
#define	ph_itemlist	ph_u.phu_normal.phu_itemlist
#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap

struct pool_item {
#ifdef DIAGNOSTIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem. This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups. Each cache group references up
 * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group. When a cache group frees an object back to the
 * pool, it first calls the object's destructor. This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well. Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools. By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
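
/*
 * Example: a minimal sketch of the cache API ("struct foo", foo_ctor and
 * foo_dtor are hypothetical):
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit, 0,
 *	    0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...f arrives constructed; return it still constructed...
 *	pool_cache_put(foo_cache, f);
 */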

static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

pool_cache_t pnbuf_cache;	/* pathname buffer cache */

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
				    void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
				    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_transfer(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...) __printflike(1, 2));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...) __printflike(1, 2));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

static inline unsigned int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_NOTOUCH);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
	KASSERT(idx < pp->pr_itemsperpage);
	return idx;
}

static inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

	KASSERT((*bitmap & mask) == 0);
	*bitmap |= mask;
}

static inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1 << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline void
pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}
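
/*
 * Worked example of the bitmap arithmetic used above: with
 * BITMAP_SIZE == 32, item index 40 lives in word 40 / 32 == 1 under
 * mask 1 << (40 & BITMAP_MASK) == 1 << 8. pr_item_notouch_init() sets
 * every bit (all items free), _get() clears the bit of the item it
 * hands out, and _put() sets it again on free.
 */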

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider a pool_item_header with a smaller ph_page to be
	 * bigger. (This unnatural ordering is for the benefit of
	 * pr_find_pagehead.)
	 */

	if (a->ph_page < b->ph_page)
		return (1);
	else if (a->ph_page > b->ph_page)
		return (-1);
	else
		return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page =
		    (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);

		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}
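
/*
 * In the aligned (non-PR_NOALIGN) case above, the page base is recovered
 * purely by masking; e.g. with a 4096-byte page size, pa_pagemask is
 * ~0xfff, so an item at 0xdeadb123 belongs to the page at 0xdeadb000,
 * whose header is then found either at pr_phoffset within the page
 * itself (PR_PHINPAGE) or via the splay tree.
 */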

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle != 0);
		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
		    "nitems=%u < itemsperpage=%u",
		    pp->pr_nitems, pp->pr_itemsperpage);
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	size_t size;
	int idx;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 */
	for (idx = 0; idx < PHPOOL_MAX; idx++) {
		static char phpool_names[PHPOOL_MAX][6+1+6+1];
		int nelem;
		size_t sz;

		nelem = PHPOOL_FREELIST_NELEM(idx);
		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
		    "phpool-%d", nelem);
		sz = sizeof(struct pool_item_header);
		if (nelem) {
			sz = offsetof(struct pool_item_header,
			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
		}
		pool_init(&phpool[idx], sz, 0, 0, 0,
		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
	}
#ifdef POOL_SUBPAGE
	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
#endif

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
	    "pcgnormal", &pool_allocator_meta, IPL_VM);

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
	    "pcglarge", &pool_allocator_meta, IPL_VM);

	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before kmem(9) is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t trysize, phsize, prsize;
	int off, slack;

#ifdef DEBUG
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("%s: [%s] already initialised", __func__,
			    wchan);
	}
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;
#ifdef POOL_SUBPAGE
	if (size > palloc->pa_pagesz) {
		if (palloc == &pool_allocator_kmem)
			palloc = &pool_allocator_kmem_fullpage;
		else if (palloc == &pool_allocator_nointr)
			palloc = &pool_allocator_nointr_fullpage;
	}
#endif /* POOL_SUBPAGE */
	if (!cold)
		mutex_enter(&pool_allocator_lock);
	if (palloc->pa_refcnt++ == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}
	if (!cold)
		mutex_exit(&pool_allocator_lock);

	if (align == 0)
		align = ALIGN(1);

	prsize = size;
	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
		prsize = sizeof(struct pool_item);

	prsize = roundup(prsize, align);
	KASSERTMSG((prsize <= palloc->pa_pagesz),
	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
	    __func__, wchan, prsize, palloc->pa_pagesz);

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = prsize;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;
	pool_redzone_init(pp, size);

	/*
	 * Decide whether to put the page header off-page, to avoid wasting
	 * too large a part of the page or too big an item. Off-page page
	 * headers go into a splay tree, so we can match a returned item
	 * with its header based on the page address.
	 * The threshold is 1/16 of the page size or 8 times the header
	 * size, whichever is smaller (XXX: tune).
	 *
	 * However, we'll put the header into the page if we can put
	 * it without wasting any items.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff %= align;
	/* See the comment below about reserved bytes. */
	trysize = palloc->pa_pagesz - ((align - ioff) % align);
	phsize = ALIGN(sizeof(struct pool_item_header));
	if (pp->pr_roflags & PR_PHINPAGE ||
	    ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
	    (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
	    trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = palloc->pa_pagesz;
		SPLAY_INIT(&pp->pr_phtree);
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 */
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);
	if ((pp->pr_roflags & PR_NOTOUCH)) {
		int idx;

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: [%s] too large itemsperpage(%d) for "
			    "PR_NOTOUCH", __func__,
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		pp->pr_phpool = &phpool[0];
	}
#if defined(DIAGNOSTIC)
	else {
		pp->pr_phpool = NULL;
	}
#endif

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
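
	/*
	 * Illustrative figures: with a 4096-byte page, a 48-byte in-page
	 * header (off == 4048), a 96-byte item and align == 8, we get
	 * itemsperpage == 4048 / 96 == 42 and slack == 4048 - 42 * 96
	 * == 16, so successive pages start their items at offsets
	 * 0, 8 and 16 before the color wraps back to 0.
	 */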

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/* Insert into the list of all pools. */
	if (!cold)
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (!cold)
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (!cold)
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (!cold)
		mutex_exit(&palloc->pa_lock);
}

/*
 * Decommission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pool_allocator_lock);
	if (--pp->pr_alloc->pa_refcnt == 0)
		mutex_destroy(&pp->pr_alloc->pa_lock);
	mutex_exit(&pool_allocator_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);
	KASSERTMSG((pp->pr_nout == 0),
	    "%s: pool busy: still out: %u", __func__, pp->pr_nout);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
	KASSERTMSG((pp->pr_drain_hook == NULL),
	    "%s: [%s] already set", __func__, pp->pr_wchan);
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = (void *)((char *)storage + pp->pr_phoffset);
	else
		ph = pool_get(pp->pr_phpool, flags);

	return (ph);
}

/*
 * Grab an item from the pool.
 */
void *
pool_get(struct pool *pp, int flags)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	void *v;

	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	KASSERTMSG((pp->pr_itemsperpage != 0),
	    "%s: [%s] pr_itemsperpage is zero, "
	    "pool not initialized?", __func__, pp->pr_wchan);
	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
		|| pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
	    "%s: [%s] is IPL_NONE, but called from interrupt context",
	    __func__, pp->pr_wchan);
	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	mutex_enter(&pp->pr_lock);
 startover:
	/*
	 * Check to see if we've reached the hard limit. If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case. Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			do {
				cv_wait(&pp->pr_cv, &pp->pr_lock);
			} while (pp->pr_flags & PR_WANTED);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		mutex_exit(&pp->pr_lock);
		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

		KASSERTMSG((pp->pr_nitems == 0),
		    "%s: [%s] curpage NULL, inconsistent nitems %u",
		    __func__, pp->pr_wchan, pp->pr_nitems);

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		error = pool_grow(pp, flags);
		if (error != 0) {
			/*
			 * pool_grow aborts when another thread
			 * is allocating a new page. Retry if it
			 * waited for it.
			 */
			if (error == ERESTART)
				goto startover;

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool. Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			mutex_exit(&pp->pr_lock);
			KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
			return (NULL);
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_NOTOUCH) {
		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
		    "%s: %s: page empty", __func__, pp->pr_wchan);
		v = pr_item_notouch_get(pp, ph);
	} else {
		v = pi = LIST_FIRST(&ph->ph_itemlist);
		if (__predict_false(v == NULL)) {
			mutex_exit(&pp->pr_lock);
			panic("%s: [%s] page empty", __func__, pp->pr_wchan);
		}
		KASSERTMSG((pp->pr_nitems > 0),
		    "%s: [%s] nitems %u inconsistent on itemlist",
		    __func__, pp->pr_wchan, pp->pr_nitems);
		KASSERTMSG((pi->pi_magic == PI_MAGIC),
		    "%s: [%s] free list modified: "
		    "magic=%x; page %p; item addr %p", __func__,
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);

		/*
		 * Remove from item list.
		 */
		LIST_REMOVE(pi, pi_list);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle > 0);
		pp->pr_nidle--;

		/*
		 * This page was previously empty. Move it to the list of
		 * partially-full pages. This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
		KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
			LIST_EMPTY(&ph->ph_itemlist)),
		    "%s: [%s] nmissing (%u) inconsistent", __func__,
		    pp->pr_wchan, ph->ph_nmissing);
		/*
		 * This page is now full. Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
	KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
	FREECHECK_OUT(&pp->pr_freecheck, v);
	pool_redzone_fill(pp, v);
	return (v);
}

/*
 * Internal version of pool_put(). Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;

	KASSERT(mutex_owned(&pp->pr_lock));
	pool_redzone_check(pp, v);
	FREECHECK_IN(&pp->pr_freecheck, v);
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

	KASSERTMSG((pp->pr_nout > 0),
	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_put(pp, ph, v);
	} else {
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
		{
			int i, *ip = v;

			for (i = 0; i < pp->pr_size / sizeof(int); i++) {
				*ip++ = PI_MAGIC;
			}
		}
#endif

		LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		cv_broadcast(&pp->pr_cv);
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system. ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page. A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon. This minimizes
			 * ping-pong'ing for memory.
			 *
			 * note for 64-bit time_t: truncating to 32-bit is not
			 * a problem for our usage.
			 */
			ph->ph_time = time_uptime;
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page. The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	pool_do_put(pp, v, &pq);
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}

/*
 * pool_grow: grow a pool by a page.
 *
 * => called with pool locked.
 * => unlock and relock the pool.
 * => return with pool locked.
 */

static int
pool_grow(struct pool *pp, int flags)
{
	/*
	 * If there's a pool_grow in progress, wait for it to complete
	 * and try again from the top.
	 */
	if (pp->pr_flags & PR_GROWING) {
		if (flags & PR_WAITOK) {
			do {
				cv_wait(&pp->pr_cv, &pp->pr_lock);
			} while (pp->pr_flags & PR_GROWING);
			return ERESTART;
		} else {
			return EWOULDBLOCK;
		}
	}
	pp->pr_flags |= PR_GROWING;

	mutex_exit(&pp->pr_lock);
	char *cp = pool_allocator_alloc(pp, flags);
	if (__predict_false(cp == NULL))
		goto out;

	struct pool_item_header *ph = pool_alloc_item_header(pp, cp, flags);
	if (__predict_false(ph == NULL)) {
		pool_allocator_free(pp, cp);
		goto out;
	}

	mutex_enter(&pp->pr_lock);
	pool_prime_page(pp, cp, ph);
	pp->pr_npagealloc++;
	KASSERT(pp->pr_flags & PR_GROWING);
	pp->pr_flags &= ~PR_GROWING;
	/*
	 * If anyone was waiting for pool_grow, notify them that we
	 * may have just done it.
	 */
	cv_broadcast(&pp->pr_cv);
	return 0;
out:
	KASSERT(pp->pr_flags & PR_GROWING);
	pp->pr_flags &= ~PR_GROWING;
	mutex_enter(&pp->pr_lock);
	return ENOMEM;
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	int newpages;
	int error = 0;

	mutex_enter(&pp->pr_lock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			if (error == ERESTART)
				continue;
			break;
		}
		pp->pr_minpages++;
		newpages--;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mutex_exit(&pp->pr_lock);
	return error;
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *cp = storage;
	const unsigned int align = pp->pr_align;
	const unsigned int ioff = pp->pr_itemoffset;
	int n;

	KASSERT(mutex_owned(&pp->pr_lock));
	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
		(((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time = time_uptime;
	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	ph->ph_off = pp->pr_curcolor;
	cp = (char *)cp + ph->ph_off;
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (char *)cp + align - ioff;

	KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_NOTOUCH) {
		pr_item_notouch_init(pp, ph);
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (char *)cp + pp->pr_size;

			KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
		}
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark. This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			if (error == ERESTART)
				continue;
			break;
		}
	}
	return error;
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
}

void
pool_setlowat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}
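
/*
 * A sketch of how the three knobs above combine for a hypothetical pool:
 *
 *	pool_setlowat(&foo_pool, 16);	keep at least 16 items primed
 *	pool_sethiwat(&foo_pool, 256);	release idle pages above this
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "foo_pool limit reached", 60);
 *
 * After this, pool_get() fails (or sleeps, with PR_WAITOK) once 1024
 * items are outstanding, logging the warning at most once per minute.
 */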

/*
 * Release all complete pages that have not been used recently.
 *
 * Must not be called from interrupt context.
 */
int
pool_reclaim(struct pool *pp)
{
	struct pool_item_header *ph, *phnext;
	struct pool_pagelist pq;
	uint32_t curtime;
	bool klock;
	int rv;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	/*
	 * XXXSMP Because we do not want to cause non-MPSAFE code
	 * to block.
	 */
	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
	    pp->pr_ipl == IPL_SOFTSERIAL) {
		KERNEL_LOCK(1, NULL);
		klock = true;
	} else
		klock = false;

	/* Reclaim items from the pool's cache (if any). */
	if (pp->pr_cache != NULL)
		pool_cache_invalidate(pp->pr_cache);

	if (mutex_tryenter(&pp->pr_lock) == 0) {
		if (klock) {
			KERNEL_UNLOCK_ONE(NULL);
		}
		return (0);
	}

	LIST_INIT(&pq);

	curtime = time_uptime;

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		if (curtime - ph->ph_time < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	mutex_exit(&pp->pr_lock);

	if (LIST_EMPTY(&pq))
		rv = 0;
	else {
		pr_pagelist_free(pp, &pq);
		rv = 1;
	}

	if (klock) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return (rv);
}

/*
 * Drain pools, one at a time. The drained pool is returned within ppp.
 *
 * Note, must never be called from interrupt context.
 */
bool
pool_drain(struct pool **ppp)
{
	bool reclaimed;
	struct pool *pp;

	KASSERT(!TAILQ_EMPTY(&pool_head));

	pp = NULL;

	/* Find next pool to drain, and add a reference. */
	mutex_enter(&pool_head_lock);
	do {
		if (drainpp == NULL) {
			drainpp = TAILQ_FIRST(&pool_head);
		}
		if (drainpp != NULL) {
			pp = drainpp;
			drainpp = TAILQ_NEXT(pp, pr_poollist);
		}
		/*
		 * Skip completely idle pools. We depend on at least
		 * one pool in the system being active.
		 */
	} while (pp == NULL || pp->pr_npages == 0);
	pp->pr_refcnt++;
	mutex_exit(&pool_head_lock);

	/* Drain the cache (if any) and the pool. */
	reclaimed = pool_reclaim(pp);

	/* Finally, unlock the pool. */
	mutex_enter(&pool_head_lock);
	pp->pr_refcnt--;
	cv_broadcast(&pool_busy);
	mutex_exit(&pool_head_lock);

	if (ppp != NULL)
		*ppp = pp;

	return reclaimed;
}
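
/*
 * A sketch of a caller-side drain loop in the style of the pagedaemon
 * (memory_is_tight() is a hypothetical predicate):
 *
 *	struct pool *pp = NULL;
 *
 *	while (memory_is_tight())
 *		(void)pool_drain(&pp);		one pool per pass
 *
 * Each call visits the next pool in the round-robin (drainpp), takes a
 * reference, reclaims, and drops the reference before returning.
 */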
1454
1455 /*
1456 * Calculate the total number of pages consumed by pools.
1457 */
1458 int
1459 pool_totalpages(void)
1460 {
1461 struct pool *pp;
1462 uint64_t total = 0;
1463
1464 mutex_enter(&pool_head_lock);
1465 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1466 uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
1467
1468 if ((pp->pr_roflags & PR_RECURSIVE) != 0)
1469 bytes -= (pp->pr_nout * pp->pr_size);
1470 total += bytes;
1471 }
1472 mutex_exit(&pool_head_lock);
1473
1474 return atop(total);
1475 }
1476
1477 /*
1478 * Diagnostic helpers.
1479 */
1480
1481 void
1482 pool_printall(const char *modif, void (*pr)(const char *, ...))
1483 {
1484 struct pool *pp;
1485
1486 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1487 pool_printit(pp, modif, pr);
1488 }
1489 }
1490
1491 void
1492 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1493 {
1494
1495 if (pp == NULL) {
1496 (*pr)("Must specify a pool to print.\n");
1497 return;
1498 }
1499
1500 pool_print1(pp, modif, pr);
1501 }
1502
1503 static void
1504 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1505 void (*pr)(const char *, ...))
1506 {
1507 struct pool_item_header *ph;
1508 struct pool_item *pi __diagused;
1509
1510 LIST_FOREACH(ph, pl, ph_pagelist) {
1511 (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1512 ph->ph_page, ph->ph_nmissing, ph->ph_time);
1513 #ifdef DIAGNOSTIC
1514 if (!(pp->pr_roflags & PR_NOTOUCH)) {
1515 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1516 if (pi->pi_magic != PI_MAGIC) {
1517 (*pr)("\t\t\titem %p, magic 0x%x\n",
1518 pi, pi->pi_magic);
1519 }
1520 }
1521 }
1522 #endif
1523 }
1524 }
1525
1526 static void
1527 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1528 {
1529 struct pool_item_header *ph;
1530 pool_cache_t pc;
1531 pcg_t *pcg;
1532 pool_cache_cpu_t *cc;
1533 uint64_t cpuhit, cpumiss;
1534 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1535 char c;
1536
1537 while ((c = *modif++) != '\0') {
1538 if (c == 'l')
1539 print_log = 1;
1540 if (c == 'p')
1541 print_pagelist = 1;
1542 if (c == 'c')
1543 print_cache = 1;
1544 }
1545
1546 if ((pc = pp->pr_cache) != NULL) {
1547 (*pr)("POOL CACHE");
1548 } else {
1549 (*pr)("POOL");
1550 }
1551
1552 (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1553 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1554 pp->pr_roflags);
1555 (*pr)("\talloc %p\n", pp->pr_alloc);
1556 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1557 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1558 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1559 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1560
1561 (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1562 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1563 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1564 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1565
1566 if (print_pagelist == 0)
1567 goto skip_pagelist;
1568
1569 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1570 (*pr)("\n\tempty page list:\n");
1571 pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1572 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1573 (*pr)("\n\tfull page list:\n");
1574 pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1575 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1576 (*pr)("\n\tpartial-page list:\n");
1577 pool_print_pagelist(pp, &pp->pr_partpages, pr);
1578
1579 if (pp->pr_curpage == NULL)
1580 (*pr)("\tno current page\n");
1581 else
1582 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1583
1584 skip_pagelist:
1585 if (print_log == 0)
1586 goto skip_log;
1587
1588 (*pr)("\n");
1589
1590 skip_log:
1591
1592 #define PR_GROUPLIST(pcg) \
1593 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1594 for (i = 0; i < pcg->pcg_size; i++) { \
1595 if (pcg->pcg_objects[i].pcgo_pa != \
1596 POOL_PADDR_INVALID) { \
1597 (*pr)("\t\t\t%p, 0x%llx\n", \
1598 pcg->pcg_objects[i].pcgo_va, \
1599 (unsigned long long) \
1600 pcg->pcg_objects[i].pcgo_pa); \
1601 } else { \
1602 (*pr)("\t\t\t%p\n", \
1603 pcg->pcg_objects[i].pcgo_va); \
1604 } \
1605 }
1606
1607 if (pc != NULL) {
1608 cpuhit = 0;
1609 cpumiss = 0;
1610 for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1611 if ((cc = pc->pc_cpus[i]) == NULL)
1612 continue;
1613 cpuhit += cc->cc_hits;
1614 cpumiss += cc->cc_misses;
1615 }
1616 (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1617 (*pr)("\tcache layer hits %llu misses %llu\n",
1618 pc->pc_hits, pc->pc_misses);
1619 (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1620 pc->pc_hits + pc->pc_misses - pc->pc_contended,
1621 pc->pc_contended);
1622 (*pr)("\tcache layer empty groups %u full groups %u\n",
1623 pc->pc_nempty, pc->pc_nfull);
1624 if (print_cache) {
1625 (*pr)("\tfull cache groups:\n");
1626 for (pcg = pc->pc_fullgroups; pcg != NULL;
1627 pcg = pcg->pcg_next) {
1628 PR_GROUPLIST(pcg);
1629 }
1630 (*pr)("\tempty cache groups:\n");
1631 for (pcg = pc->pc_emptygroups; pcg != NULL;
1632 pcg = pcg->pcg_next) {
1633 PR_GROUPLIST(pcg);
1634 }
1635 }
1636 }
1637 #undef PR_GROUPLIST
1638 }
1639
1640 static int
1641 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1642 {
1643 struct pool_item *pi;
1644 void *page;
1645 int n;
1646
1647 if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1648 page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1649 if (page != ph->ph_page &&
1650 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1651 if (label != NULL)
1652 printf("%s: ", label);
1653 printf("pool(%p:%s): page inconsistency: page %p;"
1654 " at page head addr %p (p %p)\n", pp,
1655 pp->pr_wchan, ph->ph_page,
1656 ph, page);
1657 return 1;
1658 }
1659 }
1660
1661 if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1662 return 0;
1663
1664 for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1665 pi != NULL;
1666 pi = LIST_NEXT(pi,pi_list), n++) {
1667
1668 #ifdef DIAGNOSTIC
1669 if (pi->pi_magic != PI_MAGIC) {
1670 if (label != NULL)
1671 printf("%s: ", label);
1672 printf("pool(%s): free list modified: magic=%x;"
1673 " page %p; item ordinal %d; addr %p\n",
1674 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1675 n, pi);
1676 panic("pool");
1677 }
1678 #endif
1679 if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1680 continue;
1681 }
1682 page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1683 if (page == ph->ph_page)
1684 continue;
1685
1686 if (label != NULL)
1687 printf("%s: ", label);
1688 printf("pool(%p:%s): page inconsistency: page %p;"
1689 " item ordinal %d; addr %p (p %p)\n", pp,
1690 pp->pr_wchan, ph->ph_page,
1691 n, pi, page);
1692 return 1;
1693 }
1694 return 0;
1695 }
1696
1697
1698 int
1699 pool_chk(struct pool *pp, const char *label)
1700 {
1701 struct pool_item_header *ph;
1702 int r = 0;
1703
1704 mutex_enter(&pp->pr_lock);
1705 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1706 r = pool_chk_page(pp, label, ph);
1707 if (r) {
1708 goto out;
1709 }
1710 }
1711 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1712 r = pool_chk_page(pp, label, ph);
1713 if (r) {
1714 goto out;
1715 }
1716 }
1717 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1718 r = pool_chk_page(pp, label, ph);
1719 if (r) {
1720 goto out;
1721 }
1722 }
1723
1724 out:
1725 mutex_exit(&pp->pr_lock);
1726 return (r);
1727 }
1728
1729 /*
1730 * pool_cache_init:
1731 *
1732 * Initialize a pool cache.
1733 */
1734 pool_cache_t
1735 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1736 const char *wchan, struct pool_allocator *palloc, int ipl,
1737 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1738 {
1739 pool_cache_t pc;
1740
1741 pc = pool_get(&cache_pool, PR_WAITOK);
1742 if (pc == NULL)
1743 return NULL;
1744
1745 pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1746 palloc, ipl, ctor, dtor, arg);
1747
1748 return pc;
1749 }
1750
1751 /*
1752 * pool_cache_bootstrap:
1753 *
1754 * Kernel-private version of pool_cache_init(). The caller
1755 * provides initial storage.
1756 */
1757 void
1758 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1759 u_int align_offset, u_int flags, const char *wchan,
1760 struct pool_allocator *palloc, int ipl,
1761 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1762 void *arg)
1763 {
1764 CPU_INFO_ITERATOR cii;
1765 pool_cache_t pc1;
1766 struct cpu_info *ci;
1767 struct pool *pp;
1768
1769 pp = &pc->pc_pool;
1770 if (palloc == NULL && ipl == IPL_NONE) {
1771 if (size > PAGE_SIZE) {
1772 int bigidx = pool_bigidx(size);
1773
1774 palloc = &pool_allocator_big[bigidx];
1775 } else
1776 palloc = &pool_allocator_nointr;
1777 }
1778 pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1779 mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1780
1781 if (ctor == NULL) {
1782 ctor = (int (*)(void *, void *, int))nullop;
1783 }
1784 if (dtor == NULL) {
1785 dtor = (void (*)(void *, void *))nullop;
1786 }
1787
1788 pc->pc_emptygroups = NULL;
1789 pc->pc_fullgroups = NULL;
1790 pc->pc_partgroups = NULL;
1791 pc->pc_ctor = ctor;
1792 pc->pc_dtor = dtor;
1793 pc->pc_arg = arg;
1794 pc->pc_hits = 0;
1795 pc->pc_misses = 0;
1796 pc->pc_nempty = 0;
1797 pc->pc_npart = 0;
1798 pc->pc_nfull = 0;
1799 pc->pc_contended = 0;
1800 pc->pc_refcnt = 0;
1801 pc->pc_freecheck = NULL;
1802
1803 if ((flags & PR_LARGECACHE) != 0) {
1804 pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1805 pc->pc_pcgpool = &pcg_large_pool;
1806 } else {
1807 pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1808 pc->pc_pcgpool = &pcg_normal_pool;
1809 }
1810
1811 /* Allocate per-CPU caches. */
1812 memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1813 pc->pc_ncpu = 0;
1814 if (ncpu < 2) {
1815 /* XXX For sparc: boot CPU is not attached yet. */
1816 pool_cache_cpu_init1(curcpu(), pc);
1817 } else {
1818 for (CPU_INFO_FOREACH(cii, ci)) {
1819 pool_cache_cpu_init1(ci, pc);
1820 }
1821 }
1822
1823 /* Add to list of all pools. */
1824 if (__predict_true(!cold))
1825 mutex_enter(&pool_head_lock);
1826 TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1827 if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1828 break;
1829 }
1830 if (pc1 == NULL)
1831 TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1832 else
1833 TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1834 if (__predict_true(!cold))
1835 mutex_exit(&pool_head_lock);
1836
1837 membar_sync();
1838 pp->pr_cache = pc;
1839 }
1840
1841 /*
1842 * pool_cache_destroy:
1843 *
1844 * Destroy a pool cache.
1845 */
1846 void
1847 pool_cache_destroy(pool_cache_t pc)
1848 {
1849
1850 pool_cache_bootstrap_destroy(pc);
1851 pool_put(&cache_pool, pc);
1852 }
1853
1854 /*
1855 * pool_cache_bootstrap_destroy:
1856 *
1857 * Destroy a pool cache.
1858 */
1859 void
1860 pool_cache_bootstrap_destroy(pool_cache_t pc)
1861 {
1862 struct pool *pp = &pc->pc_pool;
1863 u_int i;
1864
1865 /* Remove it from the global list. */
1866 mutex_enter(&pool_head_lock);
1867 while (pc->pc_refcnt != 0)
1868 cv_wait(&pool_busy, &pool_head_lock);
1869 TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1870 mutex_exit(&pool_head_lock);
1871
1872 /* First, invalidate the entire cache. */
1873 pool_cache_invalidate(pc);
1874
1875 /* Disassociate it from the pool. */
1876 mutex_enter(&pp->pr_lock);
1877 pp->pr_cache = NULL;
1878 mutex_exit(&pp->pr_lock);
1879
1880 /* Destroy per-CPU data */
1881 for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1882 pool_cache_invalidate_cpu(pc, i);
1883
1884 /* Finally, destroy it. */
1885 mutex_destroy(&pc->pc_lock);
1886 pool_destroy(pp);
1887 }
1888
1889 /*
1890 * pool_cache_cpu_init1:
1891 *
1892 * Called for each pool_cache whenever a new CPU is attached.
1893 */
1894 static void
1895 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1896 {
1897 pool_cache_cpu_t *cc;
1898 int index;
1899
1900 index = ci->ci_index;
1901
1902 KASSERT(index < __arraycount(pc->pc_cpus));
1903
1904 if ((cc = pc->pc_cpus[index]) != NULL) {
1905 KASSERT(cc->cc_cpuindex == index);
1906 return;
1907 }
1908
1909 /*
1910 * The first CPU is 'free'. This needs to be the case for
1911 * bootstrap - we may not be able to allocate yet.
1912 */
1913 if (pc->pc_ncpu == 0) {
1914 cc = &pc->pc_cpu0;
1915 pc->pc_ncpu = 1;
1916 } else {
1917 mutex_enter(&pc->pc_lock);
1918 pc->pc_ncpu++;
1919 mutex_exit(&pc->pc_lock);
1920 cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1921 }
1922
1923 cc->cc_ipl = pc->pc_pool.pr_ipl;
1924 cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1925 cc->cc_cache = pc;
1926 cc->cc_cpuindex = index;
1927 cc->cc_hits = 0;
1928 cc->cc_misses = 0;
1929 cc->cc_current = __UNCONST(&pcg_dummy);
1930 cc->cc_previous = __UNCONST(&pcg_dummy);
1931
1932 pc->pc_cpus[index] = cc;
1933 }
1934
1935 /*
1936 * pool_cache_cpu_init:
1937 *
1938 * Called whenever a new CPU is attached.
1939 */
1940 void
1941 pool_cache_cpu_init(struct cpu_info *ci)
1942 {
1943 pool_cache_t pc;
1944
1945 mutex_enter(&pool_head_lock);
1946 TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1947 pc->pc_refcnt++;
1948 mutex_exit(&pool_head_lock);
1949
1950 pool_cache_cpu_init1(ci, pc);
1951
1952 mutex_enter(&pool_head_lock);
1953 pc->pc_refcnt--;
1954 cv_broadcast(&pool_busy);
1955 }
1956 mutex_exit(&pool_head_lock);
1957 }
1958
1959 /*
1960 * pool_cache_reclaim:
1961 *
1962 * Reclaim memory from a pool cache.
1963 */
1964 bool
1965 pool_cache_reclaim(pool_cache_t pc)
1966 {
1967
1968 return pool_reclaim(&pc->pc_pool);
1969 }
1970
1971 static void
1972 pool_cache_destruct_object1(pool_cache_t pc, void *object)
1973 {
1974
1975 (*pc->pc_dtor)(pc->pc_arg, object);
1976 pool_put(&pc->pc_pool, object);
1977 }
1978
1979 /*
1980 * pool_cache_destruct_object:
1981 *
1982 * Force destruction of an object and its release back into
1983 * the pool.
1984 */
1985 void
1986 pool_cache_destruct_object(pool_cache_t pc, void *object)
1987 {
1988
1989 FREECHECK_IN(&pc->pc_freecheck, object);
1990
1991 pool_cache_destruct_object1(pc, object);
1992 }
1993
1994 /*
1995 * pool_cache_invalidate_groups:
1996 *
1997 * Invalidate a chain of groups and destruct all objects.
1998 */
1999 static void
2000 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
2001 {
2002 void *object;
2003 pcg_t *next;
2004 int i;
2005
2006 for (; pcg != NULL; pcg = next) {
2007 next = pcg->pcg_next;
2008
2009 for (i = 0; i < pcg->pcg_avail; i++) {
2010 object = pcg->pcg_objects[i].pcgo_va;
2011 pool_cache_destruct_object1(pc, object);
2012 }
2013
2014 if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2015 pool_put(&pcg_large_pool, pcg);
2016 } else {
2017 KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2018 pool_put(&pcg_normal_pool, pcg);
2019 }
2020 }
2021 }
2022
2023 /*
2024 * pool_cache_invalidate:
2025 *
2026 * Invalidate a pool cache (destruct and release all of the
2027 * cached objects). Does not reclaim objects from the pool.
2028 *
2029 * Note: For pool caches that provide constructed objects, there
2030 * is an assumption that another level of synchronization is occurring
2031 * between the input to the constructor and the cache invalidation.
2032 *
2033 * Invalidation is a costly process and should not be called from
2034 * interrupt context.
2035 */
2036 void
2037 pool_cache_invalidate(pool_cache_t pc)
2038 {
2039 uint64_t where;
2040 pcg_t *full, *empty, *part;
2041
2042 KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2043
2044 if (ncpu < 2 || !mp_online) {
2045 /*
2046 		 * We might be called early enough in the boot process that
2047 		 * the CPU data structures are not yet fully initialized.
2048 		 * In this case, transfer the contents of the local CPU's
2049 		 * cache back into the global cache, as only this CPU is
2050 		 * currently running.
2051 */
2052 pool_cache_transfer(pc);
2053 } else {
2054 /*
2055 		 * Signal all CPUs that they must transfer their local
2056 		 * cache back to the global pool, then wait for the xcall
2057 		 * to complete.
2058 */
2059 where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2060 pc, NULL);
2061 xc_wait(where);
2062 }
2063
2064 /* Empty pool caches, then invalidate objects */
2065 mutex_enter(&pc->pc_lock);
2066 full = pc->pc_fullgroups;
2067 empty = pc->pc_emptygroups;
2068 part = pc->pc_partgroups;
2069 pc->pc_fullgroups = NULL;
2070 pc->pc_emptygroups = NULL;
2071 pc->pc_partgroups = NULL;
2072 pc->pc_nfull = 0;
2073 pc->pc_nempty = 0;
2074 pc->pc_npart = 0;
2075 mutex_exit(&pc->pc_lock);
2076
2077 pool_cache_invalidate_groups(pc, full);
2078 pool_cache_invalidate_groups(pc, empty);
2079 pool_cache_invalidate_groups(pc, part);
2080 }
2081
2082 /*
2083 * pool_cache_invalidate_cpu:
2084 *
2085 * Invalidate all CPU-bound cached objects in pool cache, the CPU being
2086 * identified by its associated index.
2087  * It is the caller's responsibility to ensure that no operation is
2088  * taking place on this pool cache while the invalidation is in progress.
2089  * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2090  * the cached objects of a CPU other than the one currently running
2091  * may result in undefined behaviour.
2092 */
2093 static void
2094 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2095 {
2096 pool_cache_cpu_t *cc;
2097 pcg_t *pcg;
2098
2099 if ((cc = pc->pc_cpus[index]) == NULL)
2100 return;
2101
2102 if ((pcg = cc->cc_current) != &pcg_dummy) {
2103 pcg->pcg_next = NULL;
2104 pool_cache_invalidate_groups(pc, pcg);
2105 }
2106 if ((pcg = cc->cc_previous) != &pcg_dummy) {
2107 pcg->pcg_next = NULL;
2108 pool_cache_invalidate_groups(pc, pcg);
2109 }
2110 if (cc != &pc->pc_cpu0)
2111 pool_put(&cache_cpu_pool, cc);
2113 }
2114
2115 void
2116 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2117 {
2118
2119 pool_set_drain_hook(&pc->pc_pool, fn, arg);
2120 }
2121
2122 void
2123 pool_cache_setlowat(pool_cache_t pc, int n)
2124 {
2125
2126 pool_setlowat(&pc->pc_pool, n);
2127 }
2128
2129 void
2130 pool_cache_sethiwat(pool_cache_t pc, int n)
2131 {
2132
2133 pool_sethiwat(&pc->pc_pool, n);
2134 }
2135
2136 void
2137 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2138 {
2139
2140 pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2141 }
2142
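/*
 * pool_cache_get_slow:
 *
 *	Slow path for pool_cache_get_paddr(), entered with both per-CPU
 *	groups empty.  Either move a full group from the global cache to
 *	cc_current and return true so the caller retries the fast path,
 *	or fall back to pool_get() plus the constructor, hand the result
 *	back via *objectp, and return false to end the retry loop.
 */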
2143 static bool __noinline
2144 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2145 paddr_t *pap, int flags)
2146 {
2147 pcg_t *pcg, *cur;
2148 uint64_t ncsw;
2149 pool_cache_t pc;
2150 void *object;
2151
2152 KASSERT(cc->cc_current->pcg_avail == 0);
2153 KASSERT(cc->cc_previous->pcg_avail == 0);
2154
2155 pc = cc->cc_cache;
2156 cc->cc_misses++;
2157
2158 /*
2159 	 * Nothing was available locally.  Try to grab a group
2160 	 * from the cache.
2161 */
2162 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2163 ncsw = curlwp->l_ncsw;
2164 mutex_enter(&pc->pc_lock);
2165 pc->pc_contended++;
2166
2167 /*
2168 * If we context switched while locking, then
2169 * our view of the per-CPU data is invalid:
2170 * retry.
2171 */
2172 if (curlwp->l_ncsw != ncsw) {
2173 mutex_exit(&pc->pc_lock);
2174 return true;
2175 }
2176 }
2177
2178 if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2179 /*
2180 * If there's a full group, release our empty
2181 * group back to the cache. Install the full
2182 * group as cc_current and return.
2183 */
2184 if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2185 KASSERT(cur->pcg_avail == 0);
2186 cur->pcg_next = pc->pc_emptygroups;
2187 pc->pc_emptygroups = cur;
2188 pc->pc_nempty++;
2189 }
2190 KASSERT(pcg->pcg_avail == pcg->pcg_size);
2191 cc->cc_current = pcg;
2192 pc->pc_fullgroups = pcg->pcg_next;
2193 pc->pc_hits++;
2194 pc->pc_nfull--;
2195 mutex_exit(&pc->pc_lock);
2196 return true;
2197 }
2198
2199 /*
2200 * Nothing available locally or in cache. Take the slow
2201 * path: fetch a new object from the pool and construct
2202 * it.
2203 */
2204 pc->pc_misses++;
2205 mutex_exit(&pc->pc_lock);
2206 splx(s);
2207
2208 object = pool_get(&pc->pc_pool, flags);
2209 *objectp = object;
2210 if (__predict_false(object == NULL)) {
2211 KASSERT((flags & (PR_WAITOK|PR_NOWAIT)) == PR_NOWAIT);
2212 return false;
2213 }
2214
2215 if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2216 pool_put(&pc->pc_pool, object);
2217 *objectp = NULL;
2218 return false;
2219 }
2220
2221 KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2222 (pc->pc_pool.pr_align - 1)) == 0);
2223
2224 if (pap != NULL) {
2225 #ifdef POOL_VTOPHYS
2226 *pap = POOL_VTOPHYS(object);
2227 #else
2228 *pap = POOL_PADDR_INVALID;
2229 #endif
2230 }
2231
2232 FREECHECK_OUT(&pc->pc_freecheck, object);
2233 pool_redzone_fill(&pc->pc_pool, object);
2234 return false;
2235 }
2236
2237 /*
2238 * pool_cache_get{,_paddr}:
2239 *
2240 * Get an object from a pool cache (optionally returning
2241 * the physical address of the object).
2242 */
2243 void *
2244 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2245 {
2246 pool_cache_cpu_t *cc;
2247 pcg_t *pcg;
2248 void *object;
2249 int s;
2250
2251 KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
2252 KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2253 (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2254 "%s: [%s] is IPL_NONE, but called from interrupt context",
2255 __func__, pc->pc_pool.pr_wchan);
2256
2257 if (flags & PR_WAITOK) {
2258 ASSERT_SLEEPABLE();
2259 }
2260
2261 /* Lock out interrupts and disable preemption. */
2262 s = splvm();
2263 while (/* CONSTCOND */ true) {
2264 		/* Try to allocate an object from the current group. */
2265 cc = pc->pc_cpus[curcpu()->ci_index];
2266 KASSERT(cc->cc_cache == pc);
2267 pcg = cc->cc_current;
2268 if (__predict_true(pcg->pcg_avail > 0)) {
2269 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2270 if (__predict_false(pap != NULL))
2271 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2272 #if defined(DIAGNOSTIC)
2273 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2274 KASSERT(pcg->pcg_avail < pcg->pcg_size);
2275 KASSERT(object != NULL);
2276 #endif
2277 cc->cc_hits++;
2278 splx(s);
2279 FREECHECK_OUT(&pc->pc_freecheck, object);
2280 pool_redzone_fill(&pc->pc_pool, object);
2281 return object;
2282 }
2283
2284 /*
2285 * That failed. If the previous group isn't empty, swap
2286 * it with the current group and allocate from there.
2287 */
2288 pcg = cc->cc_previous;
2289 if (__predict_true(pcg->pcg_avail > 0)) {
2290 cc->cc_previous = cc->cc_current;
2291 cc->cc_current = pcg;
2292 continue;
2293 }
2294
2295 /*
2296 * Can't allocate from either group: try the slow path.
2297 * If get_slow() allocated an object for us, or if
2298 * no more objects are available, it will return false.
2299 * Otherwise, we need to retry.
2300 */
2301 if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2302 break;
2303 }
2304
2305 /*
2306 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
2307 * pool_cache_get can fail even in the PR_WAITOK case, if the
2308 * constructor fails.
2309 */
2310 return object;
2311 }
2312
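/*
 * pool_cache_put_slow:
 *
 *	Slow path for pool_cache_put_paddr(), entered with both per-CPU
 *	groups full.  Try to install an empty group (freshly allocated,
 *	or taken from the global cache) and return true so the caller
 *	retries the fast path; otherwise destruct the object right away
 *	and return false.
 */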
2313 static bool __noinline
2314 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2315 {
2316 struct lwp *l = curlwp;
2317 pcg_t *pcg, *cur;
2318 uint64_t ncsw;
2319 pool_cache_t pc;
2320
2321 KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2322 KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2323
2324 pc = cc->cc_cache;
2325 pcg = NULL;
2326 cc->cc_misses++;
2327 ncsw = l->l_ncsw;
2328
2329 /*
2330 * If there are no empty groups in the cache then allocate one
2331 * while still unlocked.
2332 */
2333 if (__predict_false(pc->pc_emptygroups == NULL)) {
2334 if (__predict_true(!pool_cache_disable)) {
2335 pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2336 }
2337 /*
2338 * If pool_get() blocked, then our view of
2339 * the per-CPU data is invalid: retry.
2340 */
2341 if (__predict_false(l->l_ncsw != ncsw)) {
2342 if (pcg != NULL) {
2343 pool_put(pc->pc_pcgpool, pcg);
2344 }
2345 return true;
2346 }
2347 if (__predict_true(pcg != NULL)) {
2348 pcg->pcg_avail = 0;
2349 pcg->pcg_size = pc->pc_pcgsize;
2350 }
2351 }
2352
2353 /* Lock the cache. */
2354 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2355 mutex_enter(&pc->pc_lock);
2356 pc->pc_contended++;
2357
2358 /*
2359 * If we context switched while locking, then our view of
2360 * the per-CPU data is invalid: retry.
2361 */
2362 if (__predict_false(l->l_ncsw != ncsw)) {
2363 mutex_exit(&pc->pc_lock);
2364 if (pcg != NULL) {
2365 pool_put(pc->pc_pcgpool, pcg);
2366 }
2367 return true;
2368 }
2369 }
2370
2371 /* If there are no empty groups in the cache then allocate one. */
2372 if (pcg == NULL && pc->pc_emptygroups != NULL) {
2373 pcg = pc->pc_emptygroups;
2374 pc->pc_emptygroups = pcg->pcg_next;
2375 pc->pc_nempty--;
2376 }
2377
2378 /*
2379 	 * If there's an empty group, release our full group back
2380 	 * to the cache.  Install the empty group on the local CPU
2381 	 * and return.
2382 */
2383 if (pcg != NULL) {
2384 KASSERT(pcg->pcg_avail == 0);
2385 if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2386 cc->cc_previous = pcg;
2387 } else {
2388 cur = cc->cc_current;
2389 if (__predict_true(cur != &pcg_dummy)) {
2390 KASSERT(cur->pcg_avail == cur->pcg_size);
2391 cur->pcg_next = pc->pc_fullgroups;
2392 pc->pc_fullgroups = cur;
2393 pc->pc_nfull++;
2394 }
2395 cc->cc_current = pcg;
2396 }
2397 pc->pc_hits++;
2398 mutex_exit(&pc->pc_lock);
2399 return true;
2400 }
2401
2402 /*
2403 * Nothing available locally or in cache, and we didn't
2404 * allocate an empty group. Take the slow path and destroy
2405 * the object here and now.
2406 */
2407 pc->pc_misses++;
2408 mutex_exit(&pc->pc_lock);
2409 splx(s);
2410 pool_cache_destruct_object(pc, object);
2411
2412 return false;
2413 }
2414
2415 /*
2416 * pool_cache_put{,_paddr}:
2417 *
2418 * Put an object back to the pool cache (optionally caching the
2419 * physical address of the object).
2420 */
2421 void
2422 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2423 {
2424 pool_cache_cpu_t *cc;
2425 pcg_t *pcg;
2426 int s;
2427
2428 KASSERT(object != NULL);
2429 pool_redzone_check(&pc->pc_pool, object);
2430 FREECHECK_IN(&pc->pc_freecheck, object);
2431
2432 /* Lock out interrupts and disable preemption. */
2433 s = splvm();
2434 while (/* CONSTCOND */ true) {
2435 /* If the current group isn't full, release it there. */
2436 cc = pc->pc_cpus[curcpu()->ci_index];
2437 KASSERT(cc->cc_cache == pc);
2438 pcg = cc->cc_current;
2439 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2440 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2441 pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2442 pcg->pcg_avail++;
2443 cc->cc_hits++;
2444 splx(s);
2445 return;
2446 }
2447
2448 /*
2449 * That failed. If the previous group isn't full, swap
2450 * it with the current group and try again.
2451 */
2452 pcg = cc->cc_previous;
2453 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2454 cc->cc_previous = cc->cc_current;
2455 cc->cc_current = pcg;
2456 continue;
2457 }
2458
2459 /*
2460 * Can't free to either group: try the slow path.
2461 * If put_slow() releases the object for us, it
2462 * will return false. Otherwise we need to retry.
2463 */
2464 if (!pool_cache_put_slow(cc, s, object))
2465 break;
2466 }
2467 }
2468
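/*
 * Illustrative sketch only (not part of this file): a hypothetical
 * consumer with a pool cache `foo_cache' would typically pair the two
 * operations above roughly as follows:
 *
 *	struct foo *f;
 *	paddr_t pa;
 *
 *	f = pool_cache_get_paddr(foo_cache, PR_WAITOK, &pa);
 *	if (f != NULL) {
 *		... use f (and pa, if the backend provides one) ...
 *		pool_cache_put_paddr(foo_cache, f, pa);
 *	}
 *
 * Note that even with PR_WAITOK the get may return NULL if the
 * object constructor fails, so the NULL check is still required.
 */
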
2469 /*
2470 * pool_cache_transfer:
2471 *
2472 * Transfer objects from the per-CPU cache to the global cache.
2473 * Run within a cross-call thread.
2474 */
2475 static void
2476 pool_cache_transfer(pool_cache_t pc)
2477 {
2478 pool_cache_cpu_t *cc;
2479 pcg_t *prev, *cur, **list;
2480 int s;
2481
2482 s = splvm();
2483 mutex_enter(&pc->pc_lock);
2484 cc = pc->pc_cpus[curcpu()->ci_index];
2485 cur = cc->cc_current;
2486 cc->cc_current = __UNCONST(&pcg_dummy);
2487 prev = cc->cc_previous;
2488 cc->cc_previous = __UNCONST(&pcg_dummy);
2489 if (cur != &pcg_dummy) {
2490 if (cur->pcg_avail == cur->pcg_size) {
2491 list = &pc->pc_fullgroups;
2492 pc->pc_nfull++;
2493 } else if (cur->pcg_avail == 0) {
2494 list = &pc->pc_emptygroups;
2495 pc->pc_nempty++;
2496 } else {
2497 list = &pc->pc_partgroups;
2498 pc->pc_npart++;
2499 }
2500 cur->pcg_next = *list;
2501 *list = cur;
2502 }
2503 if (prev != &pcg_dummy) {
2504 if (prev->pcg_avail == prev->pcg_size) {
2505 list = &pc->pc_fullgroups;
2506 pc->pc_nfull++;
2507 } else if (prev->pcg_avail == 0) {
2508 list = &pc->pc_emptygroups;
2509 pc->pc_nempty++;
2510 } else {
2511 list = &pc->pc_partgroups;
2512 pc->pc_npart++;
2513 }
2514 prev->pcg_next = *list;
2515 *list = prev;
2516 }
2517 mutex_exit(&pc->pc_lock);
2518 splx(s);
2519 }
2520
2521 /*
2522 * Pool backend allocators.
2523 *
2524 * Each pool has a backend allocator that handles allocation, deallocation,
2525 * and any additional draining that might be needed.
2526 *
2527 * We provide two standard allocators:
2528 *
2529 * pool_allocator_kmem - the default when no allocator is specified
2530 *
2531 * pool_allocator_nointr - used for pools that will not be accessed
2532 * in interrupt context.
2533 */
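
/*
 * Illustrative sketch only (names are hypothetical): a pool that is
 * never used from interrupt context could be created against the
 * nointr allocator roughly like this:
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr, IPL_NONE);
 *
 * Passing a NULL allocator to pool_init() selects pool_allocator_kmem
 * by default, as noted above.
 */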
2534 void *pool_page_alloc(struct pool *, int);
2535 void pool_page_free(struct pool *, void *);
2536
2537 #ifdef POOL_SUBPAGE
2538 struct pool_allocator pool_allocator_kmem_fullpage = {
2539 .pa_alloc = pool_page_alloc,
2540 .pa_free = pool_page_free,
2541 .pa_pagesz = 0
2542 };
2543 #else
2544 struct pool_allocator pool_allocator_kmem = {
2545 .pa_alloc = pool_page_alloc,
2546 .pa_free = pool_page_free,
2547 .pa_pagesz = 0
2548 };
2549 #endif
2550
2551 #ifdef POOL_SUBPAGE
2552 struct pool_allocator pool_allocator_nointr_fullpage = {
2553 .pa_alloc = pool_page_alloc,
2554 .pa_free = pool_page_free,
2555 .pa_pagesz = 0
2556 };
2557 #else
2558 struct pool_allocator pool_allocator_nointr = {
2559 .pa_alloc = pool_page_alloc,
2560 .pa_free = pool_page_free,
2561 .pa_pagesz = 0
2562 };
2563 #endif
2564
2565 #ifdef POOL_SUBPAGE
2566 void *pool_subpage_alloc(struct pool *, int);
2567 void pool_subpage_free(struct pool *, void *);
2568
2569 struct pool_allocator pool_allocator_kmem = {
2570 .pa_alloc = pool_subpage_alloc,
2571 .pa_free = pool_subpage_free,
2572 .pa_pagesz = POOL_SUBPAGE
2573 };
2574
2575 struct pool_allocator pool_allocator_nointr = {
2576 .pa_alloc = pool_subpage_alloc,
2577 .pa_free = pool_subpage_free,
2578 .pa_pagesz = POOL_SUBPAGE
2579 };
2580 #endif /* POOL_SUBPAGE */
2581
2582 struct pool_allocator pool_allocator_big[] = {
2583 {
2584 .pa_alloc = pool_page_alloc,
2585 .pa_free = pool_page_free,
2586 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
2587 },
2588 {
2589 .pa_alloc = pool_page_alloc,
2590 .pa_free = pool_page_free,
2591 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
2592 },
2593 {
2594 .pa_alloc = pool_page_alloc,
2595 .pa_free = pool_page_free,
2596 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
2597 },
2598 {
2599 .pa_alloc = pool_page_alloc,
2600 .pa_free = pool_page_free,
2601 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
2602 },
2603 {
2604 .pa_alloc = pool_page_alloc,
2605 .pa_free = pool_page_free,
2606 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
2607 },
2608 {
2609 .pa_alloc = pool_page_alloc,
2610 .pa_free = pool_page_free,
2611 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
2612 },
2613 {
2614 .pa_alloc = pool_page_alloc,
2615 .pa_free = pool_page_free,
2616 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
2617 },
2618 {
2619 .pa_alloc = pool_page_alloc,
2620 .pa_free = pool_page_free,
2621 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
2622 }
2623 };
2624
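/*
 * pool_bigidx:
 *
 *	Map an item size to the index of the smallest entry in
 *	pool_allocator_big[] whose page size,
 *	1 << (idx + POOL_ALLOCATOR_BIG_BASE), can hold it.  Panics if
 *	the size exceeds the largest entry.
 */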
2625 static int
2626 pool_bigidx(size_t size)
2627 {
2628 int i;
2629
2630 for (i = 0; i < __arraycount(pool_allocator_big); i++) {
2631 		if (((size_t)1 << (i + POOL_ALLOCATOR_BIG_BASE)) >= size)
2632 return i;
2633 }
2634 panic("pool item size %zu too large, use a custom allocator", size);
2635 }
2636
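/*
 * pool_allocator_alloc:
 *
 *	Allocate a page of backing memory for a pool through its
 *	backend allocator.  If a non-sleeping attempt fails, run the
 *	pool's drain hook (when set) and retry once.
 */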
2637 static void *
2638 pool_allocator_alloc(struct pool *pp, int flags)
2639 {
2640 struct pool_allocator *pa = pp->pr_alloc;
2641 void *res;
2642
2643 res = (*pa->pa_alloc)(pp, flags);
2644 if (res == NULL && (flags & PR_WAITOK) == 0) {
2645 /*
2646 * We only run the drain hook here if PR_NOWAIT.
2647 * In other cases, the hook will be run in
2648 * pool_reclaim().
2649 */
2650 if (pp->pr_drain_hook != NULL) {
2651 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2652 res = (*pa->pa_alloc)(pp, flags);
2653 }
2654 }
2655 return res;
2656 }
2657
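/*
 * pool_allocator_free:
 *
 *	Return a page of backing memory to the pool's backend allocator.
 */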
2658 static void
2659 pool_allocator_free(struct pool *pp, void *v)
2660 {
2661 struct pool_allocator *pa = pp->pr_alloc;
2662
2663 (*pa->pa_free)(pp, v);
2664 }
2665
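/*
 * pool_page_alloc:
 *
 *	Default page-sized backend allocation: grab pa_pagesz bytes
 *	from kmem_va_arena, sleeping only if PR_WAITOK was passed.
 */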
2666 void *
2667 pool_page_alloc(struct pool *pp, int flags)
2668 {
2669 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2670 vmem_addr_t va;
2671 int ret;
2672
2673 ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2674 vflags | VM_INSTANTFIT, &va);
2675
2676 return ret ? NULL : (void *)va;
2677 }
2678
2679 void
2680 pool_page_free(struct pool *pp, void *v)
2681 {
2682
2683 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2684 }
2685
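/*
 * pool_page_alloc_meta:
 *
 *	Like pool_page_alloc(), but allocated from kmem_meta_arena;
 *	intended for the pools that hold pool metadata (hence the name).
 */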
2686 static void *
2687 pool_page_alloc_meta(struct pool *pp, int flags)
2688 {
2689 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2690 vmem_addr_t va;
2691 int ret;
2692
2693 ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2694 vflags | VM_INSTANTFIT, &va);
2695
2696 return ret ? NULL : (void *)va;
2697 }
2698
2699 static void
2700 pool_page_free_meta(struct pool *pp, void *v)
2701 {
2702
2703 vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2704 }
2705
2706 #ifdef POOL_REDZONE
2707 #if defined(_LP64)
2708 # define PRIME 0x9e37fffffffc0000UL
2709 #else /* defined(_LP64) */
2710 # define PRIME 0x9e3779b1
2711 #endif /* defined(_LP64) */
2712 #define STATIC_BYTE 0xFE
2713 CTASSERT(POOL_REDZONE_SIZE > 1);
2714
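/*
 * pool_pattern_generate:
 *
 *	Derive the red-zone fill byte for a given address: multiply the
 *	address by a large prime and keep only the most significant
 *	byte, so neighbouring bytes get distinct but reproducible
 *	patterns.
 */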
2715 static inline uint8_t
2716 pool_pattern_generate(const void *p)
2717 {
2718 	return (uint8_t)((((uintptr_t)p) * PRIME)
2719 	    >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
2720 }
2721
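/*
 * pool_redzone_init:
 *
 *	Decide whether the pool can carry a red zone after each item,
 *	either inside the existing alignment padding or by growing
 *	pr_size, and remember the originally requested item size.
 */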
2722 static void
2723 pool_redzone_init(struct pool *pp, size_t requested_size)
2724 {
2725 size_t nsz;
2726
2727 if (pp->pr_roflags & PR_NOTOUCH) {
2728 pp->pr_reqsize = 0;
2729 pp->pr_redzone = false;
2730 return;
2731 }
2732
2733 /*
2734 * We may have extended the requested size earlier; check if
2735 * there's naturally space in the padding for a red zone.
2736 */
2737 if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2738 pp->pr_reqsize = requested_size;
2739 pp->pr_redzone = true;
2740 return;
2741 }
2742
2743 /*
2744 * No space in the natural padding; check if we can extend a
2745 * bit the size of the pool.
2746 */
2747 nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2748 if (nsz <= pp->pr_alloc->pa_pagesz) {
2749 /* Ok, we can */
2750 pp->pr_size = nsz;
2751 pp->pr_reqsize = requested_size;
2752 pp->pr_redzone = true;
2753 } else {
2754 /* No space for a red zone... snif :'( */
2755 pp->pr_reqsize = 0;
2756 pp->pr_redzone = false;
2757 printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2758 }
2759 }
2760
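/*
 * pool_redzone_fill:
 *
 *	Write the address-derived pattern into the red zone that
 *	follows the requested item size, making sure the first byte
 *	is never '\0'.
 */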
2761 static void
2762 pool_redzone_fill(struct pool *pp, void *p)
2763 {
2764 uint8_t *cp, pat;
2765 const uint8_t *ep;
2766
2767 if (!pp->pr_redzone)
2768 return;
2769
2770 cp = (uint8_t *)p + pp->pr_reqsize;
2771 ep = cp + POOL_REDZONE_SIZE;
2772
2773 /*
2774 * We really don't want the first byte of the red zone to be '\0';
2775 * an off-by-one in a string may not be properly detected.
2776 */
2777 pat = pool_pattern_generate(cp);
2778 *cp = (pat == '\0') ? STATIC_BYTE: pat;
2779 cp++;
2780
2781 while (cp < ep) {
2782 *cp = pool_pattern_generate(cp);
2783 cp++;
2784 }
2785 }
2786
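/*
 * pool_redzone_check:
 *
 *	Verify that the red zone still contains the expected pattern;
 *	a mismatch means the item was overflowed, and we panic.
 */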
2787 static void
2788 pool_redzone_check(struct pool *pp, void *p)
2789 {
2790 uint8_t *cp, pat, expected;
2791 const uint8_t *ep;
2792
2793 if (!pp->pr_redzone)
2794 return;
2795
2796 cp = (uint8_t *)p + pp->pr_reqsize;
2797 ep = cp + POOL_REDZONE_SIZE;
2798
2799 pat = pool_pattern_generate(cp);
2800 expected = (pat == '\0') ? STATIC_BYTE: pat;
2801 if (expected != *cp) {
2802 panic("%s: %p: 0x%02x != 0x%02x\n",
2803 __func__, cp, *cp, expected);
2804 }
2805 cp++;
2806
2807 while (cp < ep) {
2808 expected = pool_pattern_generate(cp);
2809 if (*cp != expected) {
2810 panic("%s: %p: 0x%02x != 0x%02x\n",
2811 __func__, cp, *cp, expected);
2812 }
2813 cp++;
2814 }
2815 }
2816
2817 #endif /* POOL_REDZONE */
2818
2819
2820 #ifdef POOL_SUBPAGE
2821 /* Sub-page allocator, for machines with large hardware pages. */
2822 void *
2823 pool_subpage_alloc(struct pool *pp, int flags)
2824 {
2825 return pool_get(&psppool, flags);
2826 }
2827
2828 void
2829 pool_subpage_free(struct pool *pp, void *v)
2830 {
2831 pool_put(&psppool, v);
2832 }
2833
2834 #endif /* POOL_SUBPAGE */
2835
2836 #if defined(DDB)
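/*
 * Helpers for pool_whatis() below: check whether an address falls
 * within a given pool page, item or cache group, and whether the
 * item containing it is currently allocated.
 */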
2837 static bool
2838 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2839 {
2840
2841 return (uintptr_t)ph->ph_page <= addr &&
2842 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2843 }
2844
2845 static bool
2846 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2847 {
2848
2849 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2850 }
2851
2852 static bool
2853 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2854 {
2855 int i;
2856
2857 if (pcg == NULL) {
2858 return false;
2859 }
2860 for (i = 0; i < pcg->pcg_avail; i++) {
2861 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2862 return true;
2863 }
2864 }
2865 return false;
2866 }
2867
2868 static bool
2869 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2870 {
2871
2872 if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2873 unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2874 pool_item_bitmap_t *bitmap =
2875 ph->ph_bitmap + (idx / BITMAP_SIZE);
2876 pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2877
2878 return (*bitmap & mask) == 0;
2879 } else {
2880 struct pool_item *pi;
2881
2882 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2883 if (pool_in_item(pp, pi, addr)) {
2884 return false;
2885 }
2886 }
2887 return true;
2888 }
2889 }
2890
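/*
 * pool_whatis:
 *
 *	ddb(4) helper: given an arbitrary address, report which pool
 *	page (if any) contains it and whether the enclosing item is
 *	free, allocated, cached globally or cached per-CPU.
 */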
2891 void
2892 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2893 {
2894 struct pool *pp;
2895
2896 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2897 struct pool_item_header *ph;
2898 uintptr_t item;
2899 bool allocated = true;
2900 bool incache = false;
2901 bool incpucache = false;
2902 char cpucachestr[32];
2903
2904 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2905 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2906 if (pool_in_page(pp, ph, addr)) {
2907 goto found;
2908 }
2909 }
2910 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2911 if (pool_in_page(pp, ph, addr)) {
2912 allocated =
2913 pool_allocated(pp, ph, addr);
2914 goto found;
2915 }
2916 }
2917 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2918 if (pool_in_page(pp, ph, addr)) {
2919 allocated = false;
2920 goto found;
2921 }
2922 }
2923 continue;
2924 } else {
2925 ph = pr_find_pagehead_noalign(pp, (void *)addr);
2926 if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2927 continue;
2928 }
2929 allocated = pool_allocated(pp, ph, addr);
2930 }
2931 found:
2932 if (allocated && pp->pr_cache) {
2933 pool_cache_t pc = pp->pr_cache;
2934 struct pool_cache_group *pcg;
2935 int i;
2936
2937 for (pcg = pc->pc_fullgroups; pcg != NULL;
2938 pcg = pcg->pcg_next) {
2939 if (pool_in_cg(pp, pcg, addr)) {
2940 incache = true;
2941 goto print;
2942 }
2943 }
2944 for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
2945 pool_cache_cpu_t *cc;
2946
2947 if ((cc = pc->pc_cpus[i]) == NULL) {
2948 continue;
2949 }
2950 if (pool_in_cg(pp, cc->cc_current, addr) ||
2951 pool_in_cg(pp, cc->cc_previous, addr)) {
2952 struct cpu_info *ci =
2953 cpu_lookup(i);
2954
2955 incpucache = true;
2956 snprintf(cpucachestr,
2957 sizeof(cpucachestr),
2958 "cached by CPU %u",
2959 ci->ci_index);
2960 goto print;
2961 }
2962 }
2963 }
2964 print:
2965 item = (uintptr_t)ph->ph_page + ph->ph_off;
2966 item = item + rounddown(addr - item, pp->pr_size);
2967 (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
2968 	    (void *)addr, (void *)item, (size_t)(addr - item),
2969 pp->pr_wchan,
2970 incpucache ? cpucachestr :
2971 incache ? "cached" : allocated ? "allocated" : "free");
2972 }
2973 }
2974 #endif /* defined(DDB) */
2975
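/*
 * pool_sysctl:
 *
 *	Handler for the kern.pool sysctl node: copy out one struct
 *	pool_sysctl worth of statistics per pool, or report the
 *	required buffer size when called with a NULL oldp.
 */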
2976 static int
2977 pool_sysctl(SYSCTLFN_ARGS)
2978 {
2979 struct pool_sysctl data;
2980 struct pool *pp;
2981 struct pool_cache *pc;
2982 pool_cache_cpu_t *cc;
2983 int error;
2984 size_t i, written;
2985
2986 if (oldp == NULL) {
2987 *oldlenp = 0;
2988 TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2989 *oldlenp += sizeof(data);
2990 return 0;
2991 }
2992
2993 memset(&data, 0, sizeof(data));
2994 error = 0;
2995 written = 0;
2996 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2997 if (written + sizeof(data) > *oldlenp)
2998 break;
2999 strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
3000 data.pr_pagesize = pp->pr_alloc->pa_pagesz;
3001 data.pr_flags = pp->pr_roflags | pp->pr_flags;
3002 #define COPY(field) data.field = pp->field
3003 COPY(pr_size);
3004
3005 COPY(pr_itemsperpage);
3006 COPY(pr_nitems);
3007 COPY(pr_nout);
3008 COPY(pr_hardlimit);
3009 COPY(pr_npages);
3010 COPY(pr_minpages);
3011 COPY(pr_maxpages);
3012
3013 COPY(pr_nget);
3014 COPY(pr_nfail);
3015 COPY(pr_nput);
3016 COPY(pr_npagealloc);
3017 COPY(pr_npagefree);
3018 COPY(pr_hiwat);
3019 COPY(pr_nidle);
3020 #undef COPY
3021
3022 data.pr_cache_nmiss_pcpu = 0;
3023 data.pr_cache_nhit_pcpu = 0;
3024 if (pp->pr_cache) {
3025 pc = pp->pr_cache;
3026 data.pr_cache_meta_size = pc->pc_pcgsize;
3027 data.pr_cache_nfull = pc->pc_nfull;
3028 data.pr_cache_npartial = pc->pc_npart;
3029 data.pr_cache_nempty = pc->pc_nempty;
3030 data.pr_cache_ncontended = pc->pc_contended;
3031 data.pr_cache_nmiss_global = pc->pc_misses;
3032 data.pr_cache_nhit_global = pc->pc_hits;
3033 for (i = 0; i < pc->pc_ncpu; ++i) {
3034 cc = pc->pc_cpus[i];
3035 if (cc == NULL)
3036 continue;
3037 data.pr_cache_nmiss_pcpu += cc->cc_misses;
3038 data.pr_cache_nhit_pcpu += cc->cc_hits;
3039 }
3040 } else {
3041 data.pr_cache_meta_size = 0;
3042 data.pr_cache_nfull = 0;
3043 data.pr_cache_npartial = 0;
3044 data.pr_cache_nempty = 0;
3045 data.pr_cache_ncontended = 0;
3046 data.pr_cache_nmiss_global = 0;
3047 data.pr_cache_nhit_global = 0;
3048 }
3049
3050 error = sysctl_copyout(l, &data, oldp, sizeof(data));
3051 if (error)
3052 break;
3053 written += sizeof(data);
3054 oldp = (char *)oldp + sizeof(data);
3055 }
3056
3057 *oldlenp = written;
3058 return error;
3059 }
3060
3061 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
3062 {
3063 const struct sysctlnode *rnode = NULL;
3064
3065 sysctl_createv(clog, 0, NULL, &rnode,
3066 CTLFLAG_PERMANENT,
3067 CTLTYPE_STRUCT, "pool",
3068 SYSCTL_DESCR("Get pool statistics"),
3069 pool_sysctl, 0, NULL, 0,
3070 CTL_KERN, CTL_CREATE, CTL_EOL);
3071 }
3072