/*	$NetBSD: subr_pool.c,v 1.252.2.5 2025/05/29 09:49:13 martin Exp $	*/

/*
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
 * Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.252.2.5 2025/05/29 09:49:13 martin Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_pool.h"
#include "opt_kleak.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vmem.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/asan.h>

#include <uvm/uvm_extern.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
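
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a minimal fixed-size allocator built on this API.  The item type, the
 * pool name and the "examplepl" wait channel are hypothetical.
 */
#if 0
struct example_item {
	int	ei_value;
};

static struct pool example_pool;

static void
example_init(void)
{
	/* One pool_init() per pool, before the first pool_get(). */
	pool_init(&example_pool, sizeof(struct example_item), 0, 0, 0,
	    "examplepl", NULL, IPL_NONE);
}

static void
example_use(void)
{
	struct example_item *ei;

	/* PR_WAITOK sleeps for memory; pool_put() returns the item. */
	ei = pool_get(&example_pool, PR_WAITOK);
	ei->ei_value = 1;
	pool_put(&example_pool, ei);
}
#endif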

/* List of all pools. Non-static, as needed by 'vmstat -m' */
TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? BITMAP_MIN_SIZE : BITMAP_SIZE * (1 << (idx)))

#if defined(KASAN)
#define POOL_REDZONE
#endif

#ifdef POOL_REDZONE
# ifdef KASAN
#  define POOL_REDZONE_SIZE 8
# else
#  define POOL_REDZONE_SIZE 2
# endif
static void pool_redzone_init(struct pool *, size_t);
static void pool_redzone_fill(struct pool *, void *);
static void pool_redzone_check(struct pool *, void *);
static void pool_cache_redzone_check(pool_cache_t, void *);
#else
# define pool_redzone_init(pp, sz)		__nothing
# define pool_redzone_fill(pp, ptr)		__nothing
# define pool_redzone_check(pp, ptr)		__nothing
# define pool_cache_redzone_check(pc, ptr)	__nothing
#endif

#ifdef KLEAK
static void pool_kleak_fill(struct pool *, void *);
static void pool_cache_kleak_fill(pool_cache_t, void *);
#else
#define pool_kleak_fill(pp, ptr)	__nothing
#define pool_cache_kleak_fill(pc, ptr)	__nothing
#endif

#ifdef POOL_QUARANTINE
static void pool_quarantine_init(struct pool *);
static void pool_quarantine_flush(struct pool *);
static bool pool_put_quarantine(struct pool *, void *,
    struct pool_pagelist *);
static bool pool_cache_put_quarantine(pool_cache_t, void *, paddr_t);
#else
#define pool_quarantine_init(a)			__nothing
#define pool_quarantine_flush(a)		__nothing
#define pool_put_quarantine(a, b, c)		false
#define pool_cache_put_quarantine(a, b, c)	false
#endif

#define pc_has_ctor(pc) \
	(pc->pc_ctor != (int (*)(void *, void *, int))nullop)
#define pc_has_dtor(pc) \
	(pc->pc_dtor != (void (*)(void *, void *))nullop)

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
struct pool_allocator pool_allocator_meta = {
	.pa_alloc = pool_page_alloc_meta,
	.pa_free = pool_page_free_meta,
	.pa_pagesz = 0
};

#define POOL_ALLOCATOR_BIG_BASE 13
extern struct pool_allocator pool_allocator_big[];
static int pool_bigidx(size_t);

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

static unsigned int poolid_counter = 0;

typedef uint32_t pool_item_bitmap_t;
#define BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define BITMAP_MASK	(BITMAP_SIZE - 1)
#define BITMAP_MIN_SIZE	(CHAR_BIT * sizeof(((struct pool_item_header *)NULL)->ph_u2))
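
/*
 * Worked example (editor's addition): with the 32-bit bitmap words above,
 * item index 37 lives in word 37 / BITMAP_SIZE = 1, at bit
 * 37 & BITMAP_MASK = 5, i.e. mask 1U << 5 = 0x20.  A set bit means the
 * item is free; see pr_item_bitmap_put()/pr_item_bitmap_get() below.
 */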

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
			ph_pagelist;	/* pool page list */
	union {
		/* !PR_PHINPAGE */
		struct {
			SPLAY_ENTRY(pool_item_header)
				phu_node;	/* off-page page headers */
		} phu_offpage;
		/* PR_PHINPAGE */
		struct {
			unsigned int phu_poolid;
		} phu_onpage;
	} ph_u1;
	void *		ph_page;	/* this page's address */
	uint32_t	ph_time;	/* last referenced */
	uint16_t	ph_nmissing;	/* # of chunks in use */
	uint16_t	ph_off;		/* start offset in page */
	union {
		/* !PR_USEBMAP */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_USEBMAP */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u2;
};
#define ph_node		ph_u1.phu_offpage.phu_node
#define ph_poolid	ph_u1.phu_onpage.phu_poolid
#define ph_itemlist	ph_u2.phu_normal.phu_itemlist
#define ph_bitmap	ph_u2.phu_notouch.phu_bitmap

#define PHSIZE	ALIGN(sizeof(struct pool_item_header))

CTASSERT(offsetof(struct pool_item_header, ph_u2) +
    BITMAP_MIN_SIZE / CHAR_BIT == sizeof(struct pool_item_header));

#if defined(DIAGNOSTIC) && !defined(KASAN)
#define POOL_CHECK_MAGIC
#endif

struct pool_item {
#ifdef POOL_CHECK_MAGIC
	u_int pi_magic;
#endif
#define PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define POOL_NEEDS_CATCHUP(pp) \
	((pp)->pr_nitems < (pp)->pr_minitems)
#define POOL_OBJ_TO_PAGE(pp, v) \
	(void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask)
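
/*
 * Worked example (editor's addition): assuming the default 4 KiB page
 * size, pa_pagemask is ~(4096 - 1) = ~0xfff, so POOL_OBJ_TO_PAGE maps an
 * item at, say, 0xffff800012345678 to its page base 0xffff800012345000.
 * This masking only works for page-aligned pool pages; PR_NOALIGN pools
 * use the splay tree lookup in pr_find_pagehead_noalign() instead.
 */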

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem. This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups. Each cache group references up
 * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group. When a cache group frees an object back to the
 * pool, it first calls the object's destructor. This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well. Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools. By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
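
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a pool cache with a constructor/destructor pair, reusing the
 * hypothetical example_item from the sketch above.  The "excache" wait
 * channel and the ctor/dtor bodies are likewise hypothetical.
 */
#if 0
static pool_cache_t example_cache;

static int
example_ctor(void *arg, void *obj, int flags)
{
	struct example_item *ei = obj;

	/* Expensive setup runs once per object, not once per get. */
	ei->ei_value = 0;
	return 0;
}

static void
example_dtor(void *arg, void *obj)
{
	/* Teardown runs only when the object leaves the cache for good. */
}

static void
example_cache_init(void)
{
	example_cache = pool_cache_init(sizeof(struct example_item), 0, 0, 0,
	    "excache", NULL, IPL_NONE, example_ctor, example_dtor, NULL);
}

static void
example_cache_use(void)
{
	struct example_item *ei;

	ei = pool_cache_get(example_cache, PR_WAITOK);
	pool_cache_put(example_cache, ei);	/* stays constructed */
}
#endif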

static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
		    void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
		    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_transfer(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...) __printflike(1, 2));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...) __printflike(1, 2));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/* -------------------------------------------------------------------------- */

static inline unsigned int
pr_item_bitmap_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_USEBMAP);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;

	if (__predict_false(idx >= pp->pr_itemsperpage)) {
		panic("%s: [%s] %u >= %u", __func__, pp->pr_wchan, idx,
		    pp->pr_itemsperpage);
	}

	return idx;
}

static inline void
pr_item_bitmap_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_bitmap_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK);

	if (__predict_false((*bitmap & mask) != 0)) {
		panic("%s: [%s] %p already freed", __func__, pp->pr_wchan, obj);
	}

	*bitmap |= mask;
}

static inline void *
pr_item_bitmap_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1U << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline void
pr_item_bitmap_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}

/* -------------------------------------------------------------------------- */

static inline void
pr_item_linkedlist_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	struct pool_item *pi = obj;

#ifdef POOL_CHECK_MAGIC
	pi->pi_magic = PI_MAGIC;
#endif

	if (pp->pr_redzone) {
		/*
		 * Mark the pool_item as valid. The rest is already
		 * invalid.
		 */
		kasan_mark(pi, sizeof(*pi), sizeof(*pi), 0);
	}

	LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
}

static inline void *
pr_item_linkedlist_get(struct pool *pp, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *v;

	v = pi = LIST_FIRST(&ph->ph_itemlist);
	if (__predict_false(v == NULL)) {
		mutex_exit(&pp->pr_lock);
		panic("%s: [%s] page empty", __func__, pp->pr_wchan);
	}
	KASSERTMSG((pp->pr_nitems > 0),
	    "%s: [%s] nitems %u inconsistent on itemlist",
	    __func__, pp->pr_wchan, pp->pr_nitems);
#ifdef POOL_CHECK_MAGIC
	KASSERTMSG((pi->pi_magic == PI_MAGIC),
	    "%s: [%s] free list modified: "
	    "magic=%x; page %p; item addr %p", __func__,
	    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
#endif

	/*
	 * Remove from item list.
	 */
	LIST_REMOVE(pi, pi_list);

	return v;
}

/* -------------------------------------------------------------------------- */

static inline void
pr_phinpage_check(struct pool *pp, struct pool_item_header *ph, void *page,
    void *object)
{
	if (__predict_false((void *)ph->ph_page != page)) {
		panic("%s: [%s] item %p not part of pool", __func__,
		    pp->pr_wchan, object);
	}
	if (__predict_false((char *)object < (char *)page + ph->ph_off)) {
		panic("%s: [%s] item %p below item space", __func__,
		    pp->pr_wchan, object);
	}
	if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
		panic("%s: [%s] item %p poolid %u != %u", __func__,
		    pp->pr_wchan, object, ph->ph_poolid, pp->pr_poolid);
	}
}

static inline void
pc_phinpage_check(pool_cache_t pc, void *object)
{
	struct pool_item_header *ph;
	struct pool *pp;
	void *page;

	pp = &pc->pc_pool;
	page = POOL_OBJ_TO_PAGE(pp, object);
	ph = (struct pool_item_header *)page;

	pr_phinpage_check(pp, ph, page, object);
}

/* -------------------------------------------------------------------------- */

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider pool_item_header with smaller ph_page bigger. This
	 * unnatural ordering is for the benefit of pr_find_pagehead.
	 */
	if (a->ph_page < b->ph_page)
		return 1;
	else if (a->ph_page > b->ph_page)
		return -1;
	else
		return 0;
}
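
/*
 * Worked example (editor's addition): suppose headers exist for pages at
 * 0x1000 and 0x2000 and pr_find_pagehead_noalign() below looks up an item
 * at 0x1800.  The reversed comparison sorts the tree by decreasing
 * ph_page, so the in-order successor of a 0x1800 probe is the 0x1000
 * header: SPLAY_NEXT() yields the header with the largest ph_page that
 * is still below the item address.
 */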

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page = POOL_OBJ_TO_PAGE(pp, v);
		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)page;
			pr_phinpage_check(pp, ph, page, v);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
    struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle != 0);
		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
		    "%s: [%s] nitems=%u < itemsperpage=%u", __func__,
		    pp->pr_wchan, pp->pr_nitems, pp->pr_itemsperpage);
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if (pp->pr_roflags & PR_PHINPAGE) {
		if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
			panic("%s: [%s] ph %p poolid %u != %u",
			    __func__, pp->pr_wchan, ph, ph->ph_poolid,
			    pp->pr_poolid);
		}
	} else {
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	}
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	size_t size;
	int idx;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 */
	for (idx = 0; idx < PHPOOL_MAX; idx++) {
		static char phpool_names[PHPOOL_MAX][6+1+6+1];
		int nelem;
		size_t sz;

		nelem = PHPOOL_FREELIST_NELEM(idx);
		KASSERT(nelem != 0);
		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
		    "phpool-%d", nelem);
		sz = offsetof(struct pool_item_header,
		    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
		pool_init(&phpool[idx], sz, 0, 0, 0,
		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
	}

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
	    "pcgnormal", &pool_allocator_meta, IPL_VM);

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
	    "pcglarge", &pool_allocator_meta, IPL_VM);

	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
}

static inline bool
pool_init_is_phinpage(const struct pool *pp)
{
	size_t pagesize;

	if (pp->pr_roflags & PR_PHINPAGE) {
		return true;
	}
	if (pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) {
		return false;
	}

	pagesize = pp->pr_alloc->pa_pagesz;

	/*
	 * Threshold: the item size is below 1/16 of a page size, and below
	 * 8 times the page header size. The latter ensures we go off-page
	 * if the page header would make us waste a rather big item.
	 */
	if (pp->pr_size < MIN(pagesize / 16, PHSIZE * 8)) {
		return true;
	}

	/* Put the header into the page if it doesn't waste any items. */
	if (pagesize / pp->pr_size == (pagesize - PHSIZE) / pp->pr_size) {
		return true;
	}

	return false;
}
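
/*
 * Worked example (editor's addition, assuming 4 KiB pages and a 64-byte
 * PHSIZE; both are illustrative): items smaller than
 * MIN(4096 / 16, 64 * 8) = 256 bytes keep the header on-page.  An
 * 800-byte item also stays on-page via the second test, since
 * 4096 / 800 == (4096 - 64) / 800 == 5, i.e. the header costs no whole
 * item.  A 1024-byte item fails both tests and goes off-page.
 */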

static inline bool
pool_init_is_usebmap(const struct pool *pp)
{
	size_t bmapsize;

	if (pp->pr_roflags & PR_NOTOUCH) {
		return true;
	}

	/*
	 * If we're off-page, go with a bitmap.
	 */
	if (!(pp->pr_roflags & PR_PHINPAGE)) {
		return true;
	}

	/*
	 * If we're on-page, and the page header can already contain a bitmap
	 * big enough to cover all the items of the page, go with a bitmap.
	 */
	bmapsize = roundup(PHSIZE, pp->pr_align) -
	    offsetof(struct pool_item_header, ph_bitmap[0]);
	KASSERT(bmapsize % sizeof(pool_item_bitmap_t) == 0);
	if (pp->pr_itemsperpage <= bmapsize * CHAR_BIT) {
		return true;
	}

	return false;
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before kmem(9) is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t prsize;
	int itemspace, slack;

	/* XXX ioff will be removed. */
	KASSERT(ioff == 0);

#ifdef DEBUG
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("%s: [%s] already initialised", __func__,
			    wchan);
	}
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;

	if (!cold)
		mutex_enter(&pool_allocator_lock);
	if (palloc->pa_refcnt++ == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}
	if (!cold)
		mutex_exit(&pool_allocator_lock);

	if (align == 0)
		align = ALIGN(1);

	prsize = size;
	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
		prsize = sizeof(struct pool_item);

	prsize = roundup(prsize, align);
	KASSERTMSG((prsize <= palloc->pa_pagesz),
	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
	    __func__, wchan, prsize, palloc->pa_pagesz);

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = prsize;
	pp->pr_reqsize = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_poolid = atomic_inc_uint_nv(&poolid_counter);
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;
	pp->pr_redzone = false;
	pool_redzone_init(pp, size);
	pool_quarantine_init(pp);

	/*
	 * Decide whether to put the page header off-page to avoid wasting too
	 * large a part of the page or too big an item. Off-page page headers
	 * go on a splay tree, so we can match a returned item with its header
	 * based on the page address.
	 */
	if (pool_init_is_phinpage(pp)) {
		/* Use the beginning of the page for the page header */
		itemspace = palloc->pa_pagesz - roundup(PHSIZE, align);
		pp->pr_itemoffset = roundup(PHSIZE, align);
		pp->pr_roflags |= PR_PHINPAGE;
	} else {
		/* The page header will be taken from our page header pool */
		itemspace = palloc->pa_pagesz;
		pp->pr_itemoffset = 0;
		SPLAY_INIT(&pp->pr_phtree);
	}

	pp->pr_itemsperpage = itemspace / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Decide whether to use a bitmap or a linked list to manage freed
	 * items.
	 */
	if (pool_init_is_usebmap(pp)) {
		pp->pr_roflags |= PR_USEBMAP;
	}

	/*
	 * If we're off-page, then we're using a bitmap; choose the appropriate
	 * pool to allocate page headers, whose size varies depending on the
	 * bitmap. If we're on-page, nothing to do.
	 */
	if (!(pp->pr_roflags & PR_PHINPAGE)) {
		int idx;

		KASSERT(pp->pr_roflags & PR_USEBMAP);

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * If you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: [%s] too large itemsperpage(%d) for "
			    "PR_USEBMAP", __func__,
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else {
		pp->pr_phpool = NULL;
	}

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = itemspace - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = rounddown(slack, align);
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/* Insert into the list of all pools. */
	if (!cold)
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (!cold)
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (!cold)
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (!cold)
		mutex_exit(&palloc->pa_lock);
}

/*
 * Decommission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	pool_quarantine_flush(pp);

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pool_allocator_lock);
	if (--pp->pr_alloc->pa_refcnt == 0)
		mutex_destroy(&pp->pr_alloc->pa_lock);
	mutex_exit(&pool_allocator_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);
	KASSERTMSG((pp->pr_nout == 0),
	    "%s: [%s] pool busy: still out: %u", __func__, pp->pr_wchan,
	    pp->pr_nout);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
	KASSERTMSG((pp->pr_drain_hook == NULL),
	    "%s: [%s] already set", __func__, pp->pr_wchan);
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, void *storage, int flags)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		ph = storage;
	else
		ph = pool_get(pp->pr_phpool, flags);

	return ph;
}

/*
 * Grab an item from the pool.
 */
void *
pool_get(struct pool *pp, int flags)
{
	struct pool_item_header *ph;
	void *v;

	KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
	KASSERTMSG((pp->pr_itemsperpage != 0),
	    "%s: [%s] pr_itemsperpage is zero, "
	    "pool not initialized?", __func__, pp->pr_wchan);
	KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
	    || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
	    "%s: [%s] is IPL_NONE, but called from interrupt context",
	    __func__, pp->pr_wchan);
	if (flags & PR_WAITOK) {
		ASSERT_SLEEPABLE();
	}

	mutex_enter(&pp->pr_lock);
 startover:
	/*
	 * Check to see if we've reached the hard limit. If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
	KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
	    "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if (pp->pr_drain_hook != NULL) {
			/*
			 * Since the drain hook is going to free things
			 * back to the pool, unlock, call the hook, re-lock,
			 * and check the hardlimit condition again.
			 */
			mutex_exit(&pp->pr_lock);
			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
			mutex_enter(&pp->pr_lock);
			if (pp->pr_nout < pp->pr_hardlimit)
				goto startover;
		}

		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case. Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			do {
				cv_wait(&pp->pr_cv, &pp->pr_lock);
			} while (pp->pr_flags & PR_WANTED);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
		    &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		pp->pr_nfail++;

		mutex_exit(&pp->pr_lock);
		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
		return NULL;
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		int error;

		KASSERTMSG((pp->pr_nitems == 0),
		    "%s: [%s] curpage NULL, inconsistent nitems %u",
		    __func__, pp->pr_wchan, pp->pr_nitems);

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		error = pool_grow(pp, flags);
		if (error != 0) {
			/*
			 * pool_grow aborts when another thread
			 * is allocating a new page. Retry if it
			 * waited for it.
			 */
			if (error == ERESTART)
				goto startover;

			/*
			 * We were unable to allocate a page or item
			 * header, but we released the lock during
			 * allocation, so perhaps items were freed
			 * back to the pool. Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			pp->pr_nfail++;
			mutex_exit(&pp->pr_lock);
			KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
			return NULL;
		}

		/* Start the allocation process over. */
		goto startover;
	}
	if (pp->pr_roflags & PR_USEBMAP) {
		KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
		    "%s: [%s] pool page empty", __func__, pp->pr_wchan);
		v = pr_item_bitmap_get(pp, ph);
	} else {
		v = pr_item_linkedlist_get(pp, ph);
	}
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle > 0);
		pp->pr_nidle--;

		/*
		 * This page was previously empty. Move it to the list of
		 * partially-full pages. This page is already curpage.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
	}
	ph->ph_nmissing++;
	if (ph->ph_nmissing == pp->pr_itemsperpage) {
		KASSERTMSG(((pp->pr_roflags & PR_USEBMAP) ||
		    LIST_EMPTY(&ph->ph_itemlist)),
		    "%s: [%s] nmissing (%u) inconsistent", __func__,
		    pp->pr_wchan, ph->ph_nmissing);
		/*
		 * This page is now full. Move it to the full list
		 * and select a new current page.
		 */
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
		pool_update_curpage(pp);
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
	KASSERT((((vaddr_t)v) & (pp->pr_align - 1)) == 0);
	FREECHECK_OUT(&pp->pr_freecheck, v);
	pool_redzone_fill(pp, v);
	if (flags & PR_ZERO)
		memset(v, 0, pp->pr_reqsize);
	else
		pool_kleak_fill(pp, v);
	return v;
}
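
/*
 * Illustrative sketch (editor's addition): the two legal calling styles
 * for pool_get().  Exactly one of PR_WAITOK/PR_NOWAIT must be passed, and
 * only PR_NOWAIT (or PR_WAITOK|PR_LIMITFAIL) callers can see NULL.  The
 * example_pool and helper are hypothetical.
 */
#if 0
static int
example_get_nowait(void **objp)
{
	/*
	 * Sleeping callers pass PR_WAITOK (never returns NULL); interrupt
	 * or opportunistic callers pass PR_NOWAIT and must handle failure.
	 */
	void *obj = pool_get(&example_pool, PR_NOWAIT);

	if (obj == NULL)
		return ENOMEM;
	*objp = obj;
	return 0;
}
#endif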

/*
 * Internal version of pool_put(). Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	KASSERT(mutex_owned(&pp->pr_lock));
	pool_redzone_check(pp, v);
	FREECHECK_IN(&pp->pr_freecheck, v);
	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);

	KASSERTMSG((pp->pr_nout > 0),
	    "%s: [%s] putting with none out", __func__, pp->pr_wchan);

	if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
		panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
	if (pp->pr_roflags & PR_USEBMAP) {
		pr_item_bitmap_put(pp, ph, v);
	} else {
		pr_item_linkedlist_put(pp, ph, v);
	}
	KDASSERT(ph->ph_nmissing != 0);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		cv_broadcast(&pp->pr_cv);
	}

	/*
	 * If this page is now empty, do one of two things:
	 *
	 *	(1) If we have more pages than the page high water mark,
	 *	    free the page back to the system. ONLY CONSIDER
	 *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *	    CLAIM.
	 *
	 *	(2) Otherwise, move the page to the empty page list.
	 *
	 * Either way, select a new current page (so we use a partially-full
	 * page if one is available).
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_minpages &&
		    pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph, pq);
		} else {
			LIST_REMOVE(ph, ph_pagelist);
			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page. A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon. This minimizes
			 * ping-pong'ing for memory.
			 *
			 * note for 64-bit time_t: truncating to 32-bit is not
			 * a problem for our usage.
			 */
			ph->ph_time = time_uptime;
		}
		pool_update_curpage(pp);
	}

	/*
	 * If the page was previously completely full, move it to the
	 * partially-full list and make it the current page. The next
	 * allocation will get the item from this page, instead of
	 * further fragmenting the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		LIST_REMOVE(ph, ph_pagelist);
		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

void
pool_put(struct pool *pp, void *v)
{
	struct pool_pagelist pq;

	LIST_INIT(&pq);

	mutex_enter(&pp->pr_lock);
	if (!pool_put_quarantine(pp, v, &pq)) {
		pool_do_put(pp, v, &pq);
	}
	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
}

/*
 * pool_grow: grow a pool by a page.
 *
 * => called with pool locked.
 * => unlock and relock the pool.
 * => return with pool locked.
 */

static int
pool_grow(struct pool *pp, int flags)
{
	struct pool_item_header *ph;
	char *storage;

	/*
	 * If there's a pool_grow in progress, wait for it to complete
	 * and try again from the top.
	 */
	if (pp->pr_flags & PR_GROWING) {
		if (flags & PR_WAITOK) {
			do {
				cv_wait(&pp->pr_cv, &pp->pr_lock);
			} while (pp->pr_flags & PR_GROWING);
			return ERESTART;
		} else {
			if (pp->pr_flags & PR_GROWINGNOWAIT) {
				/*
				 * This needs an unlock/relock dance so
				 * that the other caller has a chance to
				 * run and actually do the thing. Note
				 * that this is effectively a busy-wait.
				 */
				mutex_exit(&pp->pr_lock);
				mutex_enter(&pp->pr_lock);
				return ERESTART;
			}
			return EWOULDBLOCK;
		}
	}
	pp->pr_flags |= PR_GROWING;
	if (flags & PR_WAITOK)
		mutex_exit(&pp->pr_lock);
	else
		pp->pr_flags |= PR_GROWINGNOWAIT;

	storage = pool_allocator_alloc(pp, flags);
	if (__predict_false(storage == NULL))
		goto out;

	ph = pool_alloc_item_header(pp, storage, flags);
	if (__predict_false(ph == NULL)) {
		pool_allocator_free(pp, storage);
		goto out;
	}

	if (flags & PR_WAITOK)
		mutex_enter(&pp->pr_lock);
	pool_prime_page(pp, storage, ph);
	pp->pr_npagealloc++;
	KASSERT(pp->pr_flags & PR_GROWING);
	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
	/*
	 * If anyone was waiting for pool_grow, notify them that we
	 * may have just done it.
	 */
	cv_broadcast(&pp->pr_cv);
	return 0;
out:
	if (flags & PR_WAITOK)
		mutex_enter(&pp->pr_lock);
	KASSERT(pp->pr_flags & PR_GROWING);
	pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
	return ENOMEM;
}

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
	int newpages;
	int error = 0;

	mutex_enter(&pp->pr_lock);

	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	while (newpages > 0) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			if (error == ERESTART)
				continue;
			break;
		}
		pp->pr_minpages++;
		newpages--;
	}

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	mutex_exit(&pp->pr_lock);
	return error;
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
{
	const unsigned int align = pp->pr_align;
	struct pool_item *pi;
	void *cp = storage;
	int n;

	KASSERT(mutex_owned(&pp->pr_lock));
	KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
	    (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
	    "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);

	/*
	 * Insert page header.
	 */
	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
	LIST_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time = time_uptime;
	if (pp->pr_roflags & PR_PHINPAGE)
		ph->ph_poolid = pp->pr_poolid;
	else
		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

	pp->pr_nidle++;

	/*
	 * The item space starts after the on-page header, if any.
	 */
	ph->ph_off = pp->pr_itemoffset;

	/*
	 * Color this page.
	 */
	ph->ph_off += pp->pr_curcolor;
	cp = (char *)cp + ph->ph_off;
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	KASSERT((((vaddr_t)cp) & (align - 1)) == 0);

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	if (pp->pr_roflags & PR_USEBMAP) {
		pr_item_bitmap_init(pp, ph);
	} else {
		while (n--) {
			pi = (struct pool_item *)cp;

			KASSERT((((vaddr_t)pi) & (align - 1)) == 0);

			/* Insert on page list */
			LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef POOL_CHECK_MAGIC
			pi->pi_magic = PI_MAGIC;
#endif
			cp = (char *)cp + pp->pr_size;

			KASSERT((((vaddr_t)cp) & (align - 1)) == 0);
		}
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;
}

/*
 * Used by pool_get() when nitems drops below the low water mark. This
 * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	int error = 0;

	while (POOL_NEEDS_CATCHUP(pp)) {
		error = pool_grow(pp, PR_NOWAIT);
		if (error) {
			if (error == ERESTART)
				continue;
			break;
		}
	}
	return error;
}

static void
pool_update_curpage(struct pool *pp)
{

	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
	if (pp->pr_curpage == NULL) {
		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
	}
	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
}

void
pool_setlowat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning? Should we set up a timeout
		 * to try again in a second or so? The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	mutex_exit(&pp->pr_lock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_maxpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	mutex_enter(&pp->pr_lock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
	    ? 0
	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	mutex_exit(&pp->pr_lock);
}
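
/*
 * Illustrative sketch (editor's addition): tuning a pool after creation
 * with the three knobs above.  The numbers and the warning string are
 * hypothetical, as is example_pool.
 */
#if 0
static void
example_tune(void)
{
	pool_setlowat(&example_pool, 16);	/* keep >= 16 items primed */
	pool_sethiwat(&example_pool, 1024);	/* free idle pages above that */
	pool_sethardlimit(&example_pool, 4096,
	    "WARNING: example_pool limit reached", 60);	/* warn <= 1/minute */
}
#endif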

/*
 * Release all complete pages that have not been used recently.
 *
 * Must not be called from interrupt context.
 */
int
pool_reclaim(struct pool *pp)
{
	struct pool_item_header *ph, *phnext;
	struct pool_pagelist pq;
	uint32_t curtime;
	bool klock;
	int rv;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	if (pp->pr_drain_hook != NULL) {
		/*
		 * The drain hook must be called with the pool unlocked.
		 */
		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
	}

	/*
	 * XXXSMP Because we do not want to cause non-MPSAFE code
	 * to block.
	 */
	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
	    pp->pr_ipl == IPL_SOFTSERIAL) {
		KERNEL_LOCK(1, NULL);
		klock = true;
	} else
		klock = false;

	/* Reclaim items from the pool's cache (if any). */
	if (pp->pr_cache != NULL)
		pool_cache_invalidate(pp->pr_cache);

	if (mutex_tryenter(&pp->pr_lock) == 0) {
		if (klock) {
			KERNEL_UNLOCK_ONE(NULL);
		}
		return 0;
	}

	LIST_INIT(&pq);

	curtime = time_uptime;

	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
		phnext = LIST_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		KASSERT(ph->ph_nmissing == 0);
		if (curtime - ph->ph_time < pool_inactive_time)
			continue;

		/*
		 * If freeing this page would put us below
		 * the low water mark, stop now.
		 */
		if ((pp->pr_nitems - pp->pr_itemsperpage) <
		    pp->pr_minitems)
			break;

		pr_rmpage(pp, ph, &pq);
	}

	mutex_exit(&pp->pr_lock);

	if (LIST_EMPTY(&pq))
		rv = 0;
	else {
		pr_pagelist_free(pp, &pq);
		rv = 1;
	}

	if (klock) {
		KERNEL_UNLOCK_ONE(NULL);
	}

	return rv;
}

/*
 * Drain pools, one at a time. The drained pool is returned in *ppp.
 *
 * Note, must never be called from interrupt context.
 */
bool
pool_drain(struct pool **ppp)
{
	bool reclaimed;
	struct pool *pp;

	KASSERT(!TAILQ_EMPTY(&pool_head));

	pp = NULL;

	/* Find next pool to drain, and add a reference. */
	mutex_enter(&pool_head_lock);
	do {
		if (drainpp == NULL) {
			drainpp = TAILQ_FIRST(&pool_head);
		}
		if (drainpp != NULL) {
			pp = drainpp;
			drainpp = TAILQ_NEXT(pp, pr_poollist);
		}
		/*
		 * Skip completely idle pools. We depend on at least
		 * one pool in the system being active.
		 */
	} while (pp == NULL || pp->pr_npages == 0);
	pp->pr_refcnt++;
	mutex_exit(&pool_head_lock);
	/* Drain the cache (if any) and the pool. */
	reclaimed = pool_reclaim(pp);

	/* Finally, unlock the pool. */
	mutex_enter(&pool_head_lock);
	pp->pr_refcnt--;
	cv_broadcast(&pool_busy);
	mutex_exit(&pool_head_lock);

	if (ppp != NULL)
		*ppp = pp;

	return reclaimed;
}
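
/*
 * Illustrative sketch (editor's addition): how a memory-pressure loop
 * such as the pagedaemon might use pool_drain().  The retry bound is
 * hypothetical.
 */
#if 0
static void
example_memory_pressure(void)
{
	struct pool *pp = NULL;
	int tries;

	/* Each call drains one pool; stop once a drain reclaimed pages. */
	for (tries = 0; tries < 8; tries++) {
		if (pool_drain(&pp))
			break;
	}
}
#endif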
1659
1660 /*
1661 * Calculate the total number of pages consumed by pools.
1662 */
1663 int
1664 pool_totalpages(void)
1665 {
1666
1667 mutex_enter(&pool_head_lock);
1668 int pages = pool_totalpages_locked();
1669 mutex_exit(&pool_head_lock);
1670
1671 return pages;
1672 }
1673
1674 int
1675 pool_totalpages_locked(void)
1676 {
1677 struct pool *pp;
1678 uint64_t total = 0;
1679
1680 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1681 uint64_t bytes =
1682 (uint64_t)pp->pr_npages * pp->pr_alloc->pa_pagesz;
1683
1684 if ((pp->pr_roflags & PR_RECURSIVE) != 0)
1685 bytes -= ((uint64_t)pp->pr_nout * pp->pr_size);
1686 total += bytes;
1687 }
1688
1689 return atop(total);
1690 }
1691
1692 /*
1693 * Diagnostic helpers.
1694 */
1695
1696 void
1697 pool_printall(const char *modif, void (*pr)(const char *, ...))
1698 {
1699 struct pool *pp;
1700
1701 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1702 pool_printit(pp, modif, pr);
1703 }
1704 }
1705
1706 void
1707 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1708 {
1709
1710 if (pp == NULL) {
1711 (*pr)("Must specify a pool to print.\n");
1712 return;
1713 }
1714
1715 pool_print1(pp, modif, pr);
1716 }
1717
1718 static void
1719 pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1720 void (*pr)(const char *, ...))
1721 {
1722 struct pool_item_header *ph;
1723
1724 LIST_FOREACH(ph, pl, ph_pagelist) {
1725 (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1726 ph->ph_page, ph->ph_nmissing, ph->ph_time);
1727 #ifdef POOL_CHECK_MAGIC
1728 struct pool_item *pi;
1729 if (!(pp->pr_roflags & PR_USEBMAP)) {
1730 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1731 if (pi->pi_magic != PI_MAGIC) {
1732 (*pr)("\t\t\titem %p, magic 0x%x\n",
1733 pi, pi->pi_magic);
1734 }
1735 }
1736 }
1737 #endif
1738 }
1739 }
1740
1741 static void
1742 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1743 {
1744 struct pool_item_header *ph;
1745 pool_cache_t pc;
1746 pcg_t *pcg;
1747 pool_cache_cpu_t *cc;
1748 uint64_t cpuhit, cpumiss;
1749 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1750 char c;
1751
1752 while ((c = *modif++) != '\0') {
1753 if (c == 'l')
1754 print_log = 1;
1755 if (c == 'p')
1756 print_pagelist = 1;
1757 if (c == 'c')
1758 print_cache = 1;
1759 }
1760
1761 if ((pc = pp->pr_cache) != NULL) {
1762 (*pr)("POOL CACHE");
1763 } else {
1764 (*pr)("POOL");
1765 }
1766
1767 (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1768 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1769 pp->pr_roflags);
1770 (*pr)("\talloc %p\n", pp->pr_alloc);
1771 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1772 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1773 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1774 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1775
1776 (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1777 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1778 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1779 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1780
1781 if (print_pagelist == 0)
1782 goto skip_pagelist;
1783
1784 if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1785 (*pr)("\n\tempty page list:\n");
1786 pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1787 if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1788 (*pr)("\n\tfull page list:\n");
1789 pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1790 if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1791 (*pr)("\n\tpartial-page list:\n");
1792 pool_print_pagelist(pp, &pp->pr_partpages, pr);
1793
1794 if (pp->pr_curpage == NULL)
1795 (*pr)("\tno current page\n");
1796 else
1797 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1798
1799 skip_pagelist:
1800 if (print_log == 0)
1801 goto skip_log;
1802
1803 (*pr)("\n");
1804
1805 skip_log:
1806
1807 #define PR_GROUPLIST(pcg) \
1808 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1809 for (i = 0; i < pcg->pcg_size; i++) { \
1810 if (pcg->pcg_objects[i].pcgo_pa != \
1811 POOL_PADDR_INVALID) { \
1812 (*pr)("\t\t\t%p, 0x%llx\n", \
1813 pcg->pcg_objects[i].pcgo_va, \
1814 (unsigned long long) \
1815 pcg->pcg_objects[i].pcgo_pa); \
1816 } else { \
1817 (*pr)("\t\t\t%p\n", \
1818 pcg->pcg_objects[i].pcgo_va); \
1819 } \
1820 }
1821
1822 if (pc != NULL) {
1823 cpuhit = 0;
1824 cpumiss = 0;
1825 for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1826 if ((cc = pc->pc_cpus[i]) == NULL)
1827 continue;
1828 cpuhit += cc->cc_hits;
1829 cpumiss += cc->cc_misses;
1830 }
1831 (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1832 (*pr)("\tcache layer hits %llu misses %llu\n",
1833 pc->pc_hits, pc->pc_misses);
1834 (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1835 pc->pc_hits + pc->pc_misses - pc->pc_contended,
1836 pc->pc_contended);
1837 (*pr)("\tcache layer empty groups %u full groups %u\n",
1838 pc->pc_nempty, pc->pc_nfull);
1839 if (print_cache) {
1840 (*pr)("\tfull cache groups:\n");
1841 for (pcg = pc->pc_fullgroups; pcg != NULL;
1842 pcg = pcg->pcg_next) {
1843 PR_GROUPLIST(pcg);
1844 }
1845 (*pr)("\tempty cache groups:\n");
1846 for (pcg = pc->pc_emptygroups; pcg != NULL;
1847 pcg = pcg->pcg_next) {
1848 PR_GROUPLIST(pcg);
1849 }
1850 }
1851 }
1852 #undef PR_GROUPLIST
1853 }
1854
1855 static int
1856 pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1857 {
1858 struct pool_item *pi;
1859 void *page;
1860 int n;
1861
1862 if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1863 page = POOL_OBJ_TO_PAGE(pp, ph);
1864 if (page != ph->ph_page &&
1865 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1866 if (label != NULL)
1867 printf("%s: ", label);
1868 printf("pool(%p:%s): page inconsistency: page %p;"
1869 " at page head addr %p (p %p)\n", pp,
1870 pp->pr_wchan, ph->ph_page,
1871 ph, page);
1872 return 1;
1873 }
1874 }
1875
1876 if ((pp->pr_roflags & PR_USEBMAP) != 0)
1877 return 0;
1878
1879 for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1880 pi != NULL;
1881 pi = LIST_NEXT(pi,pi_list), n++) {
1882
1883 #ifdef POOL_CHECK_MAGIC
1884 if (pi->pi_magic != PI_MAGIC) {
1885 if (label != NULL)
1886 printf("%s: ", label);
1887 printf("pool(%s): free list modified: magic=%x;"
1888 " page %p; item ordinal %d; addr %p\n",
1889 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1890 n, pi);
1891 panic("pool");
1892 }
1893 #endif
1894 if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1895 continue;
1896 }
1897 page = POOL_OBJ_TO_PAGE(pp, pi);
1898 if (page == ph->ph_page)
1899 continue;
1900
1901 if (label != NULL)
1902 printf("%s: ", label);
1903 printf("pool(%p:%s): page inconsistency: page %p;"
1904 " item ordinal %d; addr %p (p %p)\n", pp,
1905 pp->pr_wchan, ph->ph_page,
1906 n, pi, page);
1907 return 1;
1908 }
1909 return 0;
1910 }
1911
1912
1913 int
1914 pool_chk(struct pool *pp, const char *label)
1915 {
1916 struct pool_item_header *ph;
1917 int r = 0;
1918
1919 mutex_enter(&pp->pr_lock);
1920 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1921 r = pool_chk_page(pp, label, ph);
1922 if (r) {
1923 goto out;
1924 }
1925 }
1926 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1927 r = pool_chk_page(pp, label, ph);
1928 if (r) {
1929 goto out;
1930 }
1931 }
1932 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1933 r = pool_chk_page(pp, label, ph);
1934 if (r) {
1935 goto out;
1936 }
1937 }
1938
1939 out:
1940 mutex_exit(&pp->pr_lock);
1941 return r;
1942 }
1943
1944 /*
1945 * pool_cache_init:
1946 *
1947 * Initialize a pool cache.
1948 */
1949 pool_cache_t
1950 pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1951 const char *wchan, struct pool_allocator *palloc, int ipl,
1952 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1953 {
1954 pool_cache_t pc;
1955
1956 pc = pool_get(&cache_pool, PR_WAITOK);
1957 if (pc == NULL)
1958 return NULL;
1959
1960 pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1961 palloc, ipl, ctor, dtor, arg);
1962
1963 return pc;
1964 }
1965
1966 /*
1967 * pool_cache_bootstrap:
1968 *
1969 * Kernel-private version of pool_cache_init(). The caller
1970 * provides initial storage.
1971 */
1972 void
1973 pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1974 u_int align_offset, u_int flags, const char *wchan,
1975 struct pool_allocator *palloc, int ipl,
1976 int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1977 void *arg)
1978 {
1979 CPU_INFO_ITERATOR cii;
1980 pool_cache_t pc1;
1981 struct cpu_info *ci;
1982 struct pool *pp;
1983
1984 pp = &pc->pc_pool;
1985 if (palloc == NULL && ipl == IPL_NONE) {
1986 if (size > PAGE_SIZE) {
1987 int bigidx = pool_bigidx(size);
1988
1989 palloc = &pool_allocator_big[bigidx];
1990 flags |= PR_NOALIGN;
1991 } else
1992 palloc = &pool_allocator_nointr;
1993 }
1994 pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1995 mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1996
1997 if (ctor == NULL) {
1998 ctor = (int (*)(void *, void *, int))nullop;
1999 }
2000 if (dtor == NULL) {
2001 dtor = (void (*)(void *, void *))nullop;
2002 }
2003
2004 pc->pc_emptygroups = NULL;
2005 pc->pc_fullgroups = NULL;
2006 pc->pc_partgroups = NULL;
2007 pc->pc_ctor = ctor;
2008 pc->pc_dtor = dtor;
2009 pc->pc_arg = arg;
2010 pc->pc_hits = 0;
2011 pc->pc_misses = 0;
2012 pc->pc_nempty = 0;
2013 pc->pc_npart = 0;
2014 pc->pc_nfull = 0;
2015 pc->pc_contended = 0;
2016 pc->pc_refcnt = 0;
2017 pc->pc_freecheck = NULL;
2018
2019 if ((flags & PR_LARGECACHE) != 0) {
2020 pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
2021 pc->pc_pcgpool = &pcg_large_pool;
2022 } else {
2023 pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
2024 pc->pc_pcgpool = &pcg_normal_pool;
2025 }
2026
2027 /* Allocate per-CPU caches. */
2028 memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2029 pc->pc_ncpu = 0;
2030 if (ncpu < 2) {
2031 /* XXX For sparc: boot CPU is not attached yet. */
2032 pool_cache_cpu_init1(curcpu(), pc);
2033 } else {
2034 for (CPU_INFO_FOREACH(cii, ci)) {
2035 pool_cache_cpu_init1(ci, pc);
2036 }
2037 }
2038
2039 /* Add to list of all pools. */
2040 if (__predict_true(!cold))
2041 mutex_enter(&pool_head_lock);
2042 TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
2043 if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
2044 break;
2045 }
2046 if (pc1 == NULL)
2047 TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
2048 else
2049 TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
2050 if (__predict_true(!cold))
2051 mutex_exit(&pool_head_lock);
2052
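	/*
	 * Order all of the above stores before publishing the cache
	 * via pr_cache, so that any reader observing pr_cache != NULL
	 * also sees a fully constructed cache (the apparent intent of
	 * the barrier).
	 */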
2053 membar_sync();
2054 pp->pr_cache = pc;
2055 }
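
/*
 * Example (illustrative sketch; "foo" names are hypothetical):
 * early-boot code that cannot allocate yet supplies static storage
 * and bootstraps the cache in place:
 *
 *	static struct pool_cache foo_cache_store;
 *
 *	pool_cache_bootstrap(&foo_cache_store, sizeof(struct foo), 0, 0,
 *	    0, "foopl", NULL, IPL_NONE, NULL, NULL, NULL);
 */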
2056
2057 /*
2058 * pool_cache_destroy:
2059 *
2060 * Destroy a pool cache.
2061 */
2062 void
2063 pool_cache_destroy(pool_cache_t pc)
2064 {
2065
2066 pool_cache_bootstrap_destroy(pc);
2067 pool_put(&cache_pool, pc);
2068 }
2069
2070 /*
2071 * pool_cache_bootstrap_destroy:
2072 *
 2073  *	Destroy a pool cache; the caller-provided storage is not freed.
2074 */
2075 void
2076 pool_cache_bootstrap_destroy(pool_cache_t pc)
2077 {
2078 struct pool *pp = &pc->pc_pool;
2079 u_int i;
2080
2081 /* Remove it from the global list. */
2082 mutex_enter(&pool_head_lock);
2083 while (pc->pc_refcnt != 0)
2084 cv_wait(&pool_busy, &pool_head_lock);
2085 TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
2086 mutex_exit(&pool_head_lock);
2087
2088 /* First, invalidate the entire cache. */
2089 pool_cache_invalidate(pc);
2090
2091 /* Disassociate it from the pool. */
2092 mutex_enter(&pp->pr_lock);
2093 pp->pr_cache = NULL;
2094 mutex_exit(&pp->pr_lock);
2095
2096 /* Destroy per-CPU data */
2097 for (i = 0; i < __arraycount(pc->pc_cpus); i++)
2098 pool_cache_invalidate_cpu(pc, i);
2099
2100 /* Finally, destroy it. */
2101 mutex_destroy(&pc->pc_lock);
2102 pool_destroy(pp);
2103 }
2104
2105 /*
2106 * pool_cache_cpu_init1:
2107 *
2108 * Called for each pool_cache whenever a new CPU is attached.
2109 */
2110 static void
2111 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2112 {
2113 pool_cache_cpu_t *cc;
2114 int index;
2115
2116 index = ci->ci_index;
2117
2118 KASSERT(index < __arraycount(pc->pc_cpus));
2119
2120 if ((cc = pc->pc_cpus[index]) != NULL) {
2121 KASSERT(cc->cc_cpuindex == index);
2122 return;
2123 }
2124
2125 /*
 2126 	 * The first CPU uses the statically allocated pc_cpu0.  This must
 2127 	 * be the case for bootstrap, when we may not be able to allocate yet.
2128 */
2129 if (pc->pc_ncpu == 0) {
2130 cc = &pc->pc_cpu0;
2131 pc->pc_ncpu = 1;
2132 } else {
2133 mutex_enter(&pc->pc_lock);
2134 pc->pc_ncpu++;
2135 mutex_exit(&pc->pc_lock);
2136 cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2137 }
2138
2139 cc->cc_ipl = pc->pc_pool.pr_ipl;
2140 cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2141 cc->cc_cache = pc;
2142 cc->cc_cpuindex = index;
2143 cc->cc_hits = 0;
2144 cc->cc_misses = 0;
2145 cc->cc_current = __UNCONST(&pcg_dummy);
2146 cc->cc_previous = __UNCONST(&pcg_dummy);
2147
2148 pc->pc_cpus[index] = cc;
2149 }
2150
2151 /*
2152 * pool_cache_cpu_init:
2153 *
2154 * Called whenever a new CPU is attached.
2155 */
2156 void
2157 pool_cache_cpu_init(struct cpu_info *ci)
2158 {
2159 pool_cache_t pc;
2160
2161 mutex_enter(&pool_head_lock);
2162 TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
2163 pc->pc_refcnt++;
2164 mutex_exit(&pool_head_lock);
2165
2166 pool_cache_cpu_init1(ci, pc);
2167
2168 mutex_enter(&pool_head_lock);
2169 pc->pc_refcnt--;
2170 cv_broadcast(&pool_busy);
2171 }
2172 mutex_exit(&pool_head_lock);
2173 }
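
/*
 * Illustrative call site (sketch; the hook name is hypothetical):
 * MI CPU attachment code is expected to call this once per newly
 * attached CPU, so that every existing cache grows per-CPU state:
 *
 *	void
 *	foo_cpu_attach_hook(struct cpu_info *ci)
 *	{
 *		pool_cache_cpu_init(ci);
 *	}
 */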
2174
2175 /*
2176 * pool_cache_reclaim:
2177 *
2178 * Reclaim memory from a pool cache.
2179 */
2180 bool
2181 pool_cache_reclaim(pool_cache_t pc)
2182 {
2183
2184 return pool_reclaim(&pc->pc_pool);
2185 }
2186
2187 static void
2188 pool_cache_destruct_object1(pool_cache_t pc, void *object)
2189 {
2190 (*pc->pc_dtor)(pc->pc_arg, object);
2191 pool_put(&pc->pc_pool, object);
2192 }
2193
2194 /*
2195 * pool_cache_destruct_object:
2196 *
2197 * Force destruction of an object and its release back into
2198 * the pool.
2199 */
2200 void
2201 pool_cache_destruct_object(pool_cache_t pc, void *object)
2202 {
2203
2204 FREECHECK_IN(&pc->pc_freecheck, object);
2205
2206 pool_cache_destruct_object1(pc, object);
2207 }
2208
2209 /*
2210 * pool_cache_invalidate_groups:
2211 *
2212 * Invalidate a chain of groups and destruct all objects.
2213 */
2214 static void
2215 pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
2216 {
2217 void *object;
2218 pcg_t *next;
2219 int i;
2220
2221 for (; pcg != NULL; pcg = next) {
2222 next = pcg->pcg_next;
2223
2224 for (i = 0; i < pcg->pcg_avail; i++) {
2225 object = pcg->pcg_objects[i].pcgo_va;
2226 pool_cache_destruct_object1(pc, object);
2227 }
2228
2229 if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2230 pool_put(&pcg_large_pool, pcg);
2231 } else {
2232 KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2233 pool_put(&pcg_normal_pool, pcg);
2234 }
2235 }
2236 }
2237
2238 /*
2239 * pool_cache_invalidate:
2240 *
2241 * Invalidate a pool cache (destruct and release all of the
2242 * cached objects). Does not reclaim objects from the pool.
2243 *
 2244  * Note: for pool caches that provide constructed objects, the
 2245  * caller is assumed to provide another level of synchronization
 2246  * between the constructor's input and the cache invalidation.
2247 *
2248 * Invalidation is a costly process and should not be called from
2249 * interrupt context.
2250 */
2251 void
2252 pool_cache_invalidate(pool_cache_t pc)
2253 {
2254 uint64_t where;
2255 pcg_t *full, *empty, *part;
2256
2257 KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2258
2259 if (ncpu < 2 || !mp_online) {
2260 /*
2261 * We might be called early enough in the boot process
2262 * for the CPU data structures to not be fully initialized.
 2263 		 * In this case, transfer the content of the local CPU's
 2264 		 * cache back into the global cache, as only this CPU is
 2265 		 * currently running.
2266 */
2267 pool_cache_transfer(pc);
2268 } else {
2269 /*
 2270 		 * Signal all CPUs that they must transfer their local
 2271 		 * cache back to the global pool, then wait for the xcall
 2272 		 * to complete.
2273 */
2274 where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2275 pc, NULL);
2276 xc_wait(where);
2277 }
2278
2279 /* Empty pool caches, then invalidate objects */
2280 mutex_enter(&pc->pc_lock);
2281 full = pc->pc_fullgroups;
2282 empty = pc->pc_emptygroups;
2283 part = pc->pc_partgroups;
2284 pc->pc_fullgroups = NULL;
2285 pc->pc_emptygroups = NULL;
2286 pc->pc_partgroups = NULL;
2287 pc->pc_nfull = 0;
2288 pc->pc_nempty = 0;
2289 pc->pc_npart = 0;
2290 mutex_exit(&pc->pc_lock);
2291
2292 pool_cache_invalidate_groups(pc, full);
2293 pool_cache_invalidate_groups(pc, empty);
2294 pool_cache_invalidate_groups(pc, part);
2295 }
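
/*
 * Example (illustrative sketch; "foo_cache" is hypothetical): a
 * subsystem that wants to drop all cached constructed objects, e.g.
 * because a resource captured by the constructor is going away, while
 * keeping the cache itself:
 *
 *	pool_cache_invalidate(foo_cache);
 *
 * pool_cache_destroy() performs this invalidation itself, so an
 * explicit call is only needed when the cache survives.
 */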
2296
2297 /*
2298 * pool_cache_invalidate_cpu:
2299 *
2300 * Invalidate all CPU-bound cached objects in pool cache, the CPU being
2301 * identified by its associated index.
 2302  * It is the caller's responsibility to ensure that no operation is
 2303  * taking place on this pool cache while doing this invalidation.
 2304  * WARNING: as no inter-CPU locking is enforced, invalidating cached
 2305  * objects from a CPU other than the one currently running may
 2306  * result in undefined behaviour.
2307 */
2308 static void
2309 pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2310 {
2311 pool_cache_cpu_t *cc;
2312 pcg_t *pcg;
2313
2314 if ((cc = pc->pc_cpus[index]) == NULL)
2315 return;
2316
2317 if ((pcg = cc->cc_current) != &pcg_dummy) {
2318 pcg->pcg_next = NULL;
2319 pool_cache_invalidate_groups(pc, pcg);
2320 }
2321 if ((pcg = cc->cc_previous) != &pcg_dummy) {
2322 pcg->pcg_next = NULL;
2323 pool_cache_invalidate_groups(pc, pcg);
2324 }
2325 if (cc != &pc->pc_cpu0)
2326 pool_put(&cache_cpu_pool, cc);
2327
2328 }
2329
2330 void
2331 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2332 {
2333
2334 pool_set_drain_hook(&pc->pc_pool, fn, arg);
2335 }
2336
2337 void
2338 pool_cache_setlowat(pool_cache_t pc, int n)
2339 {
2340
2341 pool_setlowat(&pc->pc_pool, n);
2342 }
2343
2344 void
2345 pool_cache_sethiwat(pool_cache_t pc, int n)
2346 {
2347
2348 pool_sethiwat(&pc->pc_pool, n);
2349 }
2350
2351 void
2352 pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2353 {
2354
2355 pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2356 }
2357
2358 static bool __noinline
2359 pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2360 paddr_t *pap, int flags)
2361 {
2362 pcg_t *pcg, *cur;
2363 uint64_t ncsw;
2364 pool_cache_t pc;
2365 void *object;
2366
2367 KASSERT(cc->cc_current->pcg_avail == 0);
2368 KASSERT(cc->cc_previous->pcg_avail == 0);
2369
2370 pc = cc->cc_cache;
2371 cc->cc_misses++;
2372
2373 /*
 2374 	 * Nothing was available locally.  Try to grab a group
2375 * from the cache.
2376 */
2377 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2378 ncsw = curlwp->l_ncsw;
2379 mutex_enter(&pc->pc_lock);
2380 pc->pc_contended++;
2381
2382 /*
2383 * If we context switched while locking, then
2384 * our view of the per-CPU data is invalid:
2385 * retry.
2386 */
2387 if (curlwp->l_ncsw != ncsw) {
2388 mutex_exit(&pc->pc_lock);
2389 return true;
2390 }
2391 }
2392
2393 if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2394 /*
2395 * If there's a full group, release our empty
2396 * group back to the cache. Install the full
2397 * group as cc_current and return.
2398 */
2399 if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2400 KASSERT(cur->pcg_avail == 0);
2401 cur->pcg_next = pc->pc_emptygroups;
2402 pc->pc_emptygroups = cur;
2403 pc->pc_nempty++;
2404 }
2405 KASSERT(pcg->pcg_avail == pcg->pcg_size);
2406 cc->cc_current = pcg;
2407 pc->pc_fullgroups = pcg->pcg_next;
2408 pc->pc_hits++;
2409 pc->pc_nfull--;
2410 mutex_exit(&pc->pc_lock);
2411 return true;
2412 }
2413
2414 /*
2415 * Nothing available locally or in cache. Take the slow
2416 * path: fetch a new object from the pool and construct
2417 * it.
2418 */
2419 pc->pc_misses++;
2420 mutex_exit(&pc->pc_lock);
2421 splx(s);
2422
2423 object = pool_get(&pc->pc_pool, flags);
2424 *objectp = object;
2425 if (__predict_false(object == NULL)) {
2426 KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
2427 return false;
2428 }
2429
2430 if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2431 pool_put(&pc->pc_pool, object);
2432 *objectp = NULL;
2433 return false;
2434 }
2435
2436 KASSERT((((vaddr_t)object) & (pc->pc_pool.pr_align - 1)) == 0);
2437
2438 if (pap != NULL) {
2439 #ifdef POOL_VTOPHYS
2440 *pap = POOL_VTOPHYS(object);
2441 #else
2442 *pap = POOL_PADDR_INVALID;
2443 #endif
2444 }
2445
2446 FREECHECK_OUT(&pc->pc_freecheck, object);
2447 pool_cache_kleak_fill(pc, object);
2448 return false;
2449 }
2450
2451 /*
2452 * pool_cache_get{,_paddr}:
2453 *
2454 * Get an object from a pool cache (optionally returning
2455 * the physical address of the object).
2456 */
2457 void *
2458 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2459 {
2460 pool_cache_cpu_t *cc;
2461 pcg_t *pcg;
2462 void *object;
2463 int s;
2464
2465 KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
2466 KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2467 (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2468 "%s: [%s] is IPL_NONE, but called from interrupt context",
2469 __func__, pc->pc_pool.pr_wchan);
2470
2471 if (flags & PR_WAITOK) {
2472 ASSERT_SLEEPABLE();
2473 }
2474
2475 /* Lock out interrupts and disable preemption. */
2476 s = splvm();
2477 while (/* CONSTCOND */ true) {
 2478 		/* Try to allocate an object from the current group. */
2479 cc = pc->pc_cpus[curcpu()->ci_index];
2480 KASSERT(cc->cc_cache == pc);
2481 pcg = cc->cc_current;
2482 if (__predict_true(pcg->pcg_avail > 0)) {
2483 object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2484 if (__predict_false(pap != NULL))
2485 *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2486 #if defined(DIAGNOSTIC)
2487 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2488 KASSERT(pcg->pcg_avail < pcg->pcg_size);
2489 KASSERT(object != NULL);
2490 #endif
2491 cc->cc_hits++;
2492 splx(s);
2493 FREECHECK_OUT(&pc->pc_freecheck, object);
2494 pool_redzone_fill(&pc->pc_pool, object);
2495 pool_cache_kleak_fill(pc, object);
2496 return object;
2497 }
2498
2499 /*
2500 * That failed. If the previous group isn't empty, swap
2501 * it with the current group and allocate from there.
2502 */
2503 pcg = cc->cc_previous;
2504 if (__predict_true(pcg->pcg_avail > 0)) {
2505 cc->cc_previous = cc->cc_current;
2506 cc->cc_current = pcg;
2507 continue;
2508 }
2509
2510 /*
2511 * Can't allocate from either group: try the slow path.
2512 * If get_slow() allocated an object for us, or if
2513 * no more objects are available, it will return false.
2514 * Otherwise, we need to retry.
2515 */
2516 if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2517 break;
2518 }
2519
2520 /*
2521 * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
2522 * pool_cache_get can fail even in the PR_WAITOK case, if the
2523 * constructor fails.
2524 */
2525 return object;
2526 }
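
/*
 * Example (illustrative sketch; "foo" names are hypothetical): the
 * common get/put pairing.  Note that, per the comment above, NULL must
 * be handled even with PR_WAITOK because the constructor may fail:
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	if (f == NULL)
 *		return ENOMEM;
 *	...
 *	pool_cache_put(foo_cache, f);
 */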
2527
2528 static bool __noinline
2529 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2530 {
2531 struct lwp *l = curlwp;
2532 pcg_t *pcg, *cur;
2533 uint64_t ncsw;
2534 pool_cache_t pc;
2535
2536 KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2537 KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2538
2539 pc = cc->cc_cache;
2540 pcg = NULL;
2541 cc->cc_misses++;
2542 ncsw = l->l_ncsw;
2543
2544 /*
2545 * If there are no empty groups in the cache then allocate one
2546 * while still unlocked.
2547 */
2548 if (__predict_false(pc->pc_emptygroups == NULL)) {
2549 if (__predict_true(!pool_cache_disable)) {
2550 pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2551 }
2552 /*
2553 * If pool_get() blocked, then our view of
2554 * the per-CPU data is invalid: retry.
2555 */
2556 if (__predict_false(l->l_ncsw != ncsw)) {
2557 if (pcg != NULL) {
2558 pool_put(pc->pc_pcgpool, pcg);
2559 }
2560 return true;
2561 }
2562 if (__predict_true(pcg != NULL)) {
2563 pcg->pcg_avail = 0;
2564 pcg->pcg_size = pc->pc_pcgsize;
2565 }
2566 }
2567
2568 /* Lock the cache. */
2569 if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2570 mutex_enter(&pc->pc_lock);
2571 pc->pc_contended++;
2572
2573 /*
2574 * If we context switched while locking, then our view of
2575 * the per-CPU data is invalid: retry.
2576 */
2577 if (__predict_false(l->l_ncsw != ncsw)) {
2578 mutex_exit(&pc->pc_lock);
2579 if (pcg != NULL) {
2580 pool_put(pc->pc_pcgpool, pcg);
2581 }
2582 return true;
2583 }
2584 }
2585
2586 /* If there are no empty groups in the cache then allocate one. */
2587 if (pcg == NULL && pc->pc_emptygroups != NULL) {
2588 pcg = pc->pc_emptygroups;
2589 pc->pc_emptygroups = pcg->pcg_next;
2590 pc->pc_nempty--;
2591 }
2592
2593 /*
 2594 	 * If there's an empty group, release our full group back
 2595 	 * to the cache.  Install the empty group on the local CPU
2596 * and return.
2597 */
2598 if (pcg != NULL) {
2599 KASSERT(pcg->pcg_avail == 0);
2600 if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2601 cc->cc_previous = pcg;
2602 } else {
2603 cur = cc->cc_current;
2604 if (__predict_true(cur != &pcg_dummy)) {
2605 KASSERT(cur->pcg_avail == cur->pcg_size);
2606 cur->pcg_next = pc->pc_fullgroups;
2607 pc->pc_fullgroups = cur;
2608 pc->pc_nfull++;
2609 }
2610 cc->cc_current = pcg;
2611 }
2612 pc->pc_hits++;
2613 mutex_exit(&pc->pc_lock);
2614 return true;
2615 }
2616
2617 /*
2618 * Nothing available locally or in cache, and we didn't
2619 * allocate an empty group. Take the slow path and destroy
2620 * the object here and now.
2621 */
2622 pc->pc_misses++;
2623 mutex_exit(&pc->pc_lock);
2624 splx(s);
2625 pool_cache_destruct_object(pc, object);
2626
2627 return false;
2628 }
2629
2630 /*
2631 * pool_cache_put{,_paddr}:
2632 *
2633 * Put an object back to the pool cache (optionally caching the
2634 * physical address of the object).
2635 */
2636 void
2637 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2638 {
2639 pool_cache_cpu_t *cc;
2640 pcg_t *pcg;
2641 int s;
2642
2643 KASSERT(object != NULL);
2644 pool_cache_redzone_check(pc, object);
2645 FREECHECK_IN(&pc->pc_freecheck, object);
2646
2647 if (pc->pc_pool.pr_roflags & PR_PHINPAGE) {
2648 pc_phinpage_check(pc, object);
2649 }
2650
2651 if (pool_cache_put_quarantine(pc, object, pa)) {
2652 return;
2653 }
2654
2655 /* Lock out interrupts and disable preemption. */
2656 s = splvm();
2657 while (/* CONSTCOND */ true) {
2658 /* If the current group isn't full, release it there. */
2659 cc = pc->pc_cpus[curcpu()->ci_index];
2660 KASSERT(cc->cc_cache == pc);
2661 pcg = cc->cc_current;
2662 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2663 pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2664 pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2665 pcg->pcg_avail++;
2666 cc->cc_hits++;
2667 splx(s);
2668 return;
2669 }
2670
2671 /*
2672 * That failed. If the previous group isn't full, swap
2673 * it with the current group and try again.
2674 */
2675 pcg = cc->cc_previous;
2676 if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2677 cc->cc_previous = cc->cc_current;
2678 cc->cc_current = pcg;
2679 continue;
2680 }
2681
2682 /*
2683 * Can't free to either group: try the slow path.
2684 * If put_slow() releases the object for us, it
2685 * will return false. Otherwise we need to retry.
2686 */
2687 if (!pool_cache_put_slow(cc, s, object))
2688 break;
2689 }
2690 }
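
/*
 * Example (illustrative sketch; "foo" names are hypothetical): callers
 * that also need the physical address, e.g. to hand a descriptor to
 * hardware, use the _paddr variants and keep the paddr_t around for
 * the matching put:
 *
 *	paddr_t pa;
 *	void *va = pool_cache_get_paddr(foo_cache, PR_NOWAIT, &pa);
 *	if (va != NULL) {
 *		...
 *		pool_cache_put_paddr(foo_cache, va, pa);
 *	}
 */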
2691
2692 /*
2693 * pool_cache_transfer:
2694 *
2695 * Transfer objects from the per-CPU cache to the global cache.
2696 * Run within a cross-call thread.
2697 */
2698 static void
2699 pool_cache_transfer(pool_cache_t pc)
2700 {
2701 pool_cache_cpu_t *cc;
2702 pcg_t *prev, *cur, **list;
2703 int s;
2704
2705 s = splvm();
2706 mutex_enter(&pc->pc_lock);
2707 cc = pc->pc_cpus[curcpu()->ci_index];
2708 cur = cc->cc_current;
2709 cc->cc_current = __UNCONST(&pcg_dummy);
2710 prev = cc->cc_previous;
2711 cc->cc_previous = __UNCONST(&pcg_dummy);
2712 if (cur != &pcg_dummy) {
2713 if (cur->pcg_avail == cur->pcg_size) {
2714 list = &pc->pc_fullgroups;
2715 pc->pc_nfull++;
2716 } else if (cur->pcg_avail == 0) {
2717 list = &pc->pc_emptygroups;
2718 pc->pc_nempty++;
2719 } else {
2720 list = &pc->pc_partgroups;
2721 pc->pc_npart++;
2722 }
2723 cur->pcg_next = *list;
2724 *list = cur;
2725 }
2726 if (prev != &pcg_dummy) {
2727 if (prev->pcg_avail == prev->pcg_size) {
2728 list = &pc->pc_fullgroups;
2729 pc->pc_nfull++;
2730 } else if (prev->pcg_avail == 0) {
2731 list = &pc->pc_emptygroups;
2732 pc->pc_nempty++;
2733 } else {
2734 list = &pc->pc_partgroups;
2735 pc->pc_npart++;
2736 }
2737 prev->pcg_next = *list;
2738 *list = prev;
2739 }
2740 mutex_exit(&pc->pc_lock);
2741 splx(s);
2742 }
2743
2744 /*
2745 * Pool backend allocators.
2746 *
2747 * Each pool has a backend allocator that handles allocation, deallocation,
2748 * and any additional draining that might be needed.
2749 *
2750 * We provide two standard allocators:
2751 *
2752 * pool_allocator_kmem - the default when no allocator is specified
2753 *
2754 * pool_allocator_nointr - used for pools that will not be accessed
2755 * in interrupt context.
2756 */
2757 void *pool_page_alloc(struct pool *, int);
2758 void pool_page_free(struct pool *, void *);
2759
2760 struct pool_allocator pool_allocator_kmem = {
2761 .pa_alloc = pool_page_alloc,
2762 .pa_free = pool_page_free,
2763 .pa_pagesz = 0
2764 };
2765
2766 struct pool_allocator pool_allocator_nointr = {
2767 .pa_alloc = pool_page_alloc,
2768 .pa_free = pool_page_free,
2769 .pa_pagesz = 0
2770 };
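
/*
 * Example (illustrative sketch; "foo" names are hypothetical): a
 * subsystem with its own backing arena can supply a custom allocator,
 * following the same pattern as the standard ones above.  A pa_pagesz
 * of 0 is replaced with the default page size when the pool is
 * initialized:
 *
 *	static void *foo_page_alloc(struct pool *, int);
 *	static void  foo_page_free(struct pool *, void *);
 *
 *	static struct pool_allocator foo_allocator = {
 *		.pa_alloc = foo_page_alloc,
 *		.pa_free = foo_page_free,
 *		.pa_pagesz = 0,
 *	};
 */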
2771
2772 struct pool_allocator pool_allocator_big[] = {
2773 {
2774 .pa_alloc = pool_page_alloc,
2775 .pa_free = pool_page_free,
2776 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
2777 },
2778 {
2779 .pa_alloc = pool_page_alloc,
2780 .pa_free = pool_page_free,
2781 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
2782 },
2783 {
2784 .pa_alloc = pool_page_alloc,
2785 .pa_free = pool_page_free,
2786 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
2787 },
2788 {
2789 .pa_alloc = pool_page_alloc,
2790 .pa_free = pool_page_free,
2791 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
2792 },
2793 {
2794 .pa_alloc = pool_page_alloc,
2795 .pa_free = pool_page_free,
2796 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
2797 },
2798 {
2799 .pa_alloc = pool_page_alloc,
2800 .pa_free = pool_page_free,
2801 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
2802 },
2803 {
2804 .pa_alloc = pool_page_alloc,
2805 .pa_free = pool_page_free,
2806 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
2807 },
2808 {
2809 .pa_alloc = pool_page_alloc,
2810 .pa_free = pool_page_free,
2811 .pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
2812 }
2813 };
2814
2815 static int
2816 pool_bigidx(size_t size)
2817 {
2818 int i;
2819
2820 for (i = 0; i < __arraycount(pool_allocator_big); i++) {
2821 if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
2822 return i;
2823 }
2824 panic("pool item size %zu too large, use a custom allocator", size);
2825 }
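
/*
 * Worked example (assuming POOL_ALLOCATOR_BIG_BASE is 13, so that
 * pool_allocator_big[0] serves 8192-byte pages): a 20000-byte item
 * fails the tests for 8192 and 16384 and maps to index 2, i.e.
 * 32768-byte pages.
 */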
2826
2827 static void *
2828 pool_allocator_alloc(struct pool *pp, int flags)
2829 {
2830 struct pool_allocator *pa = pp->pr_alloc;
2831 void *res;
2832
2833 res = (*pa->pa_alloc)(pp, flags);
2834 return res;
2835 }
2836
2837 static void
2838 pool_allocator_free(struct pool *pp, void *v)
2839 {
2840 struct pool_allocator *pa = pp->pr_alloc;
2841
2842 if (pp->pr_redzone) {
2843 kasan_mark(v, pa->pa_pagesz, pa->pa_pagesz, 0);
2844 }
2845 (*pa->pa_free)(pp, v);
2846 }
2847
2848 void *
2849 pool_page_alloc(struct pool *pp, int flags)
2850 {
2851 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2852 vmem_addr_t va;
2853 int ret;
2854
2855 ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2856 vflags | VM_INSTANTFIT, &va);
2857
2858 return ret ? NULL : (void *)va;
2859 }
2860
2861 void
2862 pool_page_free(struct pool *pp, void *v)
2863 {
2864
2865 uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2866 }
2867
2868 static void *
2869 pool_page_alloc_meta(struct pool *pp, int flags)
2870 {
2871 const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2872 vmem_addr_t va;
2873 int ret;
2874
2875 ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2876 vflags | VM_INSTANTFIT, &va);
2877
2878 return ret ? NULL : (void *)va;
2879 }
2880
2881 static void
2882 pool_page_free_meta(struct pool *pp, void *v)
2883 {
2884
2885 vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2886 }
2887
2888 #ifdef KLEAK
2889 static void
2890 pool_kleak_fill(struct pool *pp, void *p)
2891 {
2892 if (__predict_false(pp->pr_roflags & PR_NOTOUCH)) {
2893 return;
2894 }
2895 kleak_fill_area(p, pp->pr_size);
2896 }
2897
2898 static void
2899 pool_cache_kleak_fill(pool_cache_t pc, void *p)
2900 {
2901 if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
2902 return;
2903 }
2904 pool_kleak_fill(&pc->pc_pool, p);
2905 }
2906 #endif
2907
2908 #ifdef POOL_QUARANTINE
2909 static void
2910 pool_quarantine_init(struct pool *pp)
2911 {
 2912 	pp->pr_quar.rotor = 0;
 2913 	memset(&pp->pr_quar.list, 0, sizeof(pp->pr_quar.list));
2914 }
2915
2916 static void
2917 pool_quarantine_flush(struct pool *pp)
2918 {
2919 pool_quar_t *quar = &pp->pr_quar;
2920 struct pool_pagelist pq;
2921 size_t i;
2922
2923 LIST_INIT(&pq);
2924
2925 mutex_enter(&pp->pr_lock);
2926 for (i = 0; i < POOL_QUARANTINE_DEPTH; i++) {
2927 if (quar->list[i] == 0)
2928 continue;
2929 pool_do_put(pp, (void *)quar->list[i], &pq);
2930 }
2931 mutex_exit(&pp->pr_lock);
2932
2933 pr_pagelist_free(pp, &pq);
2934 }
2935
2936 static bool
2937 pool_put_quarantine(struct pool *pp, void *v, struct pool_pagelist *pq)
2938 {
2939 pool_quar_t *quar = &pp->pr_quar;
2940 uintptr_t old;
2941
2942 if (pp->pr_roflags & PR_NOTOUCH) {
2943 return false;
2944 }
2945
2946 pool_redzone_check(pp, v);
2947
2948 old = quar->list[quar->rotor];
2949 quar->list[quar->rotor] = (uintptr_t)v;
2950 quar->rotor = (quar->rotor + 1) % POOL_QUARANTINE_DEPTH;
2951 if (old != 0) {
2952 pool_do_put(pp, (void *)old, pq);
2953 }
2954
2955 return true;
2956 }
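
/*
 * Note: the rotor makes the quarantine a fixed-depth FIFO.  An object
 * put into it is only really freed once POOL_QUARANTINE_DEPTH further
 * puts have cycled the rotor past its slot, which widens the window
 * in which a use-after-free can be caught (e.g. by the red zone or
 * KASAN checks) before the memory is reused.
 */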
2957
2958 static bool
2959 pool_cache_put_quarantine(pool_cache_t pc, void *p, paddr_t pa)
2960 {
2961 pool_cache_destruct_object(pc, p);
2962 return true;
2963 }
2964 #endif
2965
2966 #ifdef POOL_REDZONE
2967 #if defined(_LP64)
2968 # define PRIME 0x9e37fffffffc0000UL
2969 #else /* defined(_LP64) */
2970 # define PRIME 0x9e3779b1
2971 #endif /* defined(_LP64) */
2972 #define STATIC_BYTE 0xFE
2973 CTASSERT(POOL_REDZONE_SIZE > 1);
2974
2975 #ifndef KASAN
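/*
 * Map an address to a pseudo-random fill byte: multiply by PRIME and
 * keep the top byte of the product (a shift by 56 bits on LP64, 24 on
 * ILP32), so that neighbouring addresses yield different patterns.
 */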
2976 static inline uint8_t
2977 pool_pattern_generate(const void *p)
2978 {
2979 return (uint8_t)(((uintptr_t)p) * PRIME
2980 >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
2981 }
2982 #endif
2983
2984 static void
2985 pool_redzone_init(struct pool *pp, size_t requested_size)
2986 {
2987 size_t redzsz;
2988 size_t nsz;
2989
2990 #ifdef KASAN
2991 redzsz = requested_size;
2992 kasan_add_redzone(&redzsz);
2993 redzsz -= requested_size;
2994 #else
2995 redzsz = POOL_REDZONE_SIZE;
2996 #endif
2997
2998 if (pp->pr_roflags & PR_NOTOUCH) {
2999 pp->pr_redzone = false;
3000 return;
3001 }
3002
3003 /*
3004 * We may have extended the requested size earlier; check if
3005 * there's naturally space in the padding for a red zone.
3006 */
3007 if (pp->pr_size - requested_size >= redzsz) {
3008 pp->pr_reqsize_with_redzone = requested_size + redzsz;
3009 pp->pr_redzone = true;
3010 return;
3011 }
3012
3013 /*
3014 * No space in the natural padding; check if we can extend a
3015 * bit the size of the pool.
3016 */
3017 nsz = roundup(pp->pr_size + redzsz, pp->pr_align);
3018 if (nsz <= pp->pr_alloc->pa_pagesz) {
3019 /* Ok, we can */
3020 pp->pr_size = nsz;
3021 pp->pr_reqsize_with_redzone = requested_size + redzsz;
3022 pp->pr_redzone = true;
3023 } else {
3024 /* No space for a red zone... snif :'( */
3025 pp->pr_redzone = false;
3026 printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
3027 }
3028 }
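
/*
 * Worked example (illustrative; assumes POOL_REDZONE_SIZE is 2): a
 * 22-byte item aligned to 8 gives pr_size 24, leaving 2 bytes of
 * natural padding, so the red zone fits for free; a 24-byte item has
 * no padding, so pr_size grows to roundup(24 + 2, 8) == 32.
 */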
3029
3030 static void
3031 pool_redzone_fill(struct pool *pp, void *p)
3032 {
3033 if (!pp->pr_redzone)
3034 return;
3035 #ifdef KASAN
3036 kasan_mark(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone,
3037 KASAN_POOL_REDZONE);
3038 #else
3039 uint8_t *cp, pat;
3040 const uint8_t *ep;
3041
3042 cp = (uint8_t *)p + pp->pr_reqsize;
3043 ep = cp + POOL_REDZONE_SIZE;
3044
3045 /*
3046 * We really don't want the first byte of the red zone to be '\0';
3047 * an off-by-one in a string may not be properly detected.
3048 */
3049 pat = pool_pattern_generate(cp);
3050 *cp = (pat == '\0') ? STATIC_BYTE: pat;
3051 cp++;
3052
3053 while (cp < ep) {
3054 *cp = pool_pattern_generate(cp);
3055 cp++;
3056 }
3057 #endif
3058 }
3059
3060 static void
3061 pool_redzone_check(struct pool *pp, void *p)
3062 {
3063 if (!pp->pr_redzone)
3064 return;
3065 #ifdef KASAN
3066 kasan_mark(p, 0, pp->pr_reqsize_with_redzone, KASAN_POOL_FREED);
3067 #else
3068 uint8_t *cp, pat, expected;
3069 const uint8_t *ep;
3070
3071 cp = (uint8_t *)p + pp->pr_reqsize;
3072 ep = cp + POOL_REDZONE_SIZE;
3073
3074 pat = pool_pattern_generate(cp);
3075 expected = (pat == '\0') ? STATIC_BYTE: pat;
3076 if (__predict_false(expected != *cp)) {
3077 printf("%s: %p: 0x%02x != 0x%02x\n",
3078 __func__, cp, *cp, expected);
3079 }
3080 cp++;
3081
3082 while (cp < ep) {
3083 expected = pool_pattern_generate(cp);
3084 if (__predict_false(*cp != expected)) {
3085 printf("%s: %p: 0x%02x != 0x%02x\n",
3086 __func__, cp, *cp, expected);
3087 }
3088 cp++;
3089 }
3090 #endif
3091 }
3092
3093 static void
3094 pool_cache_redzone_check(pool_cache_t pc, void *p)
3095 {
3096 #ifdef KASAN
3097 /* If there is a ctor/dtor, leave the data as valid. */
3098 if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
3099 return;
3100 }
3101 #endif
3102 pool_redzone_check(&pc->pc_pool, p);
3103 }
3104
3105 #endif /* POOL_REDZONE */
3106
3107 #if defined(DDB)
3108 static bool
3109 pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3110 {
3111
3112 return (uintptr_t)ph->ph_page <= addr &&
3113 addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
3114 }
3115
3116 static bool
3117 pool_in_item(struct pool *pp, void *item, uintptr_t addr)
3118 {
3119
3120 return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
3121 }
3122
3123 static bool
3124 pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
3125 {
3126 int i;
3127
3128 if (pcg == NULL) {
3129 return false;
3130 }
3131 for (i = 0; i < pcg->pcg_avail; i++) {
3132 if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
3133 return true;
3134 }
3135 }
3136 return false;
3137 }
3138
3139 static bool
3140 pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3141 {
3142
3143 if ((pp->pr_roflags & PR_USEBMAP) != 0) {
3144 unsigned int idx = pr_item_bitmap_index(pp, ph, (void *)addr);
3145 pool_item_bitmap_t *bitmap =
3146 ph->ph_bitmap + (idx / BITMAP_SIZE);
 3147 		pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK);
3148
3149 return (*bitmap & mask) == 0;
3150 } else {
3151 struct pool_item *pi;
3152
3153 LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
3154 if (pool_in_item(pp, pi, addr)) {
3155 return false;
3156 }
3157 }
3158 return true;
3159 }
3160 }
3161
3162 void
3163 pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
3164 {
3165 struct pool *pp;
3166
3167 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3168 struct pool_item_header *ph;
3169 uintptr_t item;
3170 bool allocated = true;
3171 bool incache = false;
3172 bool incpucache = false;
3173 char cpucachestr[32];
3174
3175 if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
3176 LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
3177 if (pool_in_page(pp, ph, addr)) {
3178 goto found;
3179 }
3180 }
3181 LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
3182 if (pool_in_page(pp, ph, addr)) {
3183 allocated =
3184 pool_allocated(pp, ph, addr);
3185 goto found;
3186 }
3187 }
3188 LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
3189 if (pool_in_page(pp, ph, addr)) {
3190 allocated = false;
3191 goto found;
3192 }
3193 }
3194 continue;
3195 } else {
3196 ph = pr_find_pagehead_noalign(pp, (void *)addr);
3197 if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3198 continue;
3199 }
3200 allocated = pool_allocated(pp, ph, addr);
3201 }
3202 found:
3203 if (allocated && pp->pr_cache) {
3204 pool_cache_t pc = pp->pr_cache;
3205 struct pool_cache_group *pcg;
3206 int i;
3207
3208 for (pcg = pc->pc_fullgroups; pcg != NULL;
3209 pcg = pcg->pcg_next) {
3210 if (pool_in_cg(pp, pcg, addr)) {
3211 incache = true;
3212 goto print;
3213 }
3214 }
3215 for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
3216 pool_cache_cpu_t *cc;
3217
3218 if ((cc = pc->pc_cpus[i]) == NULL) {
3219 continue;
3220 }
3221 if (pool_in_cg(pp, cc->cc_current, addr) ||
3222 pool_in_cg(pp, cc->cc_previous, addr)) {
3223 struct cpu_info *ci =
3224 cpu_lookup(i);
3225
3226 incpucache = true;
3227 snprintf(cpucachestr,
3228 sizeof(cpucachestr),
3229 "cached by CPU %u",
3230 ci->ci_index);
3231 goto print;
3232 }
3233 }
3234 }
3235 print:
3236 item = (uintptr_t)ph->ph_page + ph->ph_off;
3237 item = item + rounddown(addr - item, pp->pr_size);
3238 (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
 3239 		    (void *)addr, (void *)item, (size_t)(addr - item),
3240 pp->pr_wchan,
3241 incpucache ? cpucachestr :
3242 incache ? "cached" : allocated ? "allocated" : "free");
3243 }
3244 }
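
/*
 * Illustrative output (hypothetical addresses and pool name), as
 * produced by the format string above:
 *
 *	0xffffa00003f2c404 is 0xffffa00003f2c400+4 in POOL 'buf2k' (allocated)
 */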
3245 #endif /* defined(DDB) */
3246
3247 static int
3248 pool_sysctl(SYSCTLFN_ARGS)
3249 {
3250 struct pool_sysctl data;
3251 struct pool *pp;
3252 struct pool_cache *pc;
3253 pool_cache_cpu_t *cc;
3254 int error;
3255 size_t i, written;
3256
3257 if (oldp == NULL) {
3258 *oldlenp = 0;
3259 TAILQ_FOREACH(pp, &pool_head, pr_poollist)
3260 *oldlenp += sizeof(data);
3261 return 0;
3262 }
3263
3264 memset(&data, 0, sizeof(data));
3265 error = 0;
3266 written = 0;
3267 TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3268 if (written + sizeof(data) > *oldlenp)
3269 break;
3270 strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
3271 data.pr_pagesize = pp->pr_alloc->pa_pagesz;
3272 data.pr_flags = pp->pr_roflags | pp->pr_flags;
3273 #define COPY(field) data.field = pp->field
3274 COPY(pr_size);
3275
3276 COPY(pr_itemsperpage);
3277 COPY(pr_nitems);
3278 COPY(pr_nout);
3279 COPY(pr_hardlimit);
3280 COPY(pr_npages);
3281 COPY(pr_minpages);
3282 COPY(pr_maxpages);
3283
3284 COPY(pr_nget);
3285 COPY(pr_nfail);
3286 COPY(pr_nput);
3287 COPY(pr_npagealloc);
3288 COPY(pr_npagefree);
3289 COPY(pr_hiwat);
3290 COPY(pr_nidle);
3291 #undef COPY
3292
3293 data.pr_cache_nmiss_pcpu = 0;
3294 data.pr_cache_nhit_pcpu = 0;
3295 if (pp->pr_cache) {
3296 pc = pp->pr_cache;
3297 data.pr_cache_meta_size = pc->pc_pcgsize;
3298 data.pr_cache_nfull = pc->pc_nfull;
3299 data.pr_cache_npartial = pc->pc_npart;
3300 data.pr_cache_nempty = pc->pc_nempty;
3301 data.pr_cache_ncontended = pc->pc_contended;
3302 data.pr_cache_nmiss_global = pc->pc_misses;
3303 data.pr_cache_nhit_global = pc->pc_hits;
3304 for (i = 0; i < pc->pc_ncpu; ++i) {
3305 cc = pc->pc_cpus[i];
3306 if (cc == NULL)
3307 continue;
3308 data.pr_cache_nmiss_pcpu += cc->cc_misses;
3309 data.pr_cache_nhit_pcpu += cc->cc_hits;
3310 }
3311 } else {
3312 data.pr_cache_meta_size = 0;
3313 data.pr_cache_nfull = 0;
3314 data.pr_cache_npartial = 0;
3315 data.pr_cache_nempty = 0;
3316 data.pr_cache_ncontended = 0;
3317 data.pr_cache_nmiss_global = 0;
3318 data.pr_cache_nhit_global = 0;
3319 }
3320
3321 error = sysctl_copyout(l, &data, oldp, sizeof(data));
3322 if (error)
3323 break;
3324 written += sizeof(data);
3325 oldp = (char *)oldp + sizeof(data);
3326 }
3327
3328 *oldlenp = written;
3329 return error;
3330 }
3331
3332 SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
3333 {
3334 const struct sysctlnode *rnode = NULL;
3335
3336 sysctl_createv(clog, 0, NULL, &rnode,
3337 CTLFLAG_PERMANENT,
3338 CTLTYPE_STRUCT, "pool",
3339 SYSCTL_DESCR("Get pool statistics"),
3340 pool_sysctl, 0, NULL, 0,
3341 CTL_KERN, CTL_CREATE, CTL_EOL);
3342 }
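
/*
 * Example (illustrative sketch, userland; needs <sys/sysctl.h>,
 * <err.h>, <stdio.h> and <stdlib.h>): the node follows the usual
 * two-pass sysctl protocol, first sizing the buffer with a NULL oldp,
 * then fetching the records:
 *
 *	size_t len;
 *	struct pool_sysctl *buf;
 *	size_t i;
 *
 *	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
 *		err(EXIT_FAILURE, "sysctlbyname");
 *	if ((buf = malloc(len)) == NULL)
 *		err(EXIT_FAILURE, "malloc");
 *	if (sysctlbyname("kern.pool", buf, &len, NULL, 0) == -1)
 *		err(EXIT_FAILURE, "sysctlbyname");
 *	for (i = 0; i < len / sizeof(*buf); i++)
 *		printf("%s\n", buf[i].pr_wchan);
 */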
3343