/*	$NetBSD: subr_pool.c,v 1.270 2020/06/07 09:45:19 maxv Exp $	*/

/*
 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
 * Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.270 2020/06/07 09:45:19 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_pool.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/bitops.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vmem.h>
#include <sys/pool.h>
#include <sys/syslog.h>
#include <sys/debug.h>
#include <sys/lockdebug.h>
#include <sys/xcall.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/asan.h>
#include <sys/msan.h>
#include <sys/fault.h>

#include <uvm/uvm_extern.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */
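
/*
 * Example usage, as a minimal sketch (the `struct foo' type, the pool
 * name and the IPL are hypothetical; error handling is omitted):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL, IPL_NONE);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, f);
 *	pool_destroy(&foo_pool);
 */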

/* List of all pools. Non-static as needed by 'vmstat -m' */
TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define	PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define	PHPOOL_FREELIST_NELEM(idx) \
	(((idx) == 0) ? BITMAP_MIN_SIZE : BITMAP_SIZE * (1 << (idx)))

#if !defined(KMSAN) && (defined(DIAGNOSTIC) || defined(KASAN))
#define POOL_REDZONE
#endif

#if defined(POOL_QUARANTINE)
#define POOL_NOCACHE
#endif

#ifdef POOL_REDZONE
# ifdef KASAN
#  define POOL_REDZONE_SIZE 8
# else
#  define POOL_REDZONE_SIZE 2
# endif
static void pool_redzone_init(struct pool *, size_t);
static void pool_redzone_fill(struct pool *, void *);
static void pool_redzone_check(struct pool *, void *);
static void pool_cache_redzone_check(pool_cache_t, void *);
#else
# define pool_redzone_init(pp, sz)		__nothing
# define pool_redzone_fill(pp, ptr)		__nothing
# define pool_redzone_check(pp, ptr)		__nothing
# define pool_cache_redzone_check(pc, ptr)	__nothing
#endif

#ifdef KMSAN
static inline void pool_get_kmsan(struct pool *, void *);
static inline void pool_put_kmsan(struct pool *, void *);
static inline void pool_cache_get_kmsan(pool_cache_t, void *);
static inline void pool_cache_put_kmsan(pool_cache_t, void *);
#else
#define pool_get_kmsan(pp, ptr)		__nothing
#define pool_put_kmsan(pp, ptr)		__nothing
#define pool_cache_get_kmsan(pc, ptr)	__nothing
#define pool_cache_put_kmsan(pc, ptr)	__nothing
#endif

#ifdef POOL_QUARANTINE
static void pool_quarantine_init(struct pool *);
static void pool_quarantine_flush(struct pool *);
static bool pool_put_quarantine(struct pool *, void *,
    struct pool_pagelist *);
#else
#define pool_quarantine_init(a)		__nothing
#define pool_quarantine_flush(a)	__nothing
#define pool_put_quarantine(a, b, c)	false
#endif

#ifdef POOL_NOCACHE
static bool pool_cache_put_nocache(pool_cache_t, void *);
#else
#define pool_cache_put_nocache(a, b)	false
#endif

#define NO_CTOR	__FPTRCAST(int (*)(void *, void *, int), nullop)
#define NO_DTOR	__FPTRCAST(void (*)(void *, void *), nullop)

#define pc_has_ctor(pc) ((pc)->pc_ctor != NO_CTOR)
#define pc_has_dtor(pc) ((pc)->pc_dtor != NO_DTOR)

/*
 * Pool backend allocators.
 *
 * Each pool has a backend allocator that handles allocation, deallocation,
 * and any additional draining that might be needed.
 *
 * We provide two standard allocators:
 *
 *	pool_allocator_kmem - the default when no allocator is specified
 *
 *	pool_allocator_nointr - used for pools that will not be accessed
 *	in interrupt context.
 */
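
/*
 * For instance (a sketch; `foo_pool' and `struct foo' are hypothetical),
 * a pool that is only ever used from thread context can name the nointr
 * allocator explicitly, while passing NULL selects pool_allocator_kmem:
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", &pool_allocator_nointr, IPL_NONE);
 */
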
void *pool_page_alloc(struct pool *, int);
void pool_page_free(struct pool *, void *);

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

struct pool_allocator pool_allocator_kmem = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free,
	.pa_pagesz = 0
};

struct pool_allocator pool_allocator_nointr = {
	.pa_alloc = pool_page_alloc,
	.pa_free = pool_page_free,
	.pa_pagesz = 0
};

struct pool_allocator pool_allocator_meta = {
	.pa_alloc = pool_page_alloc_meta,
	.pa_free = pool_page_free_meta,
	.pa_pagesz = 0
};

#define POOL_ALLOCATOR_BIG_BASE 13
static struct pool_allocator pool_allocator_big[] = {
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 0),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 1),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 2),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 3),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 4),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 5),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 6),
	},
	{
		.pa_alloc = pool_page_alloc,
		.pa_free = pool_page_free,
		.pa_pagesz = 1 << (POOL_ALLOCATOR_BIG_BASE + 7),
	}
};

static int pool_bigidx(size_t);

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This lock protects both pool_head and drainpp. */
static kmutex_t pool_head_lock;
static kcondvar_t pool_busy;

/* This lock protects initialization of a potentially shared pool allocator */
static kmutex_t pool_allocator_lock;

static unsigned int poolid_counter = 0;

typedef uint32_t pool_item_bitmap_t;
#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
#define	BITMAP_MASK	(BITMAP_SIZE - 1)
#define	BITMAP_MIN_SIZE	(CHAR_BIT * sizeof(((struct pool_item_header *)NULL)->ph_u2))

struct pool_item_header {
	/* Page headers */
	LIST_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	union {
		/* !PR_PHINPAGE */
		struct {
			SPLAY_ENTRY(pool_item_header)
				phu_node;	/* off-page page headers */
		} phu_offpage;
		/* PR_PHINPAGE */
		struct {
			unsigned int phu_poolid;
		} phu_onpage;
	} ph_u1;
	void *			ph_page;	/* this page's address */
	uint32_t		ph_time;	/* last referenced */
	uint16_t		ph_nmissing;	/* # of chunks in use */
	uint16_t		ph_off;		/* start offset in page */
	union {
		/* !PR_USEBMAP */
		struct {
			LIST_HEAD(, pool_item)
				phu_itemlist;	/* chunk list for this page */
		} phu_normal;
		/* PR_USEBMAP */
		struct {
			pool_item_bitmap_t phu_bitmap[1];
		} phu_notouch;
	} ph_u2;
};
#define ph_node		ph_u1.phu_offpage.phu_node
#define ph_poolid	ph_u1.phu_onpage.phu_poolid
#define ph_itemlist	ph_u2.phu_normal.phu_itemlist
#define ph_bitmap	ph_u2.phu_notouch.phu_bitmap

#define PHSIZE	ALIGN(sizeof(struct pool_item_header))

CTASSERT(offsetof(struct pool_item_header, ph_u2) +
    BITMAP_MIN_SIZE / CHAR_BIT == sizeof(struct pool_item_header));

#if defined(DIAGNOSTIC) && !defined(KASAN)
#define POOL_CHECK_MAGIC
#endif

struct pool_item {
#ifdef POOL_CHECK_MAGIC
	u_int pi_magic;
#endif
#define	PI_MAGIC 0xdeaddeadU
	/* Other entries use only this list entry */
	LIST_ENTRY(pool_item)	pi_list;
};

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems ||				\
	 (pp)->pr_npages < (pp)->pr_minpages)
#define	POOL_OBJ_TO_PAGE(pp, v)						\
	(void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references up
 * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
 * object from the pool, it calls the object's constructor and places it
 * into a cache group.  When a cache group frees an object back to the
 * pool, it first calls the object's destructor.  This allows the object
 * to persist in constructed form while freed to the cache.
 *
 * The pool references each cache, so that when a pool is drained by the
 * pagedaemon, it can drain each individual cache as well.  Each time a
 * cache is drained, the most idle cache group is freed to the pool in
 * its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
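
/*
 * Example, as a minimal sketch (`struct foo', foo_ctor(), foo_dtor()
 * and the IPL are hypothetical):
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(foo_cache, f);
 *
 * Objects cycled through the cache stay constructed while cached;
 * foo_dtor() only runs once an object is released back to the pool.
 */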

static struct pool pcg_normal_pool;
static struct pool pcg_large_pool;
static struct pool cache_pool;
static struct pool cache_cpu_pool;

/* List of all caches. */
TAILQ_HEAD(,pool_cache) pool_cache_head =
    TAILQ_HEAD_INITIALIZER(pool_cache_head);

int pool_cache_disable;		/* global disable for caching */
static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */

static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
		    void *);
static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
		    void **, paddr_t *, int);
static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
static void	pool_cache_transfer(pool_cache_t);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, void *,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

static int	pool_grow(struct pool *, int);
static void	*pool_allocator_alloc(struct pool *, int);
static void	pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...) __printflike(1, 2));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...) __printflike(1, 2));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/* -------------------------------------------------------------------------- */

static inline unsigned int
pr_item_bitmap_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
	const char *cp = v;
	unsigned int idx;

	KASSERT(pp->pr_roflags & PR_USEBMAP);
	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;

	if (__predict_false(idx >= pp->pr_itemsperpage)) {
		panic("%s: [%s] %u >= %u", __func__, pp->pr_wchan, idx,
		    pp->pr_itemsperpage);
	}

	return idx;
}

static inline void
pr_item_bitmap_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	unsigned int idx = pr_item_bitmap_index(pp, ph, obj);
	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
	pool_item_bitmap_t mask = 1U << (idx & BITMAP_MASK);

	if (__predict_false((*bitmap & mask) != 0)) {
		panic("%s: [%s] %p already freed", __func__, pp->pr_wchan, obj);
	}

	*bitmap |= mask;
}

static inline void *
pr_item_bitmap_get(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	unsigned int idx;
	int i;

	for (i = 0; ; i++) {
		int bit;

		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
		bit = ffs32(bitmap[i]);
		if (bit) {
			pool_item_bitmap_t mask;

			bit--;
			idx = (i * BITMAP_SIZE) + bit;
			mask = 1U << bit;
			KASSERT((bitmap[i] & mask) != 0);
			bitmap[i] &= ~mask;
			break;
		}
	}
	KASSERT(idx < pp->pr_itemsperpage);
	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static inline void
pr_item_bitmap_init(const struct pool *pp, struct pool_item_header *ph)
{
	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
	int i;

	for (i = 0; i < n; i++) {
		bitmap[i] = (pool_item_bitmap_t)-1;
	}
}

/* -------------------------------------------------------------------------- */

static inline void
pr_item_linkedlist_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
	struct pool_item *pi = obj;

#ifdef POOL_CHECK_MAGIC
	pi->pi_magic = PI_MAGIC;
#endif

	if (pp->pr_redzone) {
		/*
		 * Mark the pool_item as valid. The rest is already
		 * invalid.
		 */
		kasan_mark(pi, sizeof(*pi), sizeof(*pi), 0);
	}

	LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
}

static inline void *
pr_item_linkedlist_get(struct pool *pp, struct pool_item_header *ph)
{
	struct pool_item *pi;
	void *v;

	v = pi = LIST_FIRST(&ph->ph_itemlist);
	if (__predict_false(v == NULL)) {
		mutex_exit(&pp->pr_lock);
		panic("%s: [%s] page empty", __func__, pp->pr_wchan);
	}
	KASSERTMSG((pp->pr_nitems > 0),
	    "%s: [%s] nitems %u inconsistent on itemlist",
	    __func__, pp->pr_wchan, pp->pr_nitems);
#ifdef POOL_CHECK_MAGIC
	KASSERTMSG((pi->pi_magic == PI_MAGIC),
	    "%s: [%s] free list modified: "
	    "magic=%x; page %p; item addr %p", __func__,
	    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
#endif

	/*
	 * Remove from item list.
	 */
	LIST_REMOVE(pi, pi_list);

	return v;
}

/* -------------------------------------------------------------------------- */

static inline void
pr_phinpage_check(struct pool *pp, struct pool_item_header *ph, void *page,
    void *object)
{
	if (__predict_false((void *)ph->ph_page != page)) {
		panic("%s: [%s] item %p not part of pool", __func__,
		    pp->pr_wchan, object);
	}
	if (__predict_false((char *)object < (char *)page + ph->ph_off)) {
		panic("%s: [%s] item %p below item space", __func__,
		    pp->pr_wchan, object);
	}
	if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
		panic("%s: [%s] item %p poolid %u != %u", __func__,
		    pp->pr_wchan, object, ph->ph_poolid, pp->pr_poolid);
	}
}

static inline void
pc_phinpage_check(pool_cache_t pc, void *object)
{
	struct pool_item_header *ph;
	struct pool *pp;
	void *page;

	pp = &pc->pc_pool;
	page = POOL_OBJ_TO_PAGE(pp, object);
	ph = (struct pool_item_header *)page;

	pr_phinpage_check(pp, ph, page, object);
}

/* -------------------------------------------------------------------------- */

static inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{

	/*
	 * We consider pool_item_header with smaller ph_page bigger. This
	 * unnatural ordering is for the benefit of pr_find_pagehead.
	 */
	if (a->ph_page < b->ph_page)
		return 1;
	else if (a->ph_page > b->ph_page)
		return -1;
	else
		return 0;
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

static inline struct pool_item_header *
pr_find_pagehead_noalign(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	tmp.ph_page = (void *)(uintptr_t)v;
	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
	if (ph == NULL) {
		ph = SPLAY_ROOT(&pp->pr_phtree);
		if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
			ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
		}
		KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
	}

	return ph;
}

/*
 * Return the pool page header based on item address.
 */
static inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, void *v)
{
	struct pool_item_header *ph, tmp;

	if ((pp->pr_roflags & PR_NOALIGN) != 0) {
		ph = pr_find_pagehead_noalign(pp, v);
	} else {
		void *page = POOL_OBJ_TO_PAGE(pp, v);
		if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
			ph = (struct pool_item_header *)page;
			pr_phinpage_check(pp, ph, page, v);
		} else {
			tmp.ph_page = page;
			ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
		}
	}

	KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
	    ((char *)ph->ph_page <= (char *)v &&
	    (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
	return ph;
}

static void
pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
{
	struct pool_item_header *ph;

	while ((ph = LIST_FIRST(pq)) != NULL) {
		LIST_REMOVE(ph, ph_pagelist);
		pool_allocator_free(pp, ph->ph_page);
		if ((pp->pr_roflags & PR_PHINPAGE) == 0)
			pool_put(pp->pr_phpool, ph);
	}
}

/*
 * Remove a page from the pool.
 */
static inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{

	KASSERT(mutex_owned(&pp->pr_lock));

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
		KASSERT(pp->pr_nidle != 0);
		KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
		    "%s: [%s] nitems=%u < itemsperpage=%u", __func__,
		    pp->pr_wchan, pp->pr_nitems, pp->pr_itemsperpage);
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink the page from the pool and queue it for release.
	 */
	LIST_REMOVE(ph, ph_pagelist);
	if (pp->pr_roflags & PR_PHINPAGE) {
		if (__predict_false(ph->ph_poolid != pp->pr_poolid)) {
			panic("%s: [%s] ph %p poolid %u != %u",
			    __func__, pp->pr_wchan, ph, ph->ph_poolid,
			    pp->pr_poolid);
		}
	} else {
		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
	}
	LIST_INSERT_HEAD(pq, ph, ph_pagelist);

	pp->pr_npages--;
	pp->pr_npagefree++;

	pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
pool_subsystem_init(void)
{
	size_t size;
	int idx;

	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&pool_busy, "poolbusy");

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 */
	for (idx = 0; idx < PHPOOL_MAX; idx++) {
		static char phpool_names[PHPOOL_MAX][6+1+6+1];
		int nelem;
		size_t sz;

		nelem = PHPOOL_FREELIST_NELEM(idx);
		KASSERT(nelem != 0);
		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
		    "phpool-%d", nelem);
		sz = offsetof(struct pool_item_header,
		    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
		pool_init(&phpool[idx], sz, 0, 0, 0,
		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
	}

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
	    "pcgnormal", &pool_allocator_meta, IPL_VM);

	size = sizeof(pcg_t) +
	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
	    "pcglarge", &pool_allocator_meta, IPL_VM);

	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);

	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
}

static inline bool
pool_init_is_phinpage(const struct pool *pp)
{
	size_t pagesize;

	if (pp->pr_roflags & PR_PHINPAGE) {
		return true;
	}
	if (pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) {
		return false;
	}

	pagesize = pp->pr_alloc->pa_pagesz;

	/*
	 * Threshold: the item size is below 1/16 of a page size, and below
	 * 8 times the page header size. The latter ensures we go off-page
	 * if the page header would make us waste a rather big item.
	 */
	if (pp->pr_size < MIN(pagesize / 16, PHSIZE * 8)) {
		return true;
	}

	/* Put the header into the page if it doesn't waste any items. */
	if (pagesize / pp->pr_size == (pagesize - PHSIZE) / pp->pr_size) {
		return true;
	}

	return false;
}
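
/*
 * For illustration, take a 4096-byte page and assume PHSIZE is 64: the
 * first test above keeps the header on-page for any item smaller than
 * MIN(4096 / 16, 64 * 8) = 256 bytes.  The second test also keeps a
 * 1000-byte item on-page, since 4096 / 1000 == (4096 - 64) / 1000 == 4,
 * i.e. the header costs no item; a 1024-byte item goes off-page,
 * because four such items fit only without the header.
 */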

static inline bool
pool_init_is_usebmap(const struct pool *pp)
{
	size_t bmapsize;

	if (pp->pr_roflags & PR_NOTOUCH) {
		return true;
	}

	/*
	 * If we're off-page, go with a bitmap.
	 */
	if (!(pp->pr_roflags & PR_PHINPAGE)) {
		return true;
	}

	/*
	 * If we're on-page, and the page header can already contain a bitmap
	 * big enough to cover all the items of the page, go with a bitmap.
	 */
	bmapsize = roundup(PHSIZE, pp->pr_align) -
	    offsetof(struct pool_item_header, ph_bitmap[0]);
	KASSERT(bmapsize % sizeof(pool_item_bitmap_t) == 0);
	if (pp->pr_itemsperpage <= bmapsize * CHAR_BIT) {
		return true;
	}

	return false;
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before kmem(9) is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc, int ipl)
{
	struct pool *pp1;
	size_t prsize;
	int itemspace, slack;

	/* XXX ioff will be removed. */
	KASSERT(ioff == 0);

#ifdef DEBUG
	if (__predict_true(!cold))
		mutex_enter(&pool_head_lock);
	/*
	 * Check that the pool hasn't already been initialised and
	 * added to the list of all pools.
	 */
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (pp == pp1)
			panic("%s: [%s] already initialised", __func__,
			    wchan);
	}
	if (__predict_true(!cold))
		mutex_exit(&pool_head_lock);
#endif

	if (palloc == NULL)
		palloc = &pool_allocator_kmem;

	if (!cold)
		mutex_enter(&pool_allocator_lock);
	if (palloc->pa_refcnt++ == 0) {
		if (palloc->pa_pagesz == 0)
			palloc->pa_pagesz = PAGE_SIZE;

		TAILQ_INIT(&palloc->pa_list);

		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
	}
	if (!cold)
		mutex_exit(&pool_allocator_lock);

	if (align == 0)
		align = ALIGN(1);

	prsize = size;
	if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
		prsize = sizeof(struct pool_item);

	prsize = roundup(prsize, align);
	KASSERTMSG((prsize <= palloc->pa_pagesz),
	    "%s: [%s] pool item size (%zu) larger than page size (%u)",
	    __func__, wchan, prsize, palloc->pa_pagesz);

	/*
	 * Initialize the pool structure.
	 */
	LIST_INIT(&pp->pr_emptypages);
	LIST_INIT(&pp->pr_fullpages);
	LIST_INIT(&pp->pr_partpages);
	pp->pr_cache = NULL;
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = prsize;
	pp->pr_reqsize = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_alloc = palloc;
	pp->pr_poolid = atomic_inc_uint_nv(&poolid_counter);
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;
	pp->pr_drain_hook = NULL;
	pp->pr_drain_hook_arg = NULL;
	pp->pr_freecheck = NULL;
	pp->pr_redzone = false;
	pool_redzone_init(pp, size);
	pool_quarantine_init(pp);

	/*
	 * Decide whether to put the page header off-page to avoid wasting too
	 * large a part of the page or too big an item. Off-page page headers
	 * go on a hash table, so we can match a returned item with its header
	 * based on the page address.
	 */
	if (pool_init_is_phinpage(pp)) {
		/* Use the beginning of the page for the page header */
		itemspace = palloc->pa_pagesz - roundup(PHSIZE, align);
		pp->pr_itemoffset = roundup(PHSIZE, align);
		pp->pr_roflags |= PR_PHINPAGE;
	} else {
		/* The page header will be taken from our page header pool */
		itemspace = palloc->pa_pagesz;
		pp->pr_itemoffset = 0;
		SPLAY_INIT(&pp->pr_phtree);
	}

	pp->pr_itemsperpage = itemspace / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Decide whether to use a bitmap or a linked list to manage freed
	 * items.
	 */
	if (pool_init_is_usebmap(pp)) {
		pp->pr_roflags |= PR_USEBMAP;
	}

	/*
	 * If we're off-page, then we're using a bitmap; choose the appropriate
	 * pool to allocate page headers, whose size varies depending on the
	 * bitmap. If we're on-page, nothing to do.
	 */
	if (!(pp->pr_roflags & PR_PHINPAGE)) {
		int idx;

		KASSERT(pp->pr_roflags & PR_USEBMAP);

		for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
		    idx++) {
			/* nothing */
		}
		if (idx >= PHPOOL_MAX) {
			/*
			 * if you see this panic, consider tweaking
			 * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
			 */
			panic("%s: [%s] too large itemsperpage(%d) for "
			    "PR_USEBMAP", __func__,
			    pp->pr_wchan, pp->pr_itemsperpage);
		}
		pp->pr_phpool = &phpool[idx];
	} else {
		pp->pr_phpool = NULL;
	}

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = itemspace - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = rounddown(slack, align);
	pp->pr_curcolor = 0;

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;
	pp->pr_refcnt = 0;

	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
	cv_init(&pp->pr_cv, wchan);
	pp->pr_ipl = ipl;

	/* Insert into the list of all pools. */
	if (!cold)
		mutex_enter(&pool_head_lock);
	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
			break;
	}
	if (pp1 == NULL)
		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	else
		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
	if (!cold)
		mutex_exit(&pool_head_lock);

	/* Insert this into the list of pools using this allocator. */
	if (!cold)
		mutex_enter(&palloc->pa_lock);
	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
	if (!cold)
		mutex_exit(&palloc->pa_lock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_pagelist pq;
	struct pool_item_header *ph;

	pool_quarantine_flush(pp);

	/* Remove from global pool list */
	mutex_enter(&pool_head_lock);
	while (pp->pr_refcnt != 0)
		cv_wait(&pool_busy, &pool_head_lock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	if (drainpp == pp)
		drainpp = NULL;
	mutex_exit(&pool_head_lock);

	/* Remove this pool from its allocator's list of pools. */
	mutex_enter(&pp->pr_alloc->pa_lock);
	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
	mutex_exit(&pp->pr_alloc->pa_lock);

	mutex_enter(&pool_allocator_lock);
	if (--pp->pr_alloc->pa_refcnt == 0)
		mutex_destroy(&pp->pr_alloc->pa_lock);
	mutex_exit(&pool_allocator_lock);

	mutex_enter(&pp->pr_lock);

	KASSERT(pp->pr_cache == NULL);
	KASSERTMSG((pp->pr_nout == 0),
	    "%s: [%s] pool busy: still out: %u", __func__, pp->pr_wchan,
	    pp->pr_nout);
	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
	KASSERT(LIST_EMPTY(&pp->pr_partpages));

	/* Remove all pages */
	LIST_INIT(&pq);
	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
		pr_rmpage(pp, ph, &pq);

	mutex_exit(&pp->pr_lock);

	pr_pagelist_free(pp, &pq);
	cv_destroy(&pp->pr_cv);
	mutex_destroy(&pp->pr_lock);
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

	/* XXX no locking -- must be used just after pool_init() */
	KASSERTMSG((pp->pr_drain_hook == NULL),
	    "%s: [%s] already set", __func__, pp->pr_wchan);
	pp->pr_drain_hook = fn;
	pp->pr_drain_hook_arg = arg;
}
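
/*
 * Example of installing a drain hook, as a sketch (`foo_pool' and
 * foo_drain() are hypothetical):
 *
 *	pool_init(&foo_pool, ...);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 *
 * where foo_drain(void *arg, int flags) is expected to free items the
 * subsystem holds outside the pool back to it, so that a pool at its
 * hard limit can make progress (see the hook invocation in pool_get()).
 */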
1025 1.68 thorpej
1026 1.88 chs static struct pool_item_header *
1027 1.128 christos pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1028 1.55 thorpej {
1029 1.55 thorpej struct pool_item_header *ph;
1030 1.55 thorpej
1031 1.55 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1032 1.241 maxv ph = storage;
1033 1.134 ad else
1034 1.97 yamt ph = pool_get(pp->pr_phpool, flags);
1035 1.55 thorpej
1036 1.236 maxv return ph;
1037 1.55 thorpej }
1038 1.1 pk
1039 1.1 pk /*
1040 1.134 ad * Grab an item from the pool.
1041 1.1 pk */
1042 1.3 pk void *
1043 1.56 sommerfe pool_get(struct pool *pp, int flags)
1044 1.1 pk {
1045 1.3 pk struct pool_item_header *ph;
1046 1.55 thorpej void *v;
1047 1.1 pk
1048 1.215 christos KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
1049 1.207 riastrad KASSERTMSG((pp->pr_itemsperpage != 0),
1050 1.213 christos "%s: [%s] pr_itemsperpage is zero, "
1051 1.213 christos "pool not initialized?", __func__, pp->pr_wchan);
1052 1.207 riastrad KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
1053 1.207 riastrad || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
1054 1.213 christos "%s: [%s] is IPL_NONE, but called from interrupt context",
1055 1.213 christos __func__, pp->pr_wchan);
1056 1.155 ad if (flags & PR_WAITOK) {
1057 1.154 yamt ASSERT_SLEEPABLE();
1058 1.155 ad }
1059 1.1 pk
1060 1.270 maxv if (flags & PR_NOWAIT) {
1061 1.270 maxv if (fault_inject())
1062 1.270 maxv return NULL;
1063 1.270 maxv }
1064 1.270 maxv
1065 1.134 ad mutex_enter(&pp->pr_lock);
1066 1.20 thorpej startover:
1067 1.20 thorpej /*
1068 1.20 thorpej * Check to see if we've reached the hard limit. If we have,
1069 1.20 thorpej * and we can wait, then wait until an item has been returned to
1070 1.20 thorpej * the pool.
1071 1.20 thorpej */
1072 1.207 riastrad KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
1073 1.213 christos "%s: %s: crossed hard limit", __func__, pp->pr_wchan);
1074 1.34 thorpej if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1075 1.68 thorpej if (pp->pr_drain_hook != NULL) {
1076 1.68 thorpej /*
1077 1.68 thorpej * Since the drain hook is going to free things
1078 1.68 thorpej * back to the pool, unlock, call the hook, re-lock,
1079 1.68 thorpej * and check the hardlimit condition again.
1080 1.68 thorpej */
1081 1.134 ad mutex_exit(&pp->pr_lock);
1082 1.68 thorpej (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1083 1.134 ad mutex_enter(&pp->pr_lock);
1084 1.68 thorpej if (pp->pr_nout < pp->pr_hardlimit)
1085 1.68 thorpej goto startover;
1086 1.68 thorpej }
1087 1.68 thorpej
1088 1.29 sommerfe if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1089 1.20 thorpej /*
1090 1.20 thorpej * XXX: A warning isn't logged in this case. Should
1091 1.20 thorpej * it be?
1092 1.20 thorpej */
1093 1.20 thorpej pp->pr_flags |= PR_WANTED;
1094 1.212 christos do {
1095 1.212 christos cv_wait(&pp->pr_cv, &pp->pr_lock);
1096 1.212 christos } while (pp->pr_flags & PR_WANTED);
1097 1.20 thorpej goto startover;
1098 1.20 thorpej }
1099 1.31 thorpej
1100 1.31 thorpej /*
1101 1.31 thorpej * Log a message that the hard limit has been hit.
1102 1.31 thorpej */
1103 1.31 thorpej if (pp->pr_hardlimit_warning != NULL &&
1104 1.31 thorpej ratecheck(&pp->pr_hardlimit_warning_last,
1105 1.31 thorpej &pp->pr_hardlimit_ratecap))
1106 1.31 thorpej log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1107 1.21 thorpej
1108 1.21 thorpej pp->pr_nfail++;
1109 1.21 thorpej
1110 1.134 ad mutex_exit(&pp->pr_lock);
1111 1.216 christos KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
1112 1.236 maxv return NULL;
1113 1.20 thorpej }
1114 1.20 thorpej
1115 1.3 pk /*
1116 1.3 pk * The convention we use is that if `curpage' is not NULL, then
1117 1.3 pk * it points at a non-empty bucket. In particular, `curpage'
1118 1.3 pk * never points at a page header which has PR_PHINPAGE set and
1119 1.3 pk * has no items in its bucket.
1120 1.3 pk */
1121 1.20 thorpej if ((ph = pp->pr_curpage) == NULL) {
1122 1.113 yamt int error;
1123 1.113 yamt
1124 1.207 riastrad KASSERTMSG((pp->pr_nitems == 0),
1125 1.213 christos "%s: [%s] curpage NULL, inconsistent nitems %u",
1126 1.213 christos __func__, pp->pr_wchan, pp->pr_nitems);
1127 1.20 thorpej
1128 1.21 thorpej /*
1129 1.21 thorpej * Call the back-end page allocator for more memory.
1130 1.21 thorpej * Release the pool lock, as the back-end page allocator
1131 1.21 thorpej * may block.
1132 1.21 thorpej */
1133 1.113 yamt error = pool_grow(pp, flags);
1134 1.113 yamt if (error != 0) {
1135 1.21 thorpej /*
1136 1.210 mlelstv * pool_grow aborts when another thread
1137 1.210 mlelstv * is allocating a new page. Retry if it
1138 1.210 mlelstv * waited for it.
1139 1.210 mlelstv */
1140 1.210 mlelstv if (error == ERESTART)
1141 1.210 mlelstv goto startover;
1142 1.210 mlelstv
1143 1.210 mlelstv /*
1144 1.55 thorpej * We were unable to allocate a page or item
1145 1.55 thorpej * header, but we released the lock during
1146 1.55 thorpej * allocation, so perhaps items were freed
1147 1.55 thorpej * back to the pool. Check for this case.
1148 1.21 thorpej */
1149 1.21 thorpej if (pp->pr_curpage != NULL)
1150 1.21 thorpej goto startover;
1151 1.15 pk
1152 1.117 yamt pp->pr_nfail++;
1153 1.134 ad mutex_exit(&pp->pr_lock);
1154 1.265 chs KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
1155 1.236 maxv return NULL;
1156 1.1 pk }
1157 1.3 pk
1158 1.20 thorpej /* Start the allocation process over. */
1159 1.20 thorpej goto startover;
1160 1.3 pk }
1161 1.242 maxv if (pp->pr_roflags & PR_USEBMAP) {
1162 1.207 riastrad KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
1163 1.251 christos "%s: [%s] pool page empty", __func__, pp->pr_wchan);
1164 1.234 maxv v = pr_item_bitmap_get(pp, ph);
1165 1.97 yamt } else {
1166 1.234 maxv v = pr_item_linkedlist_get(pp, ph);
1167 1.97 yamt }
1168 1.20 thorpej pp->pr_nitems--;
1169 1.20 thorpej pp->pr_nout++;
1170 1.6 thorpej if (ph->ph_nmissing == 0) {
1171 1.207 riastrad KASSERT(pp->pr_nidle > 0);
1172 1.6 thorpej pp->pr_nidle--;
1173 1.88 chs
1174 1.88 chs /*
1175 1.88 chs * This page was previously empty. Move it to the list of
1176 1.88 chs * partially-full pages. This page is already curpage.
1177 1.88 chs */
1178 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1179 1.88 chs LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1180 1.6 thorpej }
1181 1.3 pk ph->ph_nmissing++;
1182 1.97 yamt if (ph->ph_nmissing == pp->pr_itemsperpage) {
1183 1.242 maxv KASSERTMSG(((pp->pr_roflags & PR_USEBMAP) ||
1184 1.207 riastrad LIST_EMPTY(&ph->ph_itemlist)),
1185 1.213 christos "%s: [%s] nmissing (%u) inconsistent", __func__,
1186 1.213 christos pp->pr_wchan, ph->ph_nmissing);
1187 1.3 pk /*
1188 1.88 chs * This page is now full. Move it to the full list
1189 1.88 chs * and select a new current page.
1190 1.3 pk */
1191 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1192 1.88 chs LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1193 1.88 chs pool_update_curpage(pp);
1194 1.1 pk }
1195 1.3 pk
1196 1.3 pk pp->pr_nget++;
1197 1.20 thorpej
1198 1.20 thorpej /*
1199 1.20 thorpej * If we have a low water mark and we are now below that low
1200 1.20 thorpej * water mark, add more items to the pool.
1201 1.20 thorpej */
1202 1.53 thorpej if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1203 1.20 thorpej /*
1204 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1205 1.20 thorpej * to try again in a second or so? The latter could break
1206 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1207 1.20 thorpej */
1208 1.20 thorpej }
1209 1.20 thorpej
1210 1.134 ad mutex_exit(&pp->pr_lock);
1211 1.238 maxv KASSERT((((vaddr_t)v) & (pp->pr_align - 1)) == 0);
1212 1.125 ad FREECHECK_OUT(&pp->pr_freecheck, v);
1213 1.204 maxv pool_redzone_fill(pp, v);
1214 1.262 maxv pool_get_kmsan(pp, v);
1215 1.232 christos if (flags & PR_ZERO)
1216 1.233 maxv memset(v, 0, pp->pr_reqsize);
1217 1.232 christos return v;
1218 1.1 pk }
1219 1.1 pk
1220 1.1 pk /*
1221 1.43 thorpej * Internal version of pool_put(). Pool is already locked/entered.
1222 1.1 pk */
1223 1.43 thorpej static void
1224 1.101 thorpej pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1225 1.1 pk {
1226 1.3 pk struct pool_item_header *ph;
1227 1.3 pk
1228 1.134 ad KASSERT(mutex_owned(&pp->pr_lock));
1229 1.204 maxv pool_redzone_check(pp, v);
1230 1.262 maxv pool_put_kmsan(pp, v);
1231 1.125 ad FREECHECK_IN(&pp->pr_freecheck, v);
1232 1.134 ad LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
1233 1.61 chs
1234 1.207 riastrad KASSERTMSG((pp->pr_nout > 0),
1235 1.213 christos "%s: [%s] putting with none out", __func__, pp->pr_wchan);
1236 1.3 pk
1237 1.121 yamt if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1238 1.213 christos panic("%s: [%s] page header missing", __func__, pp->pr_wchan);
1239 1.3 pk }
1240 1.28 thorpej
1241 1.3 pk /*
1242 1.3 pk * Return to item list.
1243 1.3 pk */
1244 1.242 maxv if (pp->pr_roflags & PR_USEBMAP) {
1245 1.234 maxv pr_item_bitmap_put(pp, ph, v);
1246 1.97 yamt } else {
1247 1.234 maxv pr_item_linkedlist_put(pp, ph, v);
1248 1.97 yamt }
1249 1.79 thorpej KDASSERT(ph->ph_nmissing != 0);
1250 1.3 pk ph->ph_nmissing--;
1251 1.3 pk pp->pr_nput++;
1252 1.20 thorpej pp->pr_nitems++;
1253 1.20 thorpej pp->pr_nout--;
1254 1.3 pk
1255 1.3 pk /* Cancel "pool empty" condition if it exists */
1256 1.3 pk if (pp->pr_curpage == NULL)
1257 1.3 pk pp->pr_curpage = ph;
1258 1.3 pk
1259 1.3 pk if (pp->pr_flags & PR_WANTED) {
1260 1.3 pk pp->pr_flags &= ~PR_WANTED;
1261 1.134 ad cv_broadcast(&pp->pr_cv);
1262 1.3 pk }
1263 1.3 pk
1264 1.3 pk /*
1265 1.88 chs * If this page is now empty, do one of two things:
1266 1.21 thorpej *
1267 1.88 chs * (1) If we have more pages than the page high water mark,
1268 1.96 thorpej * free the page back to the system. ONLY CONSIDER
1269 1.90 thorpej * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1270 1.90 thorpej * CLAIM.
1271 1.21 thorpej *
1272 1.88 chs * (2) Otherwise, move the page to the empty page list.
1273 1.88 chs *
1274 1.88 chs * Either way, select a new current page (so we use a partially-full
1275 1.88 chs * page if one is available).
1276 1.3 pk */
1277 1.3 pk if (ph->ph_nmissing == 0) {
1278 1.6 thorpej pp->pr_nidle++;
1279 1.267 chs if (pp->pr_nitems - pp->pr_itemsperpage >= pp->pr_minitems &&
1280 1.267 chs pp->pr_npages > pp->pr_minpages &&
1281 1.152 yamt pp->pr_npages > pp->pr_maxpages) {
1282 1.101 thorpej pr_rmpage(pp, ph, pq);
1283 1.3 pk } else {
1284 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1285 1.88 chs LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1286 1.3 pk
1287 1.21 thorpej /*
1288 1.21 thorpej * Update the timestamp on the page. A page must
1289 1.21 thorpej * be idle for some period of time before it can
1290 1.21 thorpej * be reclaimed by the pagedaemon. This minimizes
1291 1.21 thorpej * ping-pong'ing for memory.
1292 1.151 yamt *
1293 1.151 yamt * note for 64-bit time_t: truncating to 32-bit is not
1294 1.151 yamt * a problem for our usage.
1295 1.21 thorpej */
1296 1.151 yamt ph->ph_time = time_uptime;
1297 1.1 pk }
1298 1.88 chs pool_update_curpage(pp);
1299 1.1 pk }
1300 1.88 chs
1301 1.21 thorpej /*
1302 1.88 chs * If the page was previously completely full, move it to the
1303 1.88 chs * partially-full list and make it the current page. The next
1304 1.88 chs * allocation will get the item from this page, instead of
1305 1.88 chs * further fragmenting the pool.
1306 1.21 thorpej */
1307 1.21 thorpej else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1308 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1309 1.88 chs LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1310 1.21 thorpej pp->pr_curpage = ph;
1311 1.21 thorpej }
1312 1.43 thorpej }
1313 1.43 thorpej
1314 1.56 sommerfe void
1315 1.56 sommerfe pool_put(struct pool *pp, void *v)
1316 1.56 sommerfe {
1317 1.101 thorpej struct pool_pagelist pq;
1318 1.101 thorpej
1319 1.101 thorpej LIST_INIT(&pq);
1320 1.56 sommerfe
1321 1.134 ad mutex_enter(&pp->pr_lock);
1322 1.249 maxv if (!pool_put_quarantine(pp, v, &pq)) {
1323 1.249 maxv pool_do_put(pp, v, &pq);
1324 1.249 maxv }
1325 1.134 ad mutex_exit(&pp->pr_lock);
1326 1.56 sommerfe
1327 1.102 chs pr_pagelist_free(pp, &pq);
1328 1.56 sommerfe }
1329 1.57 sommerfe
1330 1.74 thorpej /*
1331 1.113 yamt * pool_grow: grow a pool by a page.
1332 1.113 yamt *
1333 1.113 yamt * => called with pool locked.
1334 1.113 yamt * => unlock and relock the pool.
1335 1.113 yamt * => return with pool locked.
1336 1.113 yamt */
1337 1.113 yamt
1338 1.113 yamt static int
1339 1.113 yamt pool_grow(struct pool *pp, int flags)
1340 1.113 yamt {
1341 1.236 maxv struct pool_item_header *ph;
1342 1.237 maxv char *storage;
1343 1.236 maxv
1344 1.209 riastrad /*
1345 1.209 riastrad * If there's a pool_grow in progress, wait for it to complete
1346 1.209 riastrad * and try again from the top.
1347 1.209 riastrad */
1348 1.209 riastrad if (pp->pr_flags & PR_GROWING) {
1349 1.209 riastrad if (flags & PR_WAITOK) {
1350 1.209 riastrad do {
1351 1.209 riastrad cv_wait(&pp->pr_cv, &pp->pr_lock);
1352 1.209 riastrad } while (pp->pr_flags & PR_GROWING);
1353 1.209 riastrad return ERESTART;
1354 1.209 riastrad } else {
1355 1.219 mrg if (pp->pr_flags & PR_GROWINGNOWAIT) {
1356 1.219 mrg /*
1357 1.219 mrg * This needs an unlock/relock dance so
1358 1.219 mrg * that the other caller has a chance to
1359 1.219 mrg * run and actually do the thing. Note
1360 1.219 mrg * that this is effectively a busy-wait.
1361 1.219 mrg */
1362 1.219 mrg mutex_exit(&pp->pr_lock);
1363 1.219 mrg mutex_enter(&pp->pr_lock);
1364 1.219 mrg return ERESTART;
1365 1.219 mrg }
1366 1.209 riastrad return EWOULDBLOCK;
1367 1.209 riastrad }
1368 1.209 riastrad }
1369 1.209 riastrad pp->pr_flags |= PR_GROWING;
1370 1.220 christos if (flags & PR_WAITOK)
1371 1.220 christos mutex_exit(&pp->pr_lock);
1372 1.220 christos else
1373 1.219 mrg pp->pr_flags |= PR_GROWINGNOWAIT;
1374 1.113 yamt
1375 1.237 maxv storage = pool_allocator_alloc(pp, flags);
1376 1.237 maxv if (__predict_false(storage == NULL))
1377 1.216 christos goto out;
1378 1.216 christos
1379 1.237 maxv ph = pool_alloc_item_header(pp, storage, flags);
1380 1.216 christos if (__predict_false(ph == NULL)) {
1381 1.237 maxv pool_allocator_free(pp, storage);
1382 1.209 riastrad goto out;
1383 1.113 yamt }
1384 1.113 yamt
1385 1.220 christos if (flags & PR_WAITOK)
1386 1.220 christos mutex_enter(&pp->pr_lock);
1387 1.237 maxv pool_prime_page(pp, storage, ph);
1388 1.113 yamt pp->pr_npagealloc++;
1389 1.216 christos KASSERT(pp->pr_flags & PR_GROWING);
1390 1.219 mrg pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1391 1.209 riastrad /*
1392 1.209 riastrad * If anyone was waiting for pool_grow, notify them that we
1393 1.209 riastrad * may have just done it.
1394 1.209 riastrad */
1395 1.216 christos cv_broadcast(&pp->pr_cv);
1396 1.216 christos return 0;
1397 1.216 christos out:
1398 1.220 christos if (flags & PR_WAITOK)
1399 1.220 christos mutex_enter(&pp->pr_lock);
1400 1.209 riastrad KASSERT(pp->pr_flags & PR_GROWING);
1401 1.219 mrg pp->pr_flags &= ~(PR_GROWING|PR_GROWINGNOWAIT);
1402 1.216 christos return ENOMEM;
1403 1.113 yamt }
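
/*
 * Illustrative sketch (not part of the original source): because
 * pool_grow() may drop and retake pr_lock, an ERESTART return means
 * "pool state may have changed, re-evaluate and retry", e.g.:
 *
 *	mutex_enter(&pp->pr_lock);
 *	while (pool_grow(pp, PR_WAITOK) == ERESTART)
 *		continue;
 *	mutex_exit(&pp->pr_lock);
 *
 * pool_catchup() below performs exactly this dance with PR_NOWAIT.
 */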
1404 1.113 yamt
1405 1.267 chs void
1406 1.74 thorpej pool_prime(struct pool *pp, int n)
1407 1.74 thorpej {
1408 1.74 thorpej
1409 1.134 ad mutex_enter(&pp->pr_lock);
1410 1.267 chs pp->pr_minpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1411 1.267 chs if (pp->pr_maxpages <= pp->pr_minpages)
1412 1.74 thorpej pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1413 1.267 chs while (pp->pr_npages < pp->pr_minpages)
1414 1.267 chs (void) pool_grow(pp, PR_WAITOK);
1415 1.134 ad mutex_exit(&pp->pr_lock);
1416 1.74 thorpej }
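
/*
 * Worked example (hypothetical numbers): with pr_itemsperpage == 42,
 * pool_prime(pp, 100) sets pr_minpages = roundup(100, 42) / 42 = 3,
 * i.e. enough whole pages to hold at least 100 items.
 */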
1417 1.55 thorpej
1418 1.55 thorpej /*
1419 1.3 pk * Add a page worth of items to the pool.
1420 1.21 thorpej *
1421 1.21 thorpej * Note, we must be called with the pool descriptor LOCKED.
1422 1.3 pk */
1423 1.55 thorpej static void
1424 1.128 christos pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1425 1.3 pk {
1426 1.236 maxv const unsigned int align = pp->pr_align;
1427 1.3 pk struct pool_item *pi;
1428 1.128 christos void *cp = storage;
1429 1.55 thorpej int n;
1430 1.36 pk
1431 1.134 ad KASSERT(mutex_owned(&pp->pr_lock));
1432 1.207 riastrad KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
1433 1.207 riastrad (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
1434 1.213 christos "%s: [%s] unaligned page: %p", __func__, pp->pr_wchan, cp);
1435 1.3 pk
1436 1.3 pk /*
1437 1.3 pk * Insert page header.
1438 1.3 pk */
1439 1.88 chs LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1440 1.102 chs LIST_INIT(&ph->ph_itemlist);
1441 1.3 pk ph->ph_page = storage;
1442 1.3 pk ph->ph_nmissing = 0;
1443 1.151 yamt ph->ph_time = time_uptime;
1444 1.245 maxv if (pp->pr_roflags & PR_PHINPAGE)
1445 1.245 maxv ph->ph_poolid = pp->pr_poolid;
1446 1.245 maxv else
1447 1.88 chs SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1448 1.3 pk
1449 1.6 thorpej pp->pr_nidle++;
1450 1.6 thorpej
1451 1.3 pk /*
1452 1.241 maxv * The item space starts after the on-page header, if any.
1453 1.241 maxv */
1454 1.241 maxv ph->ph_off = pp->pr_itemoffset;
1455 1.241 maxv
1456 1.241 maxv /*
1457 1.3 pk * Color this page.
1458 1.3 pk */
1459 1.241 maxv ph->ph_off += pp->pr_curcolor;
1460 1.141 yamt cp = (char *)cp + ph->ph_off;
1461 1.3 pk if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1462 1.3 pk pp->pr_curcolor = 0;
1463 1.3 pk
1464 1.238 maxv KASSERT((((vaddr_t)cp) & (align - 1)) == 0);
1465 1.125 ad
1466 1.3 pk /*
1467 1.3 pk 	 * Insert the remaining chunks on the page's item list.
1468 1.3 pk */
1469 1.3 pk n = pp->pr_itemsperpage;
1470 1.20 thorpej pp->pr_nitems += n;
1471 1.3 pk
1472 1.242 maxv if (pp->pr_roflags & PR_USEBMAP) {
1473 1.234 maxv pr_item_bitmap_init(pp, ph);
1474 1.97 yamt } else {
1475 1.97 yamt while (n--) {
1476 1.97 yamt pi = (struct pool_item *)cp;
1477 1.78 thorpej
1478 1.238 maxv KASSERT((((vaddr_t)pi) & (align - 1)) == 0);
1479 1.3 pk
1480 1.97 yamt /* Insert on page list */
1481 1.102 chs LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1482 1.229 maxv #ifdef POOL_CHECK_MAGIC
1483 1.97 yamt pi->pi_magic = PI_MAGIC;
1484 1.3 pk #endif
1485 1.128 christos cp = (char *)cp + pp->pr_size;
1486 1.125 ad
1487 1.238 maxv KASSERT((((vaddr_t)cp) & (align - 1)) == 0);
1488 1.97 yamt }
1489 1.3 pk }
1490 1.3 pk
1491 1.3 pk /*
1492 1.3 pk * If the pool was depleted, point at the new page.
1493 1.3 pk */
1494 1.3 pk if (pp->pr_curpage == NULL)
1495 1.3 pk pp->pr_curpage = ph;
1496 1.3 pk
1497 1.3 pk if (++pp->pr_npages > pp->pr_hiwat)
1498 1.3 pk pp->pr_hiwat = pp->pr_npages;
1499 1.3 pk }
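
/*
 * Coloring example (hypothetical numbers): with pr_align == 64 and
 * pr_maxcolor == 192, successive pages get item offsets 0, 64, 128,
 * 192, 0, ... so that items at the same index on different pages do
 * not all compete for the same cache lines.
 */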
1500 1.3 pk
1501 1.20 thorpej /*
1502 1.52 thorpej  * Used by pool_get() when nitems drops below the low water mark, to
1503 1.88 chs  * bring pr_nitems back up to the low water mark.
1504 1.20 thorpej *
1505 1.21 thorpej  * Note 1, we never wait for memory here; we let the caller decide what to do.
1506 1.20 thorpej *
1507 1.73 thorpej * Note 2, we must be called with the pool already locked, and we return
1508 1.20 thorpej * with it locked.
1509 1.20 thorpej */
1510 1.20 thorpej static int
1511 1.42 thorpej pool_catchup(struct pool *pp)
1512 1.20 thorpej {
1513 1.20 thorpej int error = 0;
1514 1.20 thorpej
1515 1.54 thorpej while (POOL_NEEDS_CATCHUP(pp)) {
1516 1.113 yamt error = pool_grow(pp, PR_NOWAIT);
1517 1.113 yamt if (error) {
1518 1.214 christos if (error == ERESTART)
1519 1.214 christos continue;
1520 1.20 thorpej break;
1521 1.20 thorpej }
1522 1.20 thorpej }
1523 1.113 yamt return error;
1524 1.20 thorpej }
1525 1.20 thorpej
1526 1.88 chs static void
1527 1.88 chs pool_update_curpage(struct pool *pp)
1528 1.88 chs {
1529 1.88 chs
1530 1.88 chs pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1531 1.88 chs if (pp->pr_curpage == NULL) {
1532 1.88 chs pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1533 1.88 chs }
1534 1.168 yamt KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1535 1.168 yamt (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1536 1.88 chs }
1537 1.88 chs
1538 1.3 pk void
1539 1.42 thorpej pool_setlowat(struct pool *pp, int n)
1540 1.3 pk {
1541 1.15 pk
1542 1.134 ad mutex_enter(&pp->pr_lock);
1543 1.3 pk pp->pr_minitems = n;
1544 1.20 thorpej
1545 1.20 thorpej /* Make sure we're caught up with the newly-set low water mark. */
1546 1.75 simonb if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1547 1.20 thorpej /*
1548 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1549 1.20 thorpej * to try again in a second or so? The latter could break
1550 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1551 1.20 thorpej */
1552 1.20 thorpej }
1553 1.21 thorpej
1554 1.134 ad mutex_exit(&pp->pr_lock);
1555 1.3 pk }
1556 1.3 pk
1557 1.3 pk void
1558 1.42 thorpej pool_sethiwat(struct pool *pp, int n)
1559 1.3 pk {
1560 1.15 pk
1561 1.134 ad mutex_enter(&pp->pr_lock);
1562 1.21 thorpej
1563 1.267 chs pp->pr_maxitems = n;
1564 1.21 thorpej
1565 1.134 ad mutex_exit(&pp->pr_lock);
1566 1.3 pk }
1567 1.3 pk
1568 1.20 thorpej void
1569 1.42 thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1570 1.20 thorpej {
1571 1.20 thorpej
1572 1.134 ad mutex_enter(&pp->pr_lock);
1573 1.20 thorpej
1574 1.20 thorpej pp->pr_hardlimit = n;
1575 1.20 thorpej pp->pr_hardlimit_warning = warnmess;
1576 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1577 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
1578 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
1579 1.20 thorpej
1580 1.267 chs pp->pr_maxpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1581 1.21 thorpej
1582 1.134 ad mutex_exit(&pp->pr_lock);
1583 1.20 thorpej }
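
/*
 * Tuning sketch (hypothetical pool and values): keep at least 8 items
 * in reserve and refuse allocations beyond 128 items, warning at most
 * once per minute:
 *
 *	pool_setlowat(&foo_pool, 8);
 *	pool_sethardlimit(&foo_pool, 128, "foo_pool limit reached", 60);
 */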
1584 1.3 pk
1585 1.3 pk /*
1586 1.3 pk * Release all complete pages that have not been used recently.
1587 1.184 rmind *
1588 1.197 jym * Must not be called from interrupt context.
1589 1.3 pk */
1590 1.66 thorpej int
1591 1.56 sommerfe pool_reclaim(struct pool *pp)
1592 1.3 pk {
1593 1.3 pk struct pool_item_header *ph, *phnext;
1594 1.61 chs struct pool_pagelist pq;
1595 1.151 yamt uint32_t curtime;
1596 1.134 ad bool klock;
1597 1.134 ad int rv;
1598 1.3 pk
1599 1.197 jym KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1600 1.184 rmind
1601 1.68 thorpej if (pp->pr_drain_hook != NULL) {
1602 1.68 thorpej /*
1603 1.68 thorpej * The drain hook must be called with the pool unlocked.
1604 1.68 thorpej */
1605 1.68 thorpej (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1606 1.68 thorpej }
1607 1.68 thorpej
1608 1.134 ad /*
1609 1.157 ad 	 * XXXSMP Take the kernel lock so that we do not cause
1610 1.157 ad 	 * non-MPSAFE code to block.
1611 1.134 ad */
1612 1.134 ad if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1613 1.134 ad pp->pr_ipl == IPL_SOFTSERIAL) {
1614 1.134 ad KERNEL_LOCK(1, NULL);
1615 1.134 ad klock = true;
1616 1.134 ad } else
1617 1.134 ad klock = false;
1618 1.134 ad
1619 1.134 ad /* Reclaim items from the pool's cache (if any). */
1620 1.134 ad if (pp->pr_cache != NULL)
1621 1.134 ad pool_cache_invalidate(pp->pr_cache);
1622 1.134 ad
1623 1.134 ad if (mutex_tryenter(&pp->pr_lock) == 0) {
1624 1.134 ad if (klock) {
1625 1.134 ad KERNEL_UNLOCK_ONE(NULL);
1626 1.134 ad }
1627 1.236 maxv return 0;
1628 1.134 ad }
1629 1.68 thorpej
1630 1.88 chs LIST_INIT(&pq);
1631 1.43 thorpej
1632 1.151 yamt curtime = time_uptime;
1633 1.21 thorpej
1634 1.88 chs for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1635 1.88 chs phnext = LIST_NEXT(ph, ph_pagelist);
1636 1.3 pk
1637 1.3 pk /* Check our minimum page claim */
1638 1.3 pk if (pp->pr_npages <= pp->pr_minpages)
1639 1.3 pk break;
1640 1.3 pk
1641 1.88 chs KASSERT(ph->ph_nmissing == 0);
1642 1.191 para if (curtime - ph->ph_time < pool_inactive_time)
1643 1.88 chs continue;
1644 1.21 thorpej
1645 1.88 chs /*
1646 1.267 chs * If freeing this page would put us below the minimum free items
1647 1.267 chs * or the minimum pages, stop now.
1648 1.88 chs */
1649 1.267 chs if (pp->pr_nitems - pp->pr_itemsperpage < pp->pr_minitems ||
1650 1.267 chs pp->pr_npages - 1 < pp->pr_minpages)
1651 1.88 chs break;
1652 1.21 thorpej
1653 1.88 chs pr_rmpage(pp, ph, &pq);
1654 1.3 pk }
1655 1.3 pk
1656 1.134 ad mutex_exit(&pp->pr_lock);
1657 1.134 ad
1658 1.134 ad if (LIST_EMPTY(&pq))
1659 1.134 ad rv = 0;
1660 1.134 ad else {
1661 1.134 ad pr_pagelist_free(pp, &pq);
1662 1.134 ad rv = 1;
1663 1.134 ad }
1664 1.134 ad
1665 1.134 ad if (klock) {
1666 1.134 ad KERNEL_UNLOCK_ONE(NULL);
1667 1.134 ad }
1668 1.66 thorpej
1669 1.236 maxv return rv;
1670 1.3 pk }
1671 1.3 pk
1672 1.3 pk /*
1673 1.197 jym  * Drain pools, one at a time. The drained pool is returned in *ppp.
1674 1.131 ad *
1675 1.134 ad * Note, must never be called from interrupt context.
1676 1.3 pk */
1677 1.197 jym bool
1678 1.197 jym pool_drain(struct pool **ppp)
1679 1.3 pk {
1680 1.197 jym bool reclaimed;
1681 1.3 pk struct pool *pp;
1682 1.134 ad
1683 1.145 ad KASSERT(!TAILQ_EMPTY(&pool_head));
1684 1.3 pk
1685 1.61 chs pp = NULL;
1686 1.134 ad
1687 1.134 ad /* Find next pool to drain, and add a reference. */
1688 1.134 ad mutex_enter(&pool_head_lock);
1689 1.134 ad do {
1690 1.134 ad if (drainpp == NULL) {
1691 1.145 ad drainpp = TAILQ_FIRST(&pool_head);
1692 1.134 ad }
1693 1.134 ad if (drainpp != NULL) {
1694 1.134 ad pp = drainpp;
1695 1.145 ad drainpp = TAILQ_NEXT(pp, pr_poollist);
1696 1.134 ad }
1697 1.134 ad /*
1698 1.134 ad * Skip completely idle pools. We depend on at least
1699 1.134 ad * one pool in the system being active.
1700 1.134 ad */
1701 1.134 ad } while (pp == NULL || pp->pr_npages == 0);
1702 1.134 ad pp->pr_refcnt++;
1703 1.134 ad mutex_exit(&pool_head_lock);
1704 1.134 ad
1705 1.134 ad 	/* Drain the cache (if any) and the pool. */
1706 1.186 pooka reclaimed = pool_reclaim(pp);
1707 1.134 ad
1708 1.134 ad 	/* Finally, drop our reference on the pool and wake any waiters. */
1709 1.134 ad mutex_enter(&pool_head_lock);
1710 1.134 ad pp->pr_refcnt--;
1711 1.134 ad cv_broadcast(&pool_busy);
1712 1.134 ad mutex_exit(&pool_head_lock);
1713 1.186 pooka
1714 1.197 jym if (ppp != NULL)
1715 1.197 jym *ppp = pp;
1716 1.197 jym
1717 1.186 pooka return reclaimed;
1718 1.3 pk }
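
/*
 * Caller sketch: the page daemon typically drains one pool per pass,
 * remembering which pool gave memory back:
 *
 *	struct pool *pp;
 *	bool progress = pool_drain(&pp);
 *
 * Passing NULL is fine when the caller does not care which pool was
 * chosen.
 */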
1719 1.3 pk
1720 1.3 pk /*
1721 1.217 mrg * Calculate the total number of pages consumed by pools.
1722 1.217 mrg */
1723 1.217 mrg int
1724 1.217 mrg pool_totalpages(void)
1725 1.217 mrg {
1726 1.250 skrll
1727 1.250 skrll mutex_enter(&pool_head_lock);
1728 1.250 skrll int pages = pool_totalpages_locked();
1729 1.250 skrll mutex_exit(&pool_head_lock);
1730 1.250 skrll
1731 1.250 skrll return pages;
1732 1.250 skrll }
1733 1.250 skrll
1734 1.250 skrll int
1735 1.250 skrll pool_totalpages_locked(void)
1736 1.250 skrll {
1737 1.217 mrg struct pool *pp;
1738 1.218 mrg uint64_t total = 0;
1739 1.217 mrg
1740 1.218 mrg TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1741 1.218 mrg uint64_t bytes = pp->pr_npages * pp->pr_alloc->pa_pagesz;
1742 1.218 mrg
1743 1.218 mrg if ((pp->pr_roflags & PR_RECURSIVE) != 0)
1744 1.218 mrg bytes -= (pp->pr_nout * pp->pr_size);
1745 1.218 mrg total += bytes;
1746 1.218 mrg }
1747 1.217 mrg
1748 1.218 mrg return atop(total);
1749 1.217 mrg }
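
/*
 * Accounting example (hypothetical numbers): a pool holding 3 pages of
 * 4096 bytes contributes atop(12288) == 3 pages. For a PR_RECURSIVE
 * pool, the bytes of the items currently checked out are subtracted
 * first, so that storage accounted to another pool is not counted
 * twice.
 */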
1750 1.217 mrg
1751 1.217 mrg /*
1752 1.3 pk * Diagnostic helpers.
1753 1.3 pk */
1754 1.21 thorpej
1755 1.25 thorpej void
1756 1.108 yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
1757 1.108 yamt {
1758 1.108 yamt struct pool *pp;
1759 1.108 yamt
1760 1.145 ad TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1761 1.108 yamt pool_printit(pp, modif, pr);
1762 1.108 yamt }
1763 1.108 yamt }
1764 1.108 yamt
1765 1.108 yamt void
1766 1.42 thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1767 1.25 thorpej {
1768 1.25 thorpej
1769 1.25 thorpej if (pp == NULL) {
1770 1.25 thorpej (*pr)("Must specify a pool to print.\n");
1771 1.25 thorpej return;
1772 1.25 thorpej }
1773 1.25 thorpej
1774 1.25 thorpej pool_print1(pp, modif, pr);
1775 1.25 thorpej }
1776 1.25 thorpej
1777 1.21 thorpej static void
1778 1.124 yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1779 1.97 yamt void (*pr)(const char *, ...))
1780 1.88 chs {
1781 1.88 chs struct pool_item_header *ph;
1782 1.88 chs
1783 1.88 chs LIST_FOREACH(ph, pl, ph_pagelist) {
1784 1.151 yamt (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1785 1.151 yamt ph->ph_page, ph->ph_nmissing, ph->ph_time);
1786 1.229 maxv #ifdef POOL_CHECK_MAGIC
1787 1.229 maxv struct pool_item *pi;
1788 1.242 maxv if (!(pp->pr_roflags & PR_USEBMAP)) {
1789 1.102 chs LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1790 1.97 yamt if (pi->pi_magic != PI_MAGIC) {
1791 1.97 yamt (*pr)("\t\t\titem %p, magic 0x%x\n",
1792 1.97 yamt pi, pi->pi_magic);
1793 1.97 yamt }
1794 1.88 chs }
1795 1.88 chs }
1796 1.88 chs #endif
1797 1.88 chs }
1798 1.88 chs }
1799 1.88 chs
1800 1.88 chs static void
1801 1.42 thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1802 1.3 pk {
1803 1.25 thorpej struct pool_item_header *ph;
1804 1.134 ad pool_cache_t pc;
1805 1.134 ad pcg_t *pcg;
1806 1.134 ad pool_cache_cpu_t *cc;
1807 1.134 ad uint64_t cpuhit, cpumiss;
1808 1.44 thorpej int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1809 1.25 thorpej char c;
1810 1.25 thorpej
1811 1.25 thorpej while ((c = *modif++) != '\0') {
1812 1.25 thorpej if (c == 'l')
1813 1.25 thorpej print_log = 1;
1814 1.25 thorpej if (c == 'p')
1815 1.25 thorpej print_pagelist = 1;
1816 1.44 thorpej if (c == 'c')
1817 1.44 thorpej print_cache = 1;
1818 1.25 thorpej }
1819 1.25 thorpej
1820 1.134 ad if ((pc = pp->pr_cache) != NULL) {
1821 1.134 ad (*pr)("POOL CACHE");
1822 1.134 ad } else {
1823 1.134 ad (*pr)("POOL");
1824 1.134 ad }
1825 1.134 ad
1826 1.134 ad (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1827 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1828 1.25 thorpej pp->pr_roflags);
1829 1.66 thorpej (*pr)("\talloc %p\n", pp->pr_alloc);
1830 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1831 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1832 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1833 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1834 1.25 thorpej
1835 1.134 ad (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1836 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1837 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1838 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1839 1.25 thorpej
1840 1.25 thorpej if (print_pagelist == 0)
1841 1.25 thorpej goto skip_pagelist;
1842 1.25 thorpej
1843 1.88 chs if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1844 1.88 chs (*pr)("\n\tempty page list:\n");
1845 1.97 yamt pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1846 1.88 chs if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1847 1.88 chs (*pr)("\n\tfull page list:\n");
1848 1.97 yamt pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1849 1.88 chs if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1850 1.88 chs (*pr)("\n\tpartial-page list:\n");
1851 1.97 yamt pool_print_pagelist(pp, &pp->pr_partpages, pr);
1852 1.88 chs
1853 1.25 thorpej if (pp->pr_curpage == NULL)
1854 1.25 thorpej (*pr)("\tno current page\n");
1855 1.25 thorpej else
1856 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1857 1.25 thorpej
1858 1.25 thorpej skip_pagelist:
1859 1.25 thorpej if (print_log == 0)
1860 1.25 thorpej goto skip_log;
1861 1.25 thorpej
1862 1.25 thorpej (*pr)("\n");
1863 1.3 pk
1864 1.25 thorpej skip_log:
1865 1.44 thorpej
1866 1.102 chs #define PR_GROUPLIST(pcg) \
1867 1.102 chs (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1868 1.142 ad for (i = 0; i < pcg->pcg_size; i++) { \
1869 1.102 chs if (pcg->pcg_objects[i].pcgo_pa != \
1870 1.102 chs POOL_PADDR_INVALID) { \
1871 1.102 chs (*pr)("\t\t\t%p, 0x%llx\n", \
1872 1.102 chs pcg->pcg_objects[i].pcgo_va, \
1873 1.102 chs (unsigned long long) \
1874 1.102 chs pcg->pcg_objects[i].pcgo_pa); \
1875 1.102 chs } else { \
1876 1.102 chs (*pr)("\t\t\t%p\n", \
1877 1.102 chs pcg->pcg_objects[i].pcgo_va); \
1878 1.102 chs } \
1879 1.102 chs }
1880 1.102 chs
1881 1.134 ad if (pc != NULL) {
1882 1.134 ad cpuhit = 0;
1883 1.134 ad cpumiss = 0;
1884 1.183 ad for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1885 1.134 ad if ((cc = pc->pc_cpus[i]) == NULL)
1886 1.134 ad continue;
1887 1.134 ad cpuhit += cc->cc_hits;
1888 1.134 ad cpumiss += cc->cc_misses;
1889 1.134 ad }
1890 1.134 ad (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1891 1.134 ad (*pr)("\tcache layer hits %llu misses %llu\n",
1892 1.134 ad pc->pc_hits, pc->pc_misses);
1893 1.134 ad (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1894 1.134 ad pc->pc_hits + pc->pc_misses - pc->pc_contended,
1895 1.134 ad pc->pc_contended);
1896 1.134 ad (*pr)("\tcache layer empty groups %u full groups %u\n",
1897 1.134 ad pc->pc_nempty, pc->pc_nfull);
1898 1.134 ad if (print_cache) {
1899 1.134 ad (*pr)("\tfull cache groups:\n");
1900 1.134 ad for (pcg = pc->pc_fullgroups; pcg != NULL;
1901 1.134 ad pcg = pcg->pcg_next) {
1902 1.134 ad PR_GROUPLIST(pcg);
1903 1.134 ad }
1904 1.134 ad (*pr)("\tempty cache groups:\n");
1905 1.134 ad for (pcg = pc->pc_emptygroups; pcg != NULL;
1906 1.134 ad pcg = pcg->pcg_next) {
1907 1.134 ad PR_GROUPLIST(pcg);
1908 1.134 ad }
1909 1.103 chs }
1910 1.44 thorpej }
1911 1.102 chs #undef PR_GROUPLIST
1912 1.88 chs }
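
/*
 * Modifier summary for pool_print1() above: 'l' selects the allocation
 * log (now vestigial, nothing is printed), 'p' the page lists, and 'c'
 * the contents of the cache groups.
 */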
1913 1.88 chs
1914 1.88 chs static int
1915 1.88 chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1916 1.88 chs {
1917 1.88 chs struct pool_item *pi;
1918 1.128 christos void *page;
1919 1.88 chs int n;
1920 1.88 chs
1921 1.121 yamt if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1922 1.253 maxv page = POOL_OBJ_TO_PAGE(pp, ph);
1923 1.121 yamt if (page != ph->ph_page &&
1924 1.121 yamt (pp->pr_roflags & PR_PHINPAGE) != 0) {
1925 1.121 yamt if (label != NULL)
1926 1.121 yamt printf("%s: ", label);
1927 1.121 yamt printf("pool(%p:%s): page inconsistency: page %p;"
1928 1.121 yamt " at page head addr %p (p %p)\n", pp,
1929 1.121 yamt pp->pr_wchan, ph->ph_page,
1930 1.121 yamt ph, page);
1931 1.121 yamt return 1;
1932 1.121 yamt }
1933 1.88 chs }
1934 1.3 pk
1935 1.242 maxv if ((pp->pr_roflags & PR_USEBMAP) != 0)
1936 1.97 yamt return 0;
1937 1.97 yamt
1938 1.102 chs for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1939 1.88 chs pi != NULL;
1940 1.102 chs pi = LIST_NEXT(pi,pi_list), n++) {
1941 1.88 chs
1942 1.229 maxv #ifdef POOL_CHECK_MAGIC
1943 1.88 chs if (pi->pi_magic != PI_MAGIC) {
1944 1.88 chs if (label != NULL)
1945 1.88 chs printf("%s: ", label);
1946 1.88 chs printf("pool(%s): free list modified: magic=%x;"
1947 1.121 yamt " page %p; item ordinal %d; addr %p\n",
1948 1.88 chs pp->pr_wchan, pi->pi_magic, ph->ph_page,
1949 1.121 yamt n, pi);
1950 1.88 chs panic("pool");
1951 1.88 chs }
1952 1.88 chs #endif
1953 1.121 yamt if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1954 1.121 yamt continue;
1955 1.121 yamt }
1956 1.253 maxv page = POOL_OBJ_TO_PAGE(pp, pi);
1957 1.88 chs if (page == ph->ph_page)
1958 1.88 chs continue;
1959 1.88 chs
1960 1.88 chs if (label != NULL)
1961 1.88 chs printf("%s: ", label);
1962 1.88 chs printf("pool(%p:%s): page inconsistency: page %p;"
1963 1.88 chs " item ordinal %d; addr %p (p %p)\n", pp,
1964 1.88 chs pp->pr_wchan, ph->ph_page,
1965 1.88 chs n, pi, page);
1966 1.88 chs return 1;
1967 1.88 chs }
1968 1.88 chs return 0;
1969 1.3 pk }
1970 1.3 pk
1971 1.88 chs
1972 1.3 pk int
1973 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1974 1.3 pk {
1975 1.3 pk struct pool_item_header *ph;
1976 1.3 pk int r = 0;
1977 1.3 pk
1978 1.134 ad mutex_enter(&pp->pr_lock);
1979 1.88 chs LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1980 1.88 chs r = pool_chk_page(pp, label, ph);
1981 1.88 chs if (r) {
1982 1.88 chs goto out;
1983 1.88 chs }
1984 1.88 chs }
1985 1.88 chs LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1986 1.88 chs r = pool_chk_page(pp, label, ph);
1987 1.88 chs if (r) {
1988 1.3 pk goto out;
1989 1.3 pk }
1990 1.88 chs }
1991 1.88 chs LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1992 1.88 chs r = pool_chk_page(pp, label, ph);
1993 1.88 chs if (r) {
1994 1.3 pk goto out;
1995 1.3 pk }
1996 1.3 pk }
1997 1.88 chs
1998 1.3 pk out:
1999 1.134 ad mutex_exit(&pp->pr_lock);
2000 1.236 maxv return r;
2001 1.43 thorpej }
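
/*
 * Usage sketch: pool_chk() is a debugging aid; a caller can tag the
 * report with its own name, e.g.:
 *
 *	if (pool_chk(pp, __func__) != 0)
 *		panic("%s: pool corrupted", __func__);
 *
 * (pool_chk_page() panics by itself if it finds a modified free list.)
 */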
2002 1.43 thorpej
2003 1.43 thorpej /*
2004 1.43 thorpej * pool_cache_init:
2005 1.43 thorpej *
2006 1.43 thorpej * Initialize a pool cache.
2007 1.134 ad */
2008 1.134 ad pool_cache_t
2009 1.134 ad pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
2010 1.134 ad const char *wchan, struct pool_allocator *palloc, int ipl,
2011 1.134 ad int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
2012 1.134 ad {
2013 1.134 ad pool_cache_t pc;
2014 1.134 ad
2015 1.134 ad pc = pool_get(&cache_pool, PR_WAITOK);
2016 1.134 ad if (pc == NULL)
2017 1.134 ad return NULL;
2018 1.134 ad
2019 1.134 ad pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
2020 1.134 ad palloc, ipl, ctor, dtor, arg);
2021 1.134 ad
2022 1.134 ad return pc;
2023 1.134 ad }
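
/*
 * Construction sketch (hypothetical "foo" cache): the constructor and
 * destructor run once per object life cycle in the cache, not on every
 * get/put:
 *
 *	static pool_cache_t foo_cache;
 *
 *	foo_cache = pool_cache_init(sizeof(struct foo), coherency_unit,
 *	    0, 0, "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
 */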
2024 1.134 ad
2025 1.134 ad /*
2026 1.134 ad * pool_cache_bootstrap:
2027 1.43 thorpej *
2028 1.134 ad * Kernel-private version of pool_cache_init(). The caller
2029 1.134 ad * provides initial storage.
2030 1.43 thorpej */
2031 1.43 thorpej void
2032 1.134 ad pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
2033 1.134 ad u_int align_offset, u_int flags, const char *wchan,
2034 1.134 ad struct pool_allocator *palloc, int ipl,
2035 1.134 ad int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
2036 1.43 thorpej void *arg)
2037 1.43 thorpej {
2038 1.134 ad CPU_INFO_ITERATOR cii;
2039 1.145 ad pool_cache_t pc1;
2040 1.134 ad struct cpu_info *ci;
2041 1.134 ad struct pool *pp;
2042 1.134 ad
2043 1.134 ad pp = &pc->pc_pool;
2044 1.208 chs if (palloc == NULL && ipl == IPL_NONE) {
2045 1.208 chs if (size > PAGE_SIZE) {
2046 1.208 chs int bigidx = pool_bigidx(size);
2047 1.208 chs
2048 1.208 chs palloc = &pool_allocator_big[bigidx];
2049 1.252 maxv flags |= PR_NOALIGN;
2050 1.208 chs } else
2051 1.208 chs palloc = &pool_allocator_nointr;
2052 1.208 chs }
2053 1.134 ad pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
2054 1.157 ad mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
2055 1.43 thorpej
2056 1.134 ad if (ctor == NULL) {
2057 1.261 christos ctor = NO_CTOR;
2058 1.134 ad }
2059 1.134 ad if (dtor == NULL) {
2060 1.261 christos dtor = NO_DTOR;
2061 1.134 ad }
2062 1.43 thorpej
2063 1.134 ad pc->pc_emptygroups = NULL;
2064 1.134 ad pc->pc_fullgroups = NULL;
2065 1.134 ad pc->pc_partgroups = NULL;
2066 1.43 thorpej pc->pc_ctor = ctor;
2067 1.43 thorpej pc->pc_dtor = dtor;
2068 1.43 thorpej pc->pc_arg = arg;
2069 1.134 ad pc->pc_hits = 0;
2070 1.48 thorpej pc->pc_misses = 0;
2071 1.134 ad pc->pc_nempty = 0;
2072 1.134 ad pc->pc_npart = 0;
2073 1.134 ad pc->pc_nfull = 0;
2074 1.134 ad pc->pc_contended = 0;
2075 1.134 ad pc->pc_refcnt = 0;
2076 1.136 yamt pc->pc_freecheck = NULL;
2077 1.134 ad
2078 1.142 ad if ((flags & PR_LARGECACHE) != 0) {
2079 1.142 ad pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
2080 1.163 ad pc->pc_pcgpool = &pcg_large_pool;
2081 1.142 ad } else {
2082 1.142 ad pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
2083 1.163 ad pc->pc_pcgpool = &pcg_normal_pool;
2084 1.142 ad }
2085 1.142 ad
2086 1.134 ad /* Allocate per-CPU caches. */
2087 1.134 ad memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
2088 1.134 ad pc->pc_ncpu = 0;
2089 1.139 ad if (ncpu < 2) {
2090 1.137 ad /* XXX For sparc: boot CPU is not attached yet. */
2091 1.137 ad pool_cache_cpu_init1(curcpu(), pc);
2092 1.137 ad } else {
2093 1.137 ad for (CPU_INFO_FOREACH(cii, ci)) {
2094 1.137 ad pool_cache_cpu_init1(ci, pc);
2095 1.137 ad }
2096 1.134 ad }
2097 1.145 ad
2098 1.145 ad /* Add to list of all pools. */
2099 1.145 ad if (__predict_true(!cold))
2100 1.134 ad mutex_enter(&pool_head_lock);
2101 1.145 ad TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
2102 1.145 ad if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
2103 1.145 ad break;
2104 1.145 ad }
2105 1.145 ad if (pc1 == NULL)
2106 1.145 ad TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
2107 1.145 ad else
2108 1.145 ad TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
2109 1.145 ad if (__predict_true(!cold))
2110 1.134 ad mutex_exit(&pool_head_lock);
2111 1.145 ad
2112 1.145 ad membar_sync();
2113 1.145 ad pp->pr_cache = pc;
2114 1.43 thorpej }
2115 1.43 thorpej
2116 1.43 thorpej /*
2117 1.43 thorpej * pool_cache_destroy:
2118 1.43 thorpej *
2119 1.43 thorpej * Destroy a pool cache.
2120 1.43 thorpej */
2121 1.43 thorpej void
2122 1.134 ad pool_cache_destroy(pool_cache_t pc)
2123 1.43 thorpej {
2124 1.191 para
2125 1.191 para pool_cache_bootstrap_destroy(pc);
2126 1.191 para pool_put(&cache_pool, pc);
2127 1.191 para }
2128 1.191 para
2129 1.191 para /*
2130 1.191 para * pool_cache_bootstrap_destroy:
2131 1.191 para *
2132 1.191 para  *	Destroy a pool cache whose storage was provided by the caller.
2133 1.191 para */
2134 1.191 para void
2135 1.191 para pool_cache_bootstrap_destroy(pool_cache_t pc)
2136 1.191 para {
2137 1.134 ad struct pool *pp = &pc->pc_pool;
2138 1.175 jym u_int i;
2139 1.134 ad
2140 1.134 ad /* Remove it from the global list. */
2141 1.134 ad mutex_enter(&pool_head_lock);
2142 1.134 ad while (pc->pc_refcnt != 0)
2143 1.134 ad cv_wait(&pool_busy, &pool_head_lock);
2144 1.145 ad TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
2145 1.134 ad mutex_exit(&pool_head_lock);
2146 1.43 thorpej
2147 1.43 thorpej /* First, invalidate the entire cache. */
2148 1.43 thorpej pool_cache_invalidate(pc);
2149 1.43 thorpej
2150 1.134 ad /* Disassociate it from the pool. */
2151 1.134 ad mutex_enter(&pp->pr_lock);
2152 1.134 ad pp->pr_cache = NULL;
2153 1.134 ad mutex_exit(&pp->pr_lock);
2154 1.134 ad
2155 1.134 ad /* Destroy per-CPU data */
2156 1.183 ad for (i = 0; i < __arraycount(pc->pc_cpus); i++)
2157 1.175 jym pool_cache_invalidate_cpu(pc, i);
2158 1.134 ad
2159 1.134 ad /* Finally, destroy it. */
2160 1.134 ad mutex_destroy(&pc->pc_lock);
2161 1.134 ad pool_destroy(pp);
2162 1.134 ad }
2163 1.134 ad
2164 1.134 ad /*
2165 1.134 ad * pool_cache_cpu_init1:
2166 1.134 ad *
2167 1.134 ad * Called for each pool_cache whenever a new CPU is attached.
2168 1.134 ad */
2169 1.134 ad static void
2170 1.134 ad pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
2171 1.134 ad {
2172 1.134 ad pool_cache_cpu_t *cc;
2173 1.137 ad int index;
2174 1.134 ad
2175 1.137 ad index = ci->ci_index;
2176 1.137 ad
2177 1.183 ad KASSERT(index < __arraycount(pc->pc_cpus));
2178 1.134 ad
2179 1.137 ad if ((cc = pc->pc_cpus[index]) != NULL) {
2180 1.137 ad KASSERT(cc->cc_cpuindex == index);
2181 1.134 ad return;
2182 1.134 ad }
2183 1.134 ad
2184 1.134 ad /*
2185 1.134 ad * The first CPU is 'free'. This needs to be the case for
2186 1.134 ad * bootstrap - we may not be able to allocate yet.
2187 1.134 ad */
2188 1.134 ad if (pc->pc_ncpu == 0) {
2189 1.134 ad cc = &pc->pc_cpu0;
2190 1.134 ad pc->pc_ncpu = 1;
2191 1.134 ad } else {
2192 1.134 ad mutex_enter(&pc->pc_lock);
2193 1.134 ad pc->pc_ncpu++;
2194 1.134 ad mutex_exit(&pc->pc_lock);
2195 1.134 ad cc = pool_get(&cache_cpu_pool, PR_WAITOK);
2196 1.134 ad }
2197 1.134 ad
2198 1.134 ad cc->cc_ipl = pc->pc_pool.pr_ipl;
2199 1.134 ad cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
2200 1.134 ad cc->cc_cache = pc;
2201 1.137 ad cc->cc_cpuindex = index;
2202 1.134 ad cc->cc_hits = 0;
2203 1.134 ad cc->cc_misses = 0;
2204 1.169 yamt cc->cc_current = __UNCONST(&pcg_dummy);
2205 1.169 yamt cc->cc_previous = __UNCONST(&pcg_dummy);
2206 1.134 ad
2207 1.137 ad pc->pc_cpus[index] = cc;
2208 1.43 thorpej }
2209 1.43 thorpej
2210 1.134 ad /*
2211 1.134 ad * pool_cache_cpu_init:
2212 1.134 ad *
2213 1.134 ad * Called whenever a new CPU is attached.
2214 1.134 ad */
2215 1.134 ad void
2216 1.134 ad pool_cache_cpu_init(struct cpu_info *ci)
2217 1.43 thorpej {
2218 1.134 ad pool_cache_t pc;
2219 1.134 ad
2220 1.134 ad mutex_enter(&pool_head_lock);
2221 1.145 ad TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
2222 1.134 ad pc->pc_refcnt++;
2223 1.134 ad mutex_exit(&pool_head_lock);
2224 1.43 thorpej
2225 1.134 ad pool_cache_cpu_init1(ci, pc);
2226 1.43 thorpej
2227 1.134 ad mutex_enter(&pool_head_lock);
2228 1.134 ad pc->pc_refcnt--;
2229 1.134 ad cv_broadcast(&pool_busy);
2230 1.134 ad }
2231 1.134 ad mutex_exit(&pool_head_lock);
2232 1.43 thorpej }
2233 1.43 thorpej
2234 1.134 ad /*
2235 1.134 ad * pool_cache_reclaim:
2236 1.134 ad *
2237 1.134 ad * Reclaim memory from a pool cache.
2238 1.134 ad */
2239 1.134 ad bool
2240 1.134 ad pool_cache_reclaim(pool_cache_t pc)
2241 1.43 thorpej {
2242 1.43 thorpej
2243 1.134 ad return pool_reclaim(&pc->pc_pool);
2244 1.134 ad }
2245 1.43 thorpej
2246 1.136 yamt static void
2247 1.136 yamt pool_cache_destruct_object1(pool_cache_t pc, void *object)
2248 1.136 yamt {
2249 1.136 yamt (*pc->pc_dtor)(pc->pc_arg, object);
2250 1.136 yamt pool_put(&pc->pc_pool, object);
2251 1.136 yamt }
2252 1.136 yamt
2253 1.134 ad /*
2254 1.134 ad * pool_cache_destruct_object:
2255 1.134 ad *
2256 1.134 ad * Force destruction of an object and its release back into
2257 1.134 ad * the pool.
2258 1.134 ad */
2259 1.134 ad void
2260 1.134 ad pool_cache_destruct_object(pool_cache_t pc, void *object)
2261 1.134 ad {
2262 1.134 ad
2263 1.136 yamt FREECHECK_IN(&pc->pc_freecheck, object);
2264 1.136 yamt
2265 1.136 yamt pool_cache_destruct_object1(pc, object);
2266 1.43 thorpej }
2267 1.43 thorpej
2268 1.134 ad /*
2269 1.134 ad * pool_cache_invalidate_groups:
2270 1.134 ad *
2271 1.134 ad * Invalidate a chain of groups and destruct all objects.
2272 1.134 ad */
2273 1.102 chs static void
2274 1.134 ad pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
2275 1.102 chs {
2276 1.134 ad void *object;
2277 1.134 ad pcg_t *next;
2278 1.134 ad int i;
2279 1.134 ad
2280 1.134 ad for (; pcg != NULL; pcg = next) {
2281 1.134 ad next = pcg->pcg_next;
2282 1.134 ad
2283 1.134 ad for (i = 0; i < pcg->pcg_avail; i++) {
2284 1.134 ad object = pcg->pcg_objects[i].pcgo_va;
2285 1.136 yamt pool_cache_destruct_object1(pc, object);
2286 1.134 ad }
2287 1.102 chs
2288 1.142 ad if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
2289 1.142 ad pool_put(&pcg_large_pool, pcg);
2290 1.142 ad } else {
2291 1.142 ad KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
2292 1.142 ad pool_put(&pcg_normal_pool, pcg);
2293 1.142 ad }
2294 1.102 chs }
2295 1.102 chs }
2296 1.102 chs
2297 1.43 thorpej /*
2298 1.134 ad * pool_cache_invalidate:
2299 1.43 thorpej *
2300 1.134 ad * Invalidate a pool cache (destruct and release all of the
2301 1.134 ad * cached objects). Does not reclaim objects from the pool.
2302 1.176 thorpej *
2303 1.176 thorpej * Note: For pool caches that provide constructed objects, there
2304 1.176 thorpej * is an assumption that another level of synchronization is occurring
2305 1.176 thorpej * between the input to the constructor and the cache invalidation.
2306 1.196 jym *
2307 1.196 jym * Invalidation is a costly process and should not be called from
2308 1.196 jym * interrupt context.
2309 1.43 thorpej */
2310 1.134 ad void
2311 1.134 ad pool_cache_invalidate(pool_cache_t pc)
2312 1.134 ad {
2313 1.196 jym uint64_t where;
2314 1.134 ad pcg_t *full, *empty, *part;
2315 1.196 jym
2316 1.196 jym KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2317 1.176 thorpej
2318 1.177 jym if (ncpu < 2 || !mp_online) {
2319 1.176 thorpej /*
2320 1.176 thorpej * We might be called early enough in the boot process
2321 1.176 thorpej * for the CPU data structures to not be fully initialized.
2322 1.196 jym * In this case, transfer the content of the local CPU's
2323 1.196 jym * cache back into global cache as only this CPU is currently
2324 1.196 jym * running.
2325 1.176 thorpej */
2326 1.196 jym pool_cache_transfer(pc);
2327 1.176 thorpej } else {
2328 1.176 thorpej /*
2329 1.196 jym * Signal all CPUs that they must transfer their local
2330 1.196 jym * cache back to the global pool then wait for the xcall to
2331 1.196 jym * complete.
2332 1.176 thorpej */
2333 1.261 christos where = xc_broadcast(0,
2334 1.261 christos __FPTRCAST(xcfunc_t, pool_cache_transfer), pc, NULL);
2335 1.176 thorpej xc_wait(where);
2336 1.176 thorpej }
2337 1.196 jym
2338 1.196 jym /* Empty pool caches, then invalidate objects */
2339 1.134 ad mutex_enter(&pc->pc_lock);
2340 1.134 ad full = pc->pc_fullgroups;
2341 1.134 ad empty = pc->pc_emptygroups;
2342 1.134 ad part = pc->pc_partgroups;
2343 1.134 ad pc->pc_fullgroups = NULL;
2344 1.134 ad pc->pc_emptygroups = NULL;
2345 1.134 ad pc->pc_partgroups = NULL;
2346 1.134 ad pc->pc_nfull = 0;
2347 1.134 ad pc->pc_nempty = 0;
2348 1.134 ad pc->pc_npart = 0;
2349 1.134 ad mutex_exit(&pc->pc_lock);
2350 1.134 ad
2351 1.134 ad pool_cache_invalidate_groups(pc, full);
2352 1.134 ad pool_cache_invalidate_groups(pc, empty);
2353 1.134 ad pool_cache_invalidate_groups(pc, part);
2354 1.134 ad }
2355 1.134 ad
2356 1.175 jym /*
2357 1.175 jym * pool_cache_invalidate_cpu:
2358 1.175 jym *
2359 1.175 jym * Invalidate all CPU-bound cached objects in pool cache, the CPU being
2360 1.175 jym * identified by its associated index.
2361 1.175 jym  * It is the caller's responsibility to ensure that no operation is
2362 1.175 jym * taking place on this pool cache while doing this invalidation.
2363 1.175 jym * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2364 1.175 jym * pool cached objects from a CPU different from the one currently running
2365 1.175 jym  * may result in undefined behaviour.
2366 1.175 jym */
2367 1.175 jym static void
2368 1.175 jym pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2369 1.175 jym {
2370 1.175 jym pool_cache_cpu_t *cc;
2371 1.175 jym pcg_t *pcg;
2372 1.175 jym
2373 1.175 jym if ((cc = pc->pc_cpus[index]) == NULL)
2374 1.175 jym return;
2375 1.175 jym
2376 1.175 jym if ((pcg = cc->cc_current) != &pcg_dummy) {
2377 1.175 jym pcg->pcg_next = NULL;
2378 1.175 jym pool_cache_invalidate_groups(pc, pcg);
2379 1.175 jym }
2380 1.175 jym if ((pcg = cc->cc_previous) != &pcg_dummy) {
2381 1.175 jym pcg->pcg_next = NULL;
2382 1.175 jym pool_cache_invalidate_groups(pc, pcg);
2383 1.175 jym }
2384 1.175 jym if (cc != &pc->pc_cpu0)
2385 1.175 jym pool_put(&cache_cpu_pool, cc);
2386 1.175 jym
2387 1.175 jym }
2388 1.175 jym
2389 1.134 ad void
2390 1.134 ad pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2391 1.134 ad {
2392 1.134 ad
2393 1.134 ad pool_set_drain_hook(&pc->pc_pool, fn, arg);
2394 1.134 ad }
2395 1.134 ad
2396 1.134 ad void
2397 1.134 ad pool_cache_setlowat(pool_cache_t pc, int n)
2398 1.134 ad {
2399 1.134 ad
2400 1.134 ad pool_setlowat(&pc->pc_pool, n);
2401 1.134 ad }
2402 1.134 ad
2403 1.134 ad void
2404 1.134 ad pool_cache_sethiwat(pool_cache_t pc, int n)
2405 1.134 ad {
2406 1.134 ad
2407 1.134 ad pool_sethiwat(&pc->pc_pool, n);
2408 1.134 ad }
2409 1.134 ad
2410 1.134 ad void
2411 1.134 ad pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2412 1.134 ad {
2413 1.134 ad
2414 1.134 ad pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2415 1.134 ad }
2416 1.134 ad
2417 1.267 chs void
2418 1.267 chs pool_cache_prime(pool_cache_t pc, int n)
2419 1.267 chs {
2420 1.267 chs
2421 1.267 chs pool_prime(&pc->pc_pool, n);
2422 1.267 chs }
2423 1.267 chs
2424 1.162 ad static bool __noinline
2425 1.162 ad pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2426 1.134 ad paddr_t *pap, int flags)
2427 1.43 thorpej {
2428 1.134 ad pcg_t *pcg, *cur;
2429 1.134 ad uint64_t ncsw;
2430 1.134 ad pool_cache_t pc;
2431 1.43 thorpej void *object;
2432 1.58 thorpej
2433 1.168 yamt KASSERT(cc->cc_current->pcg_avail == 0);
2434 1.168 yamt KASSERT(cc->cc_previous->pcg_avail == 0);
2435 1.168 yamt
2436 1.134 ad pc = cc->cc_cache;
2437 1.134 ad cc->cc_misses++;
2438 1.43 thorpej
2439 1.134 ad /*
2440 1.134 ad * Nothing was available locally. Try and grab a group
2441 1.134 ad * from the cache.
2442 1.134 ad */
2443 1.162 ad if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2444 1.134 ad ncsw = curlwp->l_ncsw;
2445 1.263 riastrad __insn_barrier();
2446 1.134 ad mutex_enter(&pc->pc_lock);
2447 1.134 ad pc->pc_contended++;
2448 1.43 thorpej
2449 1.134 ad /*
2450 1.134 ad * If we context switched while locking, then
2451 1.134 ad * our view of the per-CPU data is invalid:
2452 1.134 ad * retry.
2453 1.134 ad */
2454 1.263 riastrad __insn_barrier();
2455 1.134 ad if (curlwp->l_ncsw != ncsw) {
2456 1.134 ad mutex_exit(&pc->pc_lock);
2457 1.162 ad return true;
2458 1.43 thorpej }
2459 1.102 chs }
2460 1.43 thorpej
2461 1.162 ad if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2462 1.43 thorpej /*
2463 1.134 ad * If there's a full group, release our empty
2464 1.134 ad * group back to the cache. Install the full
2465 1.134 ad * group as cc_current and return.
2466 1.43 thorpej */
2467 1.162 ad if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2468 1.134 ad KASSERT(cur->pcg_avail == 0);
2469 1.134 ad cur->pcg_next = pc->pc_emptygroups;
2470 1.134 ad pc->pc_emptygroups = cur;
2471 1.134 ad pc->pc_nempty++;
2472 1.87 thorpej }
2473 1.142 ad KASSERT(pcg->pcg_avail == pcg->pcg_size);
2474 1.134 ad cc->cc_current = pcg;
2475 1.134 ad pc->pc_fullgroups = pcg->pcg_next;
2476 1.134 ad pc->pc_hits++;
2477 1.134 ad pc->pc_nfull--;
2478 1.134 ad mutex_exit(&pc->pc_lock);
2479 1.162 ad return true;
2480 1.134 ad }
2481 1.134 ad
2482 1.134 ad /*
2483 1.134 ad * Nothing available locally or in cache. Take the slow
2484 1.134 ad * path: fetch a new object from the pool and construct
2485 1.134 ad * it.
2486 1.134 ad */
2487 1.134 ad pc->pc_misses++;
2488 1.134 ad mutex_exit(&pc->pc_lock);
2489 1.162 ad splx(s);
2490 1.134 ad
2491 1.134 ad object = pool_get(&pc->pc_pool, flags);
2492 1.134 ad *objectp = object;
2493 1.211 riastrad if (__predict_false(object == NULL)) {
2494 1.265 chs KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
2495 1.162 ad return false;
2496 1.211 riastrad }
2497 1.125 ad
2498 1.162 ad if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2499 1.134 ad pool_put(&pc->pc_pool, object);
2500 1.134 ad *objectp = NULL;
2501 1.162 ad return false;
2502 1.43 thorpej }
2503 1.43 thorpej
2504 1.238 maxv KASSERT((((vaddr_t)object) & (pc->pc_pool.pr_align - 1)) == 0);
2505 1.43 thorpej
2506 1.134 ad if (pap != NULL) {
2507 1.134 ad #ifdef POOL_VTOPHYS
2508 1.134 ad *pap = POOL_VTOPHYS(object);
2509 1.134 ad #else
2510 1.134 ad *pap = POOL_PADDR_INVALID;
2511 1.134 ad #endif
2512 1.102 chs }
2513 1.43 thorpej
2514 1.125 ad FREECHECK_OUT(&pc->pc_freecheck, object);
2515 1.162 ad return false;
2516 1.43 thorpej }
2517 1.43 thorpej
2518 1.43 thorpej /*
2519 1.134 ad * pool_cache_get{,_paddr}:
2520 1.43 thorpej *
2521 1.134 ad * Get an object from a pool cache (optionally returning
2522 1.134 ad * the physical address of the object).
2523 1.43 thorpej */
2524 1.134 ad void *
2525 1.134 ad pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2526 1.43 thorpej {
2527 1.134 ad pool_cache_cpu_t *cc;
2528 1.134 ad pcg_t *pcg;
2529 1.134 ad void *object;
2530 1.60 thorpej int s;
2531 1.43 thorpej
2532 1.215 christos KASSERT(!(flags & PR_NOWAIT) != !(flags & PR_WAITOK));
2533 1.184 rmind KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2534 1.185 rmind (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2535 1.213 christos "%s: [%s] is IPL_NONE, but called from interrupt context",
2536 1.213 christos __func__, pc->pc_pool.pr_wchan);
2537 1.184 rmind
2538 1.155 ad if (flags & PR_WAITOK) {
2539 1.154 yamt ASSERT_SLEEPABLE();
2540 1.155 ad }
2541 1.125 ad
2542 1.270 maxv if (flags & PR_NOWAIT) {
2543 1.270 maxv if (fault_inject())
2544 1.270 maxv return NULL;
2545 1.270 maxv }
2546 1.270 maxv
2547 1.162 ad /* Lock out interrupts and disable preemption. */
2548 1.162 ad s = splvm();
2549 1.165 yamt while (/* CONSTCOND */ true) {
2550 1.134 ad /* Try and allocate an object from the current group. */
2551 1.162 ad cc = pc->pc_cpus[curcpu()->ci_index];
2552 1.162 ad KASSERT(cc->cc_cache == pc);
2553 1.134 ad pcg = cc->cc_current;
2554 1.162 ad if (__predict_true(pcg->pcg_avail > 0)) {
2555 1.134 ad object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2556 1.162 ad if (__predict_false(pap != NULL))
2557 1.134 ad *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2558 1.148 yamt #if defined(DIAGNOSTIC)
2559 1.134 ad pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2560 1.163 ad KASSERT(pcg->pcg_avail < pcg->pcg_size);
2561 1.134 ad KASSERT(object != NULL);
2562 1.163 ad #endif
2563 1.134 ad cc->cc_hits++;
2564 1.162 ad splx(s);
2565 1.134 ad FREECHECK_OUT(&pc->pc_freecheck, object);
2566 1.204 maxv pool_redzone_fill(&pc->pc_pool, object);
2567 1.262 maxv pool_cache_get_kmsan(pc, object);
2568 1.134 ad return object;
2569 1.43 thorpej }
2570 1.43 thorpej
2571 1.43 thorpej /*
2572 1.134 ad * That failed. If the previous group isn't empty, swap
2573 1.134 ad * it with the current group and allocate from there.
2574 1.43 thorpej */
2575 1.134 ad pcg = cc->cc_previous;
2576 1.162 ad if (__predict_true(pcg->pcg_avail > 0)) {
2577 1.134 ad cc->cc_previous = cc->cc_current;
2578 1.134 ad cc->cc_current = pcg;
2579 1.134 ad continue;
2580 1.43 thorpej }
2581 1.43 thorpej
2582 1.134 ad /*
2583 1.134 ad * Can't allocate from either group: try the slow path.
2584 1.134 ad * If get_slow() allocated an object for us, or if
2585 1.162 ad * no more objects are available, it will return false.
2586 1.134 ad * Otherwise, we need to retry.
2587 1.134 ad */
2588 1.269 maxv if (!pool_cache_get_slow(cc, s, &object, pap, flags)) {
2589 1.269 maxv if (object != NULL) {
2590 1.269 maxv kmsan_orig(object, pc->pc_pool.pr_size,
2591 1.269 maxv KMSAN_TYPE_POOL, __RET_ADDR);
2592 1.269 maxv }
2593 1.165 yamt break;
2594 1.269 maxv }
2595 1.165 yamt }
2596 1.43 thorpej
2597 1.211 riastrad /*
2598 1.211 riastrad * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
2599 1.211 riastrad * pool_cache_get can fail even in the PR_WAITOK case, if the
2600 1.211 riastrad * constructor fails.
2601 1.211 riastrad */
2602 1.134 ad return object;
2603 1.51 thorpej }
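
/*
 * Note: the plain pool_cache_get(pc, flags) wrapper is this function
 * with pap == NULL (see <sys/pool.h>); the paddr variant serves callers
 * that also need the physical address, e.g.:
 *
 *	paddr_t pa;
 *	void *obj = pool_cache_get_paddr(foo_cache, PR_WAITOK, &pa);
 */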
2604 1.51 thorpej
2605 1.162 ad static bool __noinline
2606 1.162 ad pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2607 1.51 thorpej {
2608 1.200 pooka struct lwp *l = curlwp;
2609 1.163 ad pcg_t *pcg, *cur;
2610 1.134 ad uint64_t ncsw;
2611 1.134 ad pool_cache_t pc;
2612 1.51 thorpej
2613 1.168 yamt KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2614 1.168 yamt KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2615 1.168 yamt
2616 1.134 ad pc = cc->cc_cache;
2617 1.171 ad pcg = NULL;
2618 1.134 ad cc->cc_misses++;
2619 1.200 pooka ncsw = l->l_ncsw;
2620 1.263 riastrad __insn_barrier();
2621 1.43 thorpej
2622 1.171 ad /*
2623 1.171 ad * If there are no empty groups in the cache then allocate one
2624 1.171 ad * while still unlocked.
2625 1.171 ad */
2626 1.171 ad if (__predict_false(pc->pc_emptygroups == NULL)) {
2627 1.171 ad if (__predict_true(!pool_cache_disable)) {
2628 1.171 ad pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2629 1.171 ad }
2630 1.200 pooka /*
2631 1.200 pooka * If pool_get() blocked, then our view of
2632 1.200 pooka * the per-CPU data is invalid: retry.
2633 1.200 pooka */
2634 1.263 riastrad __insn_barrier();
2635 1.200 pooka if (__predict_false(l->l_ncsw != ncsw)) {
2636 1.200 pooka if (pcg != NULL) {
2637 1.200 pooka pool_put(pc->pc_pcgpool, pcg);
2638 1.200 pooka }
2639 1.200 pooka return true;
2640 1.200 pooka }
2641 1.171 ad if (__predict_true(pcg != NULL)) {
2642 1.171 ad pcg->pcg_avail = 0;
2643 1.171 ad pcg->pcg_size = pc->pc_pcgsize;
2644 1.171 ad }
2645 1.171 ad }
2646 1.171 ad
2647 1.162 ad /* Lock the cache. */
2648 1.162 ad if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2649 1.134 ad mutex_enter(&pc->pc_lock);
2650 1.134 ad pc->pc_contended++;
2651 1.162 ad
2652 1.163 ad /*
2653 1.163 ad * If we context switched while locking, then our view of
2654 1.163 ad * the per-CPU data is invalid: retry.
2655 1.163 ad */
2656 1.263 riastrad __insn_barrier();
2657 1.200 pooka if (__predict_false(l->l_ncsw != ncsw)) {
2658 1.163 ad mutex_exit(&pc->pc_lock);
2659 1.171 ad if (pcg != NULL) {
2660 1.171 ad pool_put(pc->pc_pcgpool, pcg);
2661 1.171 ad }
2662 1.163 ad return true;
2663 1.163 ad }
2664 1.162 ad }
2665 1.102 chs
2666 1.163 ad 	/* If we didn't allocate an empty group above, grab one from the cache. */
2667 1.171 ad if (pcg == NULL && pc->pc_emptygroups != NULL) {
2668 1.171 ad pcg = pc->pc_emptygroups;
2669 1.163 ad pc->pc_emptygroups = pcg->pcg_next;
2670 1.163 ad pc->pc_nempty--;
2671 1.134 ad }
2672 1.130 ad
2673 1.162 ad /*
2674 1.162 ad 	 * If there's an empty group, release our full group back
2675 1.162 ad * to the cache. Install the empty group to the local CPU
2676 1.162 ad * and return.
2677 1.162 ad */
2678 1.163 ad if (pcg != NULL) {
2679 1.134 ad KASSERT(pcg->pcg_avail == 0);
2680 1.162 ad if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2681 1.146 ad cc->cc_previous = pcg;
2682 1.146 ad } else {
2683 1.162 ad cur = cc->cc_current;
2684 1.162 ad if (__predict_true(cur != &pcg_dummy)) {
2685 1.163 ad KASSERT(cur->pcg_avail == cur->pcg_size);
2686 1.146 ad cur->pcg_next = pc->pc_fullgroups;
2687 1.146 ad pc->pc_fullgroups = cur;
2688 1.146 ad pc->pc_nfull++;
2689 1.146 ad }
2690 1.146 ad cc->cc_current = pcg;
2691 1.146 ad }
2692 1.163 ad pc->pc_hits++;
2693 1.134 ad mutex_exit(&pc->pc_lock);
2694 1.162 ad return true;
2695 1.102 chs }
2696 1.105 christos
2697 1.134 ad /*
2698 1.162 ad 	 * No group with room available locally or in the cache, and
2699 1.162 ad 	 * we failed to allocate an empty one. Take the slow path and
2700 1.162 ad 	 * destroy the object here and now.
2701 1.134 ad */
2702 1.134 ad pc->pc_misses++;
2703 1.134 ad mutex_exit(&pc->pc_lock);
2704 1.162 ad splx(s);
2705 1.162 ad pool_cache_destruct_object(pc, object);
2706 1.105 christos
2707 1.162 ad return false;
2708 1.236 maxv }
2709 1.102 chs
2710 1.43 thorpej /*
2711 1.134 ad * pool_cache_put{,_paddr}:
2712 1.43 thorpej *
2713 1.134 ad * Put an object back to the pool cache (optionally caching the
2714 1.134 ad * physical address of the object).
2715 1.43 thorpej */
2716 1.101 thorpej void
2717 1.134 ad pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2718 1.43 thorpej {
2719 1.134 ad pool_cache_cpu_t *cc;
2720 1.134 ad pcg_t *pcg;
2721 1.134 ad int s;
2722 1.101 thorpej
2723 1.172 yamt KASSERT(object != NULL);
2724 1.262 maxv pool_cache_put_kmsan(pc, object);
2725 1.229 maxv pool_cache_redzone_check(pc, object);
2726 1.134 ad FREECHECK_IN(&pc->pc_freecheck, object);
2727 1.101 thorpej
2728 1.253 maxv if (pc->pc_pool.pr_roflags & PR_PHINPAGE) {
2729 1.253 maxv pc_phinpage_check(pc, object);
2730 1.253 maxv }
2731 1.253 maxv
2732 1.268 maxv if (pool_cache_put_nocache(pc, object)) {
2733 1.249 maxv return;
2734 1.249 maxv }
2735 1.249 maxv
2736 1.162 ad /* Lock out interrupts and disable preemption. */
2737 1.162 ad s = splvm();
2738 1.165 yamt while (/* CONSTCOND */ true) {
2739 1.134 ad /* If the current group isn't full, release it there. */
2740 1.162 ad cc = pc->pc_cpus[curcpu()->ci_index];
2741 1.162 ad KASSERT(cc->cc_cache == pc);
2742 1.134 ad pcg = cc->cc_current;
2743 1.162 ad if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2744 1.134 ad pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2745 1.134 ad pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2746 1.134 ad pcg->pcg_avail++;
2747 1.134 ad cc->cc_hits++;
2748 1.162 ad splx(s);
2749 1.134 ad return;
2750 1.134 ad }
2751 1.43 thorpej
2752 1.134 ad /*
2753 1.162 ad * That failed. If the previous group isn't full, swap
2754 1.134 ad * it with the current group and try again.
2755 1.134 ad */
2756 1.134 ad pcg = cc->cc_previous;
2757 1.162 ad if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2758 1.134 ad cc->cc_previous = cc->cc_current;
2759 1.134 ad cc->cc_current = pcg;
2760 1.134 ad continue;
2761 1.134 ad }
2762 1.43 thorpej
2763 1.134 ad /*
2764 1.236 maxv * Can't free to either group: try the slow path.
2765 1.134 ad * If put_slow() releases the object for us, it
2766 1.162 ad * will return false. Otherwise we need to retry.
2767 1.134 ad */
2768 1.165 yamt if (!pool_cache_put_slow(cc, s, object))
2769 1.165 yamt break;
2770 1.165 yamt }
2771 1.43 thorpej }
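
/*
 * Likewise, the plain pool_cache_put(pc, obj) wrapper is this function
 * called with POOL_PADDR_INVALID (see <sys/pool.h>):
 *
 *	pool_cache_put_paddr(foo_cache, obj, POOL_PADDR_INVALID);
 */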
2772 1.43 thorpej
2773 1.43 thorpej /*
2774 1.196 jym * pool_cache_transfer:
2775 1.43 thorpej *
2776 1.134 ad * Transfer objects from the per-CPU cache to the global cache.
2777 1.134 ad * Run within a cross-call thread.
2778 1.43 thorpej */
2779 1.43 thorpej static void
2780 1.196 jym pool_cache_transfer(pool_cache_t pc)
2781 1.43 thorpej {
2782 1.134 ad pool_cache_cpu_t *cc;
2783 1.134 ad pcg_t *prev, *cur, **list;
2784 1.162 ad int s;
2785 1.134 ad
2786 1.162 ad s = splvm();
2787 1.162 ad mutex_enter(&pc->pc_lock);
2788 1.162 ad cc = pc->pc_cpus[curcpu()->ci_index];
2789 1.134 ad cur = cc->cc_current;
2790 1.169 yamt cc->cc_current = __UNCONST(&pcg_dummy);
2791 1.134 ad prev = cc->cc_previous;
2792 1.169 yamt cc->cc_previous = __UNCONST(&pcg_dummy);
2793 1.162 ad if (cur != &pcg_dummy) {
2794 1.142 ad if (cur->pcg_avail == cur->pcg_size) {
2795 1.134 ad list = &pc->pc_fullgroups;
2796 1.134 ad pc->pc_nfull++;
2797 1.134 ad } else if (cur->pcg_avail == 0) {
2798 1.134 ad list = &pc->pc_emptygroups;
2799 1.134 ad pc->pc_nempty++;
2800 1.134 ad } else {
2801 1.134 ad list = &pc->pc_partgroups;
2802 1.134 ad pc->pc_npart++;
2803 1.134 ad }
2804 1.134 ad cur->pcg_next = *list;
2805 1.134 ad *list = cur;
2806 1.134 ad }
2807 1.162 ad if (prev != &pcg_dummy) {
2808 1.142 ad if (prev->pcg_avail == prev->pcg_size) {
2809 1.134 ad list = &pc->pc_fullgroups;
2810 1.134 ad pc->pc_nfull++;
2811 1.134 ad } else if (prev->pcg_avail == 0) {
2812 1.134 ad list = &pc->pc_emptygroups;
2813 1.134 ad pc->pc_nempty++;
2814 1.134 ad } else {
2815 1.134 ad list = &pc->pc_partgroups;
2816 1.134 ad pc->pc_npart++;
2817 1.134 ad }
2818 1.134 ad prev->pcg_next = *list;
2819 1.134 ad *list = prev;
2820 1.134 ad }
2821 1.134 ad mutex_exit(&pc->pc_lock);
2822 1.134 ad splx(s);
2823 1.3 pk }
2824 1.66 thorpej
2825 1.208 chs static int
2826 1.208 chs pool_bigidx(size_t size)
2827 1.208 chs {
2828 1.208 chs int i;
2829 1.208 chs
2830 1.208 chs for (i = 0; i < __arraycount(pool_allocator_big); i++) {
2831 1.208 chs if (1 << (i + POOL_ALLOCATOR_BIG_BASE) >= size)
2832 1.208 chs return i;
2833 1.208 chs }
2834 1.208 chs panic("pool item size %zu too large, use a custom allocator", size);
2835 1.208 chs }
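
/*
 * Worked example (hypothetical size): for a 9000-byte item the loop
 * returns the first index i with (1 << (i + POOL_ALLOCATOR_BIG_BASE))
 * >= 9000, i.e. the smallest power-of-two backing page that holds one
 * item.
 */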
2836 1.208 chs
2837 1.117 yamt static void *
2838 1.117 yamt pool_allocator_alloc(struct pool *pp, int flags)
2839 1.66 thorpej {
2840 1.117 yamt struct pool_allocator *pa = pp->pr_alloc;
2841 1.66 thorpej void *res;
2842 1.66 thorpej
2843 1.117 yamt res = (*pa->pa_alloc)(pp, flags);
2844 1.117 yamt if (res == NULL && (flags & PR_WAITOK) == 0) {
2845 1.66 thorpej /*
2846 1.117 yamt * We only run the drain hook here if PR_NOWAIT.
2847 1.117 yamt * In other cases, the hook will be run in
2848 1.117 yamt * pool_reclaim().
2849 1.66 thorpej */
2850 1.117 yamt if (pp->pr_drain_hook != NULL) {
2851 1.117 yamt (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2852 1.117 yamt res = (*pa->pa_alloc)(pp, flags);
2853 1.66 thorpej }
2854 1.117 yamt }
2855 1.117 yamt return res;
2856 1.66 thorpej }
2857 1.66 thorpej
2858 1.117 yamt static void
2859 1.66 thorpej pool_allocator_free(struct pool *pp, void *v)
2860 1.66 thorpej {
2861 1.66 thorpej struct pool_allocator *pa = pp->pr_alloc;
2862 1.66 thorpej
2863 1.229 maxv if (pp->pr_redzone) {
2864 1.248 maxv kasan_mark(v, pa->pa_pagesz, pa->pa_pagesz, 0);
2865 1.229 maxv }
2866 1.66 thorpej (*pa->pa_free)(pp, v);
2867 1.66 thorpej }
2868 1.66 thorpej
2869 1.66 thorpej void *
2870 1.124 yamt pool_page_alloc(struct pool *pp, int flags)
2871 1.66 thorpej {
2872 1.192 rmind 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
2873 1.191 para vmem_addr_t va;
2874 1.192 rmind int ret;
2875 1.191 para
2876 1.192 rmind ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2877 1.192 rmind vflags | VM_INSTANTFIT, &va);
2878 1.66 thorpej
2879 1.192 rmind return ret ? NULL : (void *)va;
2880 1.66 thorpej }
2881 1.66 thorpej
2882 1.66 thorpej void
2883 1.124 yamt pool_page_free(struct pool *pp, void *v)
2884 1.66 thorpej {
2885 1.66 thorpej
2886 1.191 para uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2887 1.98 yamt }
2888 1.98 yamt
2889 1.98 yamt static void *
2890 1.124 yamt pool_page_alloc_meta(struct pool *pp, int flags)
2891 1.98 yamt {
2892 1.192 rmind 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
2893 1.192 rmind vmem_addr_t va;
2894 1.192 rmind int ret;
2895 1.191 para
2896 1.192 rmind ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2897 1.192 rmind vflags | VM_INSTANTFIT, &va);
2898 1.98 yamt
2899 1.192 rmind return ret ? NULL : (void *)va;
2900 1.98 yamt }
2901 1.98 yamt
2902 1.98 yamt static void
2903 1.124 yamt pool_page_free_meta(struct pool *pp, void *v)
2904 1.98 yamt {
2905 1.98 yamt
2906 1.192 rmind vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2907 1.66 thorpej }
2908 1.66 thorpej
2909 1.262 maxv #ifdef KMSAN
2910 1.262 maxv static inline void
2911 1.262 maxv pool_get_kmsan(struct pool *pp, void *p)
2912 1.262 maxv {
2913 1.262 maxv kmsan_orig(p, pp->pr_size, KMSAN_TYPE_POOL, __RET_ADDR);
2914 1.262 maxv kmsan_mark(p, pp->pr_size, KMSAN_STATE_UNINIT);
2915 1.262 maxv }
2916 1.262 maxv
2917 1.262 maxv static inline void
2918 1.262 maxv pool_put_kmsan(struct pool *pp, void *p)
2919 1.262 maxv {
2920 1.262 maxv kmsan_mark(p, pp->pr_size, KMSAN_STATE_INITED);
2921 1.262 maxv }
2922 1.262 maxv
2923 1.262 maxv static inline void
2924 1.262 maxv pool_cache_get_kmsan(pool_cache_t pc, void *p)
2925 1.262 maxv {
2926 1.262 maxv if (__predict_false(pc_has_ctor(pc))) {
2927 1.262 maxv return;
2928 1.262 maxv }
2929 1.262 maxv pool_get_kmsan(&pc->pc_pool, p);
2930 1.262 maxv }
2931 1.262 maxv
2932 1.262 maxv static inline void
2933 1.262 maxv pool_cache_put_kmsan(pool_cache_t pc, void *p)
2934 1.262 maxv {
2935 1.262 maxv pool_put_kmsan(&pc->pc_pool, p);
2936 1.262 maxv }
2937 1.262 maxv #endif
2938 1.262 maxv
2939 1.249 maxv #ifdef POOL_QUARANTINE
2940 1.249 maxv static void
2941 1.249 maxv pool_quarantine_init(struct pool *pp)
2942 1.249 maxv {
2943 1.249 maxv 	pp->pr_quar.rotor = 0;
2944 1.249 maxv 	memset(&pp->pr_quar.list, 0, sizeof(pp->pr_quar.list));
2945 1.249 maxv }
2946 1.249 maxv
2947 1.249 maxv static void
2948 1.249 maxv pool_quarantine_flush(struct pool *pp)
2949 1.249 maxv {
2950 1.249 maxv pool_quar_t *quar = &pp->pr_quar;
2951 1.249 maxv struct pool_pagelist pq;
2952 1.249 maxv size_t i;
2953 1.249 maxv
2954 1.249 maxv LIST_INIT(&pq);
2955 1.249 maxv
2956 1.249 maxv mutex_enter(&pp->pr_lock);
2957 1.249 maxv for (i = 0; i < POOL_QUARANTINE_DEPTH; i++) {
2958 1.249 maxv if (quar->list[i] == 0)
2959 1.249 maxv continue;
2960 1.249 maxv pool_do_put(pp, (void *)quar->list[i], &pq);
2961 1.249 maxv }
2962 1.249 maxv mutex_exit(&pp->pr_lock);
2963 1.249 maxv
2964 1.249 maxv pr_pagelist_free(pp, &pq);
2965 1.249 maxv }
2966 1.249 maxv
2967 1.249 maxv static bool
2968 1.249 maxv pool_put_quarantine(struct pool *pp, void *v, struct pool_pagelist *pq)
2969 1.249 maxv {
2970 1.249 maxv pool_quar_t *quar = &pp->pr_quar;
2971 1.249 maxv uintptr_t old;
2972 1.249 maxv
2973 1.249 maxv if (pp->pr_roflags & PR_NOTOUCH) {
2974 1.249 maxv return false;
2975 1.249 maxv }
2976 1.249 maxv
2977 1.249 maxv pool_redzone_check(pp, v);
2978 1.249 maxv
2979 1.249 maxv old = quar->list[quar->rotor];
2980 1.249 maxv quar->list[quar->rotor] = (uintptr_t)v;
2981 1.249 maxv quar->rotor = (quar->rotor + 1) % POOL_QUARANTINE_DEPTH;
2982 1.249 maxv if (old != 0) {
2983 1.249 maxv pool_do_put(pp, (void *)old, pq);
2984 1.249 maxv }
2985 1.249 maxv
2986 1.249 maxv return true;
2987 1.249 maxv }
2988 1.268 maxv #endif
2989 1.249 maxv
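/*
 * POOL_NOCACHE: defeat the pool cache on the put side, destructing the
 * object and returning it straight to the underlying pool, so that
 * every release is visible to the pool-level debug machinery.
 */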
2990 1.268 maxv #ifdef POOL_NOCACHE
2991 1.249 maxv static bool
2992 1.268 maxv pool_cache_put_nocache(pool_cache_t pc, void *p)
2993 1.249 maxv {
2994 1.249 maxv pool_cache_destruct_object(pc, p);
2995 1.249 maxv return true;
2996 1.249 maxv }
2997 1.249 maxv #endif
2998 1.249 maxv
2999 1.204 maxv #ifdef POOL_REDZONE
3000 1.204 maxv #if defined(_LP64)
3001 1.204 maxv # define PRIME 0x9e37fffffffc0000UL
3002 1.204 maxv #else /* defined(_LP64) */
3003 1.204 maxv # define PRIME 0x9e3779b1
3004 1.204 maxv #endif /* defined(_LP64) */
3005 1.204 maxv #define STATIC_BYTE 0xFE
3006 1.204 maxv CTASSERT(POOL_REDZONE_SIZE > 1);
3007 1.204 maxv
3008 1.224 maxv #ifndef KASAN
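/*
 * pool_pattern_generate:
 *
 *	Derive a cheap fill byte from an address by multiplying it with
 *	a large odd constant and keeping the top byte.  The pattern is a
 *	pure function of the byte's own address, so it can be recomputed
 *	at check time without storing anything.
 */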
3009 1.204 maxv static inline uint8_t
3010 1.204 maxv pool_pattern_generate(const void *p)
3011 1.204 maxv {
3012 1.204 maxv return (uint8_t)(((uintptr_t)p) * PRIME
3013 1.204 maxv >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
3014 1.204 maxv }
3015 1.224 maxv #endif
3016 1.204 maxv
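/*
 * pool_redzone_init:
 *
 *	Decide whether items in this pool get a red zone appended.  Use
 *	the natural padding between the requested and the rounded-up
 *	item size if it is big enough; otherwise grow pr_size, and
 *	disable the red zone entirely if the grown item would no longer
 *	fit in a page.
 */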
3017 1.204 maxv static void
3018 1.204 maxv pool_redzone_init(struct pool *pp, size_t requested_size)
3019 1.204 maxv {
3020 1.227 maxv size_t redzsz;
3021 1.204 maxv size_t nsz;
3022 1.204 maxv
3023 1.227 maxv #ifdef KASAN
3024 1.227 maxv redzsz = requested_size;
3025 1.227 maxv kasan_add_redzone(&redzsz);
3026 1.227 maxv redzsz -= requested_size;
3027 1.227 maxv #else
3028 1.227 maxv redzsz = POOL_REDZONE_SIZE;
3029 1.227 maxv #endif
3030 1.227 maxv
3031 1.204 maxv if (pp->pr_roflags & PR_NOTOUCH) {
3032 1.204 maxv pp->pr_redzone = false;
3033 1.204 maxv return;
3034 1.204 maxv }
3035 1.204 maxv
3036 1.204 maxv /*
3037 1.204 maxv * We may have extended the requested size earlier; check if
3038 1.204 maxv * there's naturally space in the padding for a red zone.
3039 1.204 maxv */
3040 1.227 maxv if (pp->pr_size - requested_size >= redzsz) {
3041 1.229 maxv pp->pr_reqsize_with_redzone = requested_size + redzsz;
3042 1.204 maxv pp->pr_redzone = true;
3043 1.204 maxv return;
3044 1.204 maxv }
3045 1.204 maxv
3046 1.204 maxv /*
3047 1.204 maxv * No space in the natural padding; check if we can extend the
3048 1.204 maxv * item size a bit to make room.
3049 1.204 maxv */
3050 1.227 maxv nsz = roundup(pp->pr_size + redzsz, pp->pr_align);
3051 1.204 maxv if (nsz <= pp->pr_alloc->pa_pagesz) {
3052 1.204 maxv /* Ok, we can */
3053 1.204 maxv pp->pr_size = nsz;
3054 1.229 maxv pp->pr_reqsize_with_redzone = requested_size + redzsz;
3055 1.204 maxv pp->pr_redzone = true;
3056 1.204 maxv } else {
3057 1.204 maxv /* No room left for a red zone; disable it for this pool. */
3058 1.204 maxv pp->pr_redzone = false;
3059 1.204 maxv printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
3060 1.204 maxv }
3061 1.204 maxv }
3062 1.204 maxv
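/*
 * pool_redzone_fill:
 *
 *	Arm the red zone of an item about to be handed out.  Under KASAN
 *	the zone is poisoned by marking only pr_reqsize bytes as valid;
 *	otherwise it is filled with the address-derived pattern.
 */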
3063 1.204 maxv static void
3064 1.204 maxv pool_redzone_fill(struct pool *pp, void *p)
3065 1.204 maxv {
3066 1.224 maxv if (!pp->pr_redzone)
3067 1.224 maxv return;
3068 1.224 maxv #ifdef KASAN
3069 1.248 maxv kasan_mark(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone,
3070 1.248 maxv KASAN_POOL_REDZONE);
3071 1.224 maxv #else
3072 1.204 maxv uint8_t *cp, pat;
3073 1.204 maxv const uint8_t *ep;
3074 1.204 maxv
3075 1.204 maxv cp = (uint8_t *)p + pp->pr_reqsize;
3076 1.204 maxv ep = cp + POOL_REDZONE_SIZE;
3077 1.204 maxv
3078 1.204 maxv /*
3079 1.204 maxv * The first byte of the red zone must not be '\0': an off-by-one
3080 1.204 maxv * string overflow writes a '\0' there and would go undetected.
3081 1.204 maxv */
3082 1.204 maxv pat = pool_pattern_generate(cp);
3083 1.204 maxv *cp = (pat == '\0') ? STATIC_BYTE: pat;
3084 1.204 maxv cp++;
3085 1.204 maxv
3086 1.204 maxv while (cp < ep) {
3087 1.204 maxv *cp = pool_pattern_generate(cp);
3088 1.204 maxv cp++;
3089 1.204 maxv }
3090 1.224 maxv #endif
3091 1.204 maxv }
3092 1.204 maxv
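/*
 * pool_redzone_check:
 *
 *	Verify the red zone of an item being freed.  Under KASAN the
 *	whole item is simply marked freed; otherwise every red-zone byte
 *	is compared against the expected pattern, and a mismatch (a
 *	write past the end of the item) panics the system.
 */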
3093 1.204 maxv static void
3094 1.204 maxv pool_redzone_check(struct pool *pp, void *p)
3095 1.204 maxv {
3096 1.224 maxv if (!pp->pr_redzone)
3097 1.224 maxv return;
3098 1.224 maxv #ifdef KASAN
3099 1.248 maxv kasan_mark(p, 0, pp->pr_reqsize_with_redzone, KASAN_POOL_FREED);
3100 1.224 maxv #else
3101 1.204 maxv uint8_t *cp, pat, expected;
3102 1.204 maxv const uint8_t *ep;
3103 1.204 maxv
3104 1.204 maxv cp = (uint8_t *)p + pp->pr_reqsize;
3105 1.204 maxv ep = cp + POOL_REDZONE_SIZE;
3106 1.204 maxv
3107 1.204 maxv pat = pool_pattern_generate(cp);
3108 1.204 maxv expected = (pat == '\0') ? STATIC_BYTE: pat;
3109 1.264 maxv if (__predict_false(*cp != expected)) {
3110 1.264 maxv panic("%s: [%s] 0x%02x != 0x%02x", __func__,
3111 1.264 maxv pp->pr_wchan, *cp, expected);
3112 1.204 maxv }
3113 1.204 maxv cp++;
3114 1.204 maxv
3115 1.204 maxv while (cp < ep) {
3116 1.204 maxv expected = pool_pattern_generate(cp);
3117 1.225 maxv if (__predict_false(*cp != expected)) {
3118 1.264 maxv panic("%s: [%s] 0x%02x != 0x%02x", __func__,
3119 1.264 maxv pp->pr_wchan, *cp, expected);
3120 1.204 maxv }
3121 1.204 maxv cp++;
3122 1.204 maxv }
3123 1.224 maxv #endif
3124 1.204 maxv }
3125 1.204 maxv
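/*
 * pool_cache_redzone_check:
 *
 *	Red-zone check on the pool_cache put path.  Under KASAN, objects
 *	belonging to caches with a ctor/dtor stay constructed while they
 *	sit in the cache, so they must remain valid and the check is
 *	skipped.
 */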
3126 1.229 maxv static void
3127 1.229 maxv pool_cache_redzone_check(pool_cache_t pc, void *p)
3128 1.229 maxv {
3129 1.229 maxv #ifdef KASAN
3130 1.257 maxv /* If there is a ctor/dtor, leave the data as valid. */
3131 1.257 maxv if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
3132 1.229 maxv return;
3133 1.229 maxv }
3134 1.229 maxv #endif
3135 1.229 maxv pool_redzone_check(&pc->pc_pool, p);
3136 1.229 maxv }
3137 1.229 maxv
3138 1.204 maxv #endif /* POOL_REDZONE */
3139 1.204 maxv
3140 1.141 yamt #if defined(DDB)
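/*
 * Helpers for pool_whatis(): test whether an address lies within a
 * pool page, within a single item, or within any object held in a
 * pool cache group, and whether the item covering an address is
 * currently allocated (by bitmap, or by scanning the free list).
 */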
3141 1.141 yamt static bool
3142 1.141 yamt pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3143 1.141 yamt {
3144 1.141 yamt
3145 1.141 yamt return (uintptr_t)ph->ph_page <= addr &&
3146 1.141 yamt addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
3147 1.141 yamt }
3148 1.141 yamt
3149 1.143 yamt static bool
3150 1.143 yamt pool_in_item(struct pool *pp, void *item, uintptr_t addr)
3151 1.143 yamt {
3152 1.143 yamt
3153 1.143 yamt return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
3154 1.143 yamt }
3155 1.143 yamt
3156 1.143 yamt static bool
3157 1.143 yamt pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
3158 1.143 yamt {
3159 1.143 yamt int i;
3160 1.143 yamt
3161 1.143 yamt if (pcg == NULL) {
3162 1.143 yamt return false;
3163 1.143 yamt }
3164 1.144 yamt for (i = 0; i < pcg->pcg_avail; i++) {
3165 1.143 yamt if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
3166 1.143 yamt return true;
3167 1.143 yamt }
3168 1.143 yamt }
3169 1.143 yamt return false;
3170 1.143 yamt }
3171 1.143 yamt
3172 1.143 yamt static bool
3173 1.143 yamt pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
3174 1.143 yamt {
3175 1.143 yamt
3176 1.242 maxv if ((pp->pr_roflags & PR_USEBMAP) != 0) {
3177 1.234 maxv unsigned int idx = pr_item_bitmap_index(pp, ph, (void *)addr);
3178 1.143 yamt pool_item_bitmap_t *bitmap =
3179 1.143 yamt ph->ph_bitmap + (idx / BITMAP_SIZE);
3180 1.143 yamt pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
3181 1.143 yamt
3182 1.143 yamt return (*bitmap & mask) == 0;
3183 1.143 yamt } else {
3184 1.143 yamt struct pool_item *pi;
3185 1.143 yamt
3186 1.143 yamt LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
3187 1.143 yamt if (pool_in_item(pp, pi, addr)) {
3188 1.143 yamt return false;
3189 1.143 yamt }
3190 1.143 yamt }
3191 1.143 yamt return true;
3192 1.143 yamt }
3193 1.143 yamt }
3194 1.143 yamt
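/*
 * pool_whatis:
 *
 *	DDB "whatis" backend: for each pool that covers the address,
 *	report the containing item and whether it is allocated, free,
 *	cached globally, or cached by a particular CPU.
 */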
3195 1.141 yamt void
3196 1.141 yamt pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
3197 1.141 yamt {
3198 1.141 yamt struct pool *pp;
3199 1.141 yamt
3200 1.145 ad TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3201 1.141 yamt struct pool_item_header *ph;
3202 1.141 yamt uintptr_t item;
3203 1.143 yamt bool allocated = true;
3204 1.143 yamt bool incache = false;
3205 1.143 yamt bool incpucache = false;
3206 1.143 yamt char cpucachestr[32];
3207 1.141 yamt
3208 1.141 yamt if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
3209 1.141 yamt LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
3210 1.141 yamt if (pool_in_page(pp, ph, addr)) {
3211 1.141 yamt goto found;
3212 1.141 yamt }
3213 1.141 yamt }
3214 1.141 yamt LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
3215 1.141 yamt if (pool_in_page(pp, ph, addr)) {
3216 1.143 yamt allocated =
3217 1.143 yamt pool_allocated(pp, ph, addr);
3218 1.143 yamt goto found;
3219 1.143 yamt }
3220 1.143 yamt }
3221 1.143 yamt LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
3222 1.143 yamt if (pool_in_page(pp, ph, addr)) {
3223 1.143 yamt allocated = false;
3224 1.141 yamt goto found;
3225 1.141 yamt }
3226 1.141 yamt }
3227 1.141 yamt continue;
3228 1.141 yamt } else {
3229 1.141 yamt ph = pr_find_pagehead_noalign(pp, (void *)addr);
3230 1.141 yamt if (ph == NULL || !pool_in_page(pp, ph, addr)) {
3231 1.141 yamt continue;
3232 1.141 yamt }
3233 1.143 yamt allocated = pool_allocated(pp, ph, addr);
3234 1.141 yamt }
3235 1.141 yamt found:
3236 1.143 yamt if (allocated && pp->pr_cache) {
3237 1.143 yamt pool_cache_t pc = pp->pr_cache;
3238 1.143 yamt struct pool_cache_group *pcg;
3239 1.143 yamt int i;
3240 1.143 yamt
3241 1.143 yamt for (pcg = pc->pc_fullgroups; pcg != NULL;
3242 1.143 yamt pcg = pcg->pcg_next) {
3243 1.143 yamt if (pool_in_cg(pp, pcg, addr)) {
3244 1.143 yamt incache = true;
3245 1.143 yamt goto print;
3246 1.143 yamt }
3247 1.143 yamt }
3248 1.183 ad for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
3249 1.143 yamt pool_cache_cpu_t *cc;
3250 1.143 yamt
3251 1.143 yamt if ((cc = pc->pc_cpus[i]) == NULL) {
3252 1.143 yamt continue;
3253 1.143 yamt }
3254 1.143 yamt if (pool_in_cg(pp, cc->cc_current, addr) ||
3255 1.143 yamt pool_in_cg(pp, cc->cc_previous, addr)) {
3256 1.143 yamt struct cpu_info *ci =
3257 1.170 ad cpu_lookup(i);
3258 1.143 yamt
3259 1.143 yamt incpucache = true;
3260 1.143 yamt snprintf(cpucachestr,
3261 1.143 yamt sizeof(cpucachestr),
3262 1.143 yamt "cached by CPU %u",
3263 1.153 martin ci->ci_index);
3264 1.143 yamt goto print;
3265 1.143 yamt }
3266 1.143 yamt }
3267 1.143 yamt }
3268 1.143 yamt print:
3269 1.141 yamt item = (uintptr_t)ph->ph_page + ph->ph_off;
3270 1.141 yamt item = item + rounddown(addr - item, pp->pr_size);
3271 1.143 yamt (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
3272 1.141 yamt (void *)addr, (void *)item, (size_t)(addr - item),
3273 1.143 yamt pp->pr_wchan,
3274 1.143 yamt incpucache ? cpucachestr :
3275 1.143 yamt incache ? "cached" : allocated ? "allocated" : "free");
3276 1.141 yamt }
3277 1.141 yamt }
3278 1.141 yamt #endif /* defined(DDB) */
3279 1.203 joerg
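/*
 * pool_sysctl:
 *
 *	Handler for the kern.pool node.  A NULL oldp just sizes the
 *	result (one struct pool_sysctl per pool); otherwise per-pool
 *	statistics are copied out, with the per-CPU cache hit/miss
 *	counters summed into the pcpu fields.
 */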
3280 1.203 joerg static int
3281 1.203 joerg pool_sysctl(SYSCTLFN_ARGS)
3282 1.203 joerg {
3283 1.203 joerg struct pool_sysctl data;
3284 1.203 joerg struct pool *pp;
3285 1.203 joerg struct pool_cache *pc;
3286 1.203 joerg pool_cache_cpu_t *cc;
3287 1.203 joerg int error;
3288 1.203 joerg size_t i, written;
3289 1.203 joerg
3290 1.203 joerg if (oldp == NULL) {
3291 1.203 joerg *oldlenp = 0;
3292 1.203 joerg TAILQ_FOREACH(pp, &pool_head, pr_poollist)
3293 1.203 joerg *oldlenp += sizeof(data);
3294 1.203 joerg return 0;
3295 1.203 joerg }
3296 1.203 joerg
3297 1.203 joerg memset(&data, 0, sizeof(data));
3298 1.203 joerg error = 0;
3299 1.203 joerg written = 0;
3300 1.203 joerg TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
3301 1.203 joerg if (written + sizeof(data) > *oldlenp)
3302 1.203 joerg break;
3303 1.203 joerg strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
3304 1.203 joerg data.pr_pagesize = pp->pr_alloc->pa_pagesz;
3305 1.203 joerg data.pr_flags = pp->pr_roflags | pp->pr_flags;
3306 1.203 joerg #define COPY(field) data.field = pp->field
3307 1.203 joerg COPY(pr_size);
3309 1.203 joerg COPY(pr_itemsperpage);
3310 1.203 joerg COPY(pr_nitems);
3311 1.203 joerg COPY(pr_nout);
3312 1.203 joerg COPY(pr_hardlimit);
3313 1.203 joerg COPY(pr_npages);
3314 1.203 joerg COPY(pr_minpages);
3315 1.203 joerg COPY(pr_maxpages);
3316 1.203 joerg
3317 1.203 joerg COPY(pr_nget);
3318 1.203 joerg COPY(pr_nfail);
3319 1.203 joerg COPY(pr_nput);
3320 1.203 joerg COPY(pr_npagealloc);
3321 1.203 joerg COPY(pr_npagefree);
3322 1.203 joerg COPY(pr_hiwat);
3323 1.203 joerg COPY(pr_nidle);
3324 1.203 joerg #undef COPY
3325 1.203 joerg
3326 1.203 joerg data.pr_cache_nmiss_pcpu = 0;
3327 1.203 joerg data.pr_cache_nhit_pcpu = 0;
3328 1.203 joerg if (pp->pr_cache) {
3329 1.203 joerg pc = pp->pr_cache;
3330 1.203 joerg data.pr_cache_meta_size = pc->pc_pcgsize;
3331 1.203 joerg data.pr_cache_nfull = pc->pc_nfull;
3332 1.203 joerg data.pr_cache_npartial = pc->pc_npart;
3333 1.203 joerg data.pr_cache_nempty = pc->pc_nempty;
3334 1.203 joerg data.pr_cache_ncontended = pc->pc_contended;
3335 1.203 joerg data.pr_cache_nmiss_global = pc->pc_misses;
3336 1.203 joerg data.pr_cache_nhit_global = pc->pc_hits;
3337 1.203 joerg for (i = 0; i < pc->pc_ncpu; ++i) {
3338 1.203 joerg cc = pc->pc_cpus[i];
3339 1.203 joerg if (cc == NULL)
3340 1.203 joerg continue;
3341 1.206 knakahar data.pr_cache_nmiss_pcpu += cc->cc_misses;
3342 1.206 knakahar data.pr_cache_nhit_pcpu += cc->cc_hits;
3343 1.203 joerg }
3344 1.203 joerg } else {
3345 1.203 joerg data.pr_cache_meta_size = 0;
3346 1.203 joerg data.pr_cache_nfull = 0;
3347 1.203 joerg data.pr_cache_npartial = 0;
3348 1.203 joerg data.pr_cache_nempty = 0;
3349 1.203 joerg data.pr_cache_ncontended = 0;
3350 1.203 joerg data.pr_cache_nmiss_global = 0;
3351 1.203 joerg data.pr_cache_nhit_global = 0;
3352 1.203 joerg }
3353 1.203 joerg
3354 1.203 joerg error = sysctl_copyout(l, &data, oldp, sizeof(data));
3355 1.203 joerg if (error)
3356 1.203 joerg break;
3357 1.203 joerg written += sizeof(data);
3358 1.203 joerg oldp = (char *)oldp + sizeof(data);
3359 1.203 joerg }
3360 1.203 joerg
3361 1.203 joerg *oldlenp = written;
3362 1.203 joerg return error;
3363 1.203 joerg }
3364 1.203 joerg
3365 1.203 joerg SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
3366 1.203 joerg {
3367 1.203 joerg const struct sysctlnode *rnode = NULL;
3368 1.203 joerg
3369 1.203 joerg sysctl_createv(clog, 0, NULL, &rnode,
3370 1.203 joerg CTLFLAG_PERMANENT,
3371 1.203 joerg CTLTYPE_STRUCT, "pool",
3372 1.203 joerg SYSCTL_DESCR("Get pool statistics"),
3373 1.203 joerg pool_sysctl, 0, NULL, 0,
3374 1.203 joerg CTL_KERN, CTL_CREATE, CTL_EOL);
3375 1.203 joerg }
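/*
 * Userland consumers (e.g. vmstat(1)-style tools) are expected to read
 * this node via sysctl(3): query once with a NULL buffer to learn the
 * size, allocate, then fetch the array of struct pool_sysctl records.
 */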
3376