subr_pool.c revision 1.206

1 1.206 knakahar /* $NetBSD: subr_pool.c,v 1.206 2016/02/05 03:04:52 knakahara Exp $ */
2 1.1 pk
3 1.1 pk /*-
4 1.204 maxv * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
5 1.183 ad * The NetBSD Foundation, Inc.
6 1.1 pk * All rights reserved.
7 1.1 pk *
8 1.1 pk * This code is derived from software contributed to The NetBSD Foundation
9 1.20 thorpej * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10 1.204 maxv * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11 1.204 maxv * Maxime Villard.
12 1.1 pk *
13 1.1 pk * Redistribution and use in source and binary forms, with or without
14 1.1 pk * modification, are permitted provided that the following conditions
15 1.1 pk * are met:
16 1.1 pk * 1. Redistributions of source code must retain the above copyright
17 1.1 pk * notice, this list of conditions and the following disclaimer.
18 1.1 pk * 2. Redistributions in binary form must reproduce the above copyright
19 1.1 pk * notice, this list of conditions and the following disclaimer in the
20 1.1 pk * documentation and/or other materials provided with the distribution.
21 1.1 pk *
22 1.1 pk * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 1.1 pk * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 1.1 pk * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 1.1 pk * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 1.1 pk * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.1 pk * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.1 pk * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.1 pk * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.1 pk * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.1 pk * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.1 pk * POSSIBILITY OF SUCH DAMAGE.
33 1.1 pk */
34 1.64 lukem
35 1.64 lukem #include <sys/cdefs.h>
36 1.206 knakahar __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.206 2016/02/05 03:04:52 knakahara Exp $");
37 1.24 scottr
38 1.205 pooka #ifdef _KERNEL_OPT
39 1.141 yamt #include "opt_ddb.h"
40 1.28 thorpej #include "opt_lockdebug.h"
41 1.205 pooka #endif
42 1.1 pk
43 1.1 pk #include <sys/param.h>
44 1.1 pk #include <sys/systm.h>
45 1.203 joerg #include <sys/sysctl.h>
46 1.135 yamt #include <sys/bitops.h>
47 1.1 pk #include <sys/proc.h>
48 1.1 pk #include <sys/errno.h>
49 1.1 pk #include <sys/kernel.h>
50 1.191 para #include <sys/vmem.h>
51 1.1 pk #include <sys/pool.h>
52 1.20 thorpej #include <sys/syslog.h>
53 1.125 ad #include <sys/debug.h>
54 1.134 ad #include <sys/lockdebug.h>
55 1.134 ad #include <sys/xcall.h>
56 1.134 ad #include <sys/cpu.h>
57 1.145 ad #include <sys/atomic.h>
58 1.3 pk
59 1.187 uebayasi #include <uvm/uvm_extern.h>
60 1.3 pk
61 1.1 pk /*
62 1.1 pk * Pool resource management utility.
63 1.3 pk *
64 1.88 chs * Memory is allocated in pages which are split into pieces according to
65 1.88 chs * the pool item size. Each page is kept on one of three lists in the
66 1.88 chs * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
67 1.88 chs * for empty, full and partially-full pages respectively. The individual
68 1.88 chs * pool items are on a linked list headed by `ph_itemlist' in each page
69 1.88 chs * header. The memory for building the page list is either taken from
70 1.88 chs * the allocated pages themselves (for small pool items) or taken from
71 1.88 chs * an internal pool of page headers (`phpool').
72 1.1 pk */
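/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): typical use of the pool API implemented below.  The struct
 * name "frobnitz" and the pool variable are hypothetical;
 * pool_init(), pool_get(), pool_put() and pool_destroy() are the
 * real entry points.
 */
#if 0
struct frobnitz {
	int	f_state;
};

static struct pool frob_pool;

static void
frob_example(void)
{
	struct frobnitz *f;

	/* Carve frobnitz-sized items out of pages; default allocator. */
	pool_init(&frob_pool, sizeof(struct frobnitz), 0, 0, 0,
	    "frobpl", NULL, IPL_NONE);

	f = pool_get(&frob_pool, PR_WAITOK);	/* may sleep for memory */
	/* ... use f ... */
	pool_put(&frob_pool, f);

	pool_destroy(&frob_pool);
}
#endif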
73 1.1 pk
74 1.202 abs /* List of all pools. Not static, as it is needed by 'vmstat -i' */
75 1.202 abs TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
76 1.134 ad
77 1.3 pk /* Private pool for page header structures */
78 1.97 yamt #define PHPOOL_MAX 8
79 1.97 yamt static struct pool phpool[PHPOOL_MAX];
80 1.135 yamt #define PHPOOL_FREELIST_NELEM(idx) \
81 1.135 yamt (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
82 1.3 pk
83 1.62 bjh21 #ifdef POOL_SUBPAGE
84 1.62 bjh21 /* Pool of subpages for use by normal pools. */
85 1.62 bjh21 static struct pool psppool;
86 1.62 bjh21 #endif
87 1.62 bjh21
88 1.204 maxv #ifdef POOL_REDZONE
89 1.204 maxv # define POOL_REDZONE_SIZE 2
90 1.204 maxv static void pool_redzone_init(struct pool *, size_t);
91 1.204 maxv static void pool_redzone_fill(struct pool *, void *);
92 1.204 maxv static void pool_redzone_check(struct pool *, void *);
93 1.204 maxv #else
94 1.204 maxv # define pool_redzone_init(pp, sz) /* NOTHING */
95 1.204 maxv # define pool_redzone_fill(pp, ptr) /* NOTHING */
96 1.204 maxv # define pool_redzone_check(pp, ptr) /* NOTHING */
97 1.204 maxv #endif
98 1.204 maxv
99 1.98 yamt static void *pool_page_alloc_meta(struct pool *, int);
100 1.98 yamt static void pool_page_free_meta(struct pool *, void *);
101 1.98 yamt
102 1.98 yamt /* allocator for pool metadata */
103 1.134 ad struct pool_allocator pool_allocator_meta = {
104 1.191 para .pa_alloc = pool_page_alloc_meta,
105 1.191 para .pa_free = pool_page_free_meta,
106 1.191 para .pa_pagesz = 0
107 1.98 yamt };
108 1.98 yamt
109 1.3 pk /* # of seconds to retain page after last use */
110 1.3 pk int pool_inactive_time = 10;
111 1.3 pk
112 1.3 pk /* Next candidate for drainage (see pool_drain()) */
113 1.23 thorpej static struct pool *drainpp;
114 1.23 thorpej
115 1.134 ad /* This lock protects both pool_head and drainpp. */
116 1.134 ad static kmutex_t pool_head_lock;
117 1.134 ad static kcondvar_t pool_busy;
118 1.3 pk
119 1.178 elad /* This lock protects initialization of a potentially shared pool allocator */
120 1.178 elad static kmutex_t pool_allocator_lock;
121 1.178 elad
122 1.135 yamt typedef uint32_t pool_item_bitmap_t;
123 1.135 yamt #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
124 1.135 yamt #define BITMAP_MASK (BITMAP_SIZE - 1)
125 1.99 yamt
126 1.3 pk struct pool_item_header {
127 1.3 pk /* Page headers */
128 1.88 chs LIST_ENTRY(pool_item_header)
129 1.3 pk ph_pagelist; /* pool page list */
130 1.88 chs SPLAY_ENTRY(pool_item_header)
131 1.88 chs ph_node; /* Off-page page headers */
132 1.128 christos void * ph_page; /* this page's address */
133 1.151 yamt uint32_t ph_time; /* last referenced */
134 1.135 yamt uint16_t ph_nmissing; /* # of chunks in use */
135 1.141 yamt uint16_t ph_off; /* start offset in page */
136 1.97 yamt union {
137 1.97 yamt /* !PR_NOTOUCH */
138 1.97 yamt struct {
139 1.102 chs LIST_HEAD(, pool_item)
140 1.97 yamt phu_itemlist; /* chunk list for this page */
141 1.97 yamt } phu_normal;
142 1.97 yamt /* PR_NOTOUCH */
143 1.97 yamt struct {
144 1.141 yamt pool_item_bitmap_t phu_bitmap[1];
145 1.97 yamt } phu_notouch;
146 1.97 yamt } ph_u;
147 1.3 pk };
148 1.97 yamt #define ph_itemlist ph_u.phu_normal.phu_itemlist
149 1.135 yamt #define ph_bitmap ph_u.phu_notouch.phu_bitmap
150 1.3 pk
151 1.1 pk struct pool_item {
152 1.3 pk #ifdef DIAGNOSTIC
153 1.82 thorpej u_int pi_magic;
154 1.33 chs #endif
155 1.134 ad #define PI_MAGIC 0xdeaddeadU
156 1.3 pk /* Other entries use only this list entry */
157 1.102 chs LIST_ENTRY(pool_item) pi_list;
158 1.3 pk };
159 1.3 pk
160 1.53 thorpej #define POOL_NEEDS_CATCHUP(pp) \
161 1.53 thorpej ((pp)->pr_nitems < (pp)->pr_minitems)
162 1.53 thorpej
163 1.43 thorpej /*
164 1.43 thorpej * Pool cache management.
165 1.43 thorpej *
166 1.43 thorpej * Pool caches provide a way for constructed objects to be cached by the
167 1.43 thorpej * pool subsystem. This can lead to performance improvements by avoiding
168 1.43 thorpej * needless object construction/destruction; it is deferred until absolutely
169 1.43 thorpej * necessary.
170 1.43 thorpej *
171 1.134 ad * Caches are grouped into cache groups. Each cache group references up
172 1.134 ad * to PCG_NUMOBJECTS constructed objects. When a cache allocates an
173 1.134 ad * object from the pool, it calls the object's constructor and places it
174 1.134 ad * into a cache group. When a cache group frees an object back to the
175 1.134 ad * pool, it first calls the object's destructor. This allows the object
176 1.134 ad * to persist in constructed form while freed to the cache.
177 1.134 ad *
178 1.134 ad * The pool references each cache, so that when a pool is drained by the
179 1.134 ad * pagedaemon, it can drain each individual cache as well. Each time a
180 1.134 ad * cache is drained, the most idle cache group is freed to the pool in
181 1.134 ad * its entirety.
182 1.43 thorpej *
183 1.43 thorpej * Pool caches are laid on top of pools. By layering them, we can avoid
184 1.43 thorpej * the complexity of cache management for pools which would not benefit
185 1.43 thorpej * from it.
186 1.43 thorpej */
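/*
 * Illustrative sketch (editor's addition): how a subsystem might use a
 * pool cache.  It reuses the hypothetical "frobnitz" type from the
 * sketch above; the ctor/dtor bodies are placeholders, while
 * pool_cache_init(), pool_cache_get(), pool_cache_put() and
 * pool_cache_destroy() are the real pool_cache(9) entry points.
 */
#if 0
static pool_cache_t frob_cache;

static int
frob_ctor(void *arg, void *obj, int flags)
{
	struct frobnitz *f = obj;

	f->f_state = 0;		/* expensive setup, done once per object */
	return 0;
}

static void
frob_dtor(void *arg, void *obj)
{
	/* undo whatever frob_ctor set up */
}

static void
frob_cache_example(void)
{
	struct frobnitz *f;

	frob_cache = pool_cache_init(sizeof(struct frobnitz), 0, 0, 0,
	    "frobcache", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);

	f = pool_cache_get(frob_cache, PR_WAITOK);
	/* ... use f; it arrives already constructed ... */
	pool_cache_put(frob_cache, f);

	pool_cache_destroy(frob_cache);
}
#endif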
187 1.43 thorpej
188 1.142 ad static struct pool pcg_normal_pool;
189 1.142 ad static struct pool pcg_large_pool;
190 1.134 ad static struct pool cache_pool;
191 1.134 ad static struct pool cache_cpu_pool;
192 1.3 pk
193 1.189 pooka pool_cache_t pnbuf_cache; /* pathname buffer cache */
194 1.189 pooka
195 1.145 ad /* List of all caches. */
196 1.145 ad TAILQ_HEAD(,pool_cache) pool_cache_head =
197 1.145 ad TAILQ_HEAD_INITIALIZER(pool_cache_head);
198 1.145 ad
199 1.162 ad int pool_cache_disable; /* global disable for caching */
200 1.169 yamt static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
201 1.145 ad
202 1.162 ad static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
203 1.162 ad void *);
204 1.162 ad static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
205 1.162 ad void **, paddr_t *, int);
206 1.134 ad static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
207 1.134 ad static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
208 1.175 jym static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
209 1.196 jym static void pool_cache_transfer(pool_cache_t);
210 1.3 pk
211 1.42 thorpej static int pool_catchup(struct pool *);
212 1.128 christos static void pool_prime_page(struct pool *, void *,
213 1.55 thorpej struct pool_item_header *);
214 1.88 chs static void pool_update_curpage(struct pool *);
215 1.66 thorpej
216 1.113 yamt static int pool_grow(struct pool *, int);
217 1.117 yamt static void *pool_allocator_alloc(struct pool *, int);
218 1.117 yamt static void pool_allocator_free(struct pool *, void *);
219 1.3 pk
220 1.97 yamt static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
221 1.199 christos void (*)(const char *, ...) __printflike(1, 2));
222 1.42 thorpej static void pool_print1(struct pool *, const char *,
223 1.199 christos void (*)(const char *, ...) __printflike(1, 2));
224 1.3 pk
225 1.88 chs static int pool_chk_page(struct pool *, const char *,
226 1.88 chs struct pool_item_header *);
227 1.88 chs
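/*
 * Editor's note (illustrative): for PR_NOTOUCH pools the free list may
 * not be stored inside the items themselves, so each page header keeps
 * a bitmap with one bit per item, where a set bit means "free".  For
 * example, with pr_size = 64 and an item at byte offset ph_off + 192
 * within its page, pr_item_notouch_index() below yields 192 / 64 = 3,
 * which maps to bit (3 & BITMAP_MASK) of word 3 / BITMAP_SIZE in
 * ph_bitmap[].
 */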
228 1.135 yamt static inline unsigned int
229 1.97 yamt pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
230 1.97 yamt const void *v)
231 1.97 yamt {
232 1.97 yamt const char *cp = v;
233 1.135 yamt unsigned int idx;
234 1.97 yamt
235 1.97 yamt KASSERT(pp->pr_roflags & PR_NOTOUCH);
236 1.128 christos idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
237 1.97 yamt KASSERT(idx < pp->pr_itemsperpage);
238 1.97 yamt return idx;
239 1.97 yamt }
240 1.97 yamt
241 1.110 perry static inline void
242 1.97 yamt pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
243 1.97 yamt void *obj)
244 1.97 yamt {
245 1.135 yamt unsigned int idx = pr_item_notouch_index(pp, ph, obj);
246 1.135 yamt pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
247 1.135 yamt pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
248 1.97 yamt
249 1.135 yamt KASSERT((*bitmap & mask) == 0);
250 1.135 yamt *bitmap |= mask;
251 1.97 yamt }
252 1.97 yamt
253 1.110 perry static inline void *
254 1.97 yamt pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
255 1.97 yamt {
256 1.135 yamt pool_item_bitmap_t *bitmap = ph->ph_bitmap;
257 1.135 yamt unsigned int idx;
258 1.135 yamt int i;
259 1.97 yamt
260 1.135 yamt for (i = 0; ; i++) {
261 1.135 yamt int bit;
262 1.97 yamt
263 1.135 yamt KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
264 1.135 yamt bit = ffs32(bitmap[i]);
265 1.135 yamt if (bit) {
266 1.135 yamt pool_item_bitmap_t mask;
267 1.135 yamt
268 1.135 yamt bit--;
269 1.135 yamt idx = (i * BITMAP_SIZE) + bit;
270 1.135 yamt mask = 1 << bit;
271 1.135 yamt KASSERT((bitmap[i] & mask) != 0);
272 1.135 yamt bitmap[i] &= ~mask;
273 1.135 yamt break;
274 1.135 yamt }
275 1.135 yamt }
276 1.135 yamt KASSERT(idx < pp->pr_itemsperpage);
277 1.128 christos return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
278 1.97 yamt }
279 1.97 yamt
280 1.135 yamt static inline void
281 1.141 yamt pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
282 1.135 yamt {
283 1.135 yamt pool_item_bitmap_t *bitmap = ph->ph_bitmap;
284 1.135 yamt const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
285 1.135 yamt int i;
286 1.135 yamt
287 1.135 yamt for (i = 0; i < n; i++) {
288 1.135 yamt bitmap[i] = (pool_item_bitmap_t)-1;
289 1.135 yamt }
290 1.135 yamt }
291 1.135 yamt
292 1.110 perry static inline int
293 1.88 chs phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
294 1.88 chs {
295 1.121 yamt
296 1.121 yamt /*
297 1.121 yamt * We consider a pool_item_header with a smaller ph_page to be bigger.
298 1.121 yamt * (This unnatural ordering is for the benefit of pr_find_pagehead.)
299 1.121 yamt */
300 1.121 yamt
301 1.88 chs if (a->ph_page < b->ph_page)
302 1.121 yamt return (1);
303 1.121 yamt else if (a->ph_page > b->ph_page)
304 1.88 chs return (-1);
305 1.88 chs else
306 1.88 chs return (0);
307 1.88 chs }
308 1.88 chs
309 1.88 chs SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
310 1.88 chs SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
311 1.88 chs
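/*
 * Editor's note (illustrative): because of the reversed ordering above,
 * the splay tree is effectively sorted by decreasing ph_page.  For a
 * PR_NOALIGN pool, pr_find_pagehead_noalign() below uses this to find
 * the candidate header for an arbitrary item address v: an exact
 * SPLAY_FIND hit means v is itself a page start; otherwise the
 * root/SPLAY_NEXT step settles on the header with the largest ph_page
 * still below v, which the caller then validates.
 */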
312 1.141 yamt static inline struct pool_item_header *
313 1.141 yamt pr_find_pagehead_noalign(struct pool *pp, void *v)
314 1.141 yamt {
315 1.141 yamt struct pool_item_header *ph, tmp;
316 1.141 yamt
317 1.141 yamt tmp.ph_page = (void *)(uintptr_t)v;
318 1.141 yamt ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
319 1.141 yamt if (ph == NULL) {
320 1.141 yamt ph = SPLAY_ROOT(&pp->pr_phtree);
321 1.141 yamt if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
322 1.141 yamt ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
323 1.141 yamt }
324 1.141 yamt KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
325 1.141 yamt }
326 1.141 yamt
327 1.141 yamt return ph;
328 1.141 yamt }
329 1.141 yamt
330 1.3 pk /*
331 1.121 yamt * Return the pool page header based on item address.
332 1.3 pk */
333 1.110 perry static inline struct pool_item_header *
334 1.121 yamt pr_find_pagehead(struct pool *pp, void *v)
335 1.3 pk {
336 1.88 chs struct pool_item_header *ph, tmp;
337 1.3 pk
338 1.121 yamt if ((pp->pr_roflags & PR_NOALIGN) != 0) {
339 1.141 yamt ph = pr_find_pagehead_noalign(pp, v);
340 1.121 yamt } else {
341 1.128 christos void *page =
342 1.128 christos (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
343 1.121 yamt
344 1.121 yamt if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
345 1.128 christos ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
346 1.121 yamt } else {
347 1.121 yamt tmp.ph_page = page;
348 1.121 yamt ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
349 1.121 yamt }
350 1.121 yamt }
351 1.3 pk
352 1.121 yamt KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
353 1.128 christos ((char *)ph->ph_page <= (char *)v &&
354 1.128 christos (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
355 1.88 chs return ph;
356 1.3 pk }
357 1.3 pk
358 1.101 thorpej static void
359 1.101 thorpej pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
360 1.101 thorpej {
361 1.101 thorpej struct pool_item_header *ph;
362 1.101 thorpej
363 1.101 thorpej while ((ph = LIST_FIRST(pq)) != NULL) {
364 1.101 thorpej LIST_REMOVE(ph, ph_pagelist);
365 1.101 thorpej pool_allocator_free(pp, ph->ph_page);
366 1.134 ad if ((pp->pr_roflags & PR_PHINPAGE) == 0)
367 1.101 thorpej pool_put(pp->pr_phpool, ph);
368 1.101 thorpej }
369 1.101 thorpej }
370 1.101 thorpej
371 1.3 pk /*
372 1.3 pk * Remove a page from the pool.
373 1.3 pk */
374 1.110 perry static inline void
375 1.61 chs pr_rmpage(struct pool *pp, struct pool_item_header *ph,
376 1.61 chs struct pool_pagelist *pq)
377 1.3 pk {
378 1.3 pk
379 1.134 ad KASSERT(mutex_owned(&pp->pr_lock));
380 1.91 yamt
381 1.3 pk /*
382 1.7 thorpej * If the page was idle, decrement the idle page count.
383 1.3 pk */
384 1.6 thorpej if (ph->ph_nmissing == 0) {
385 1.6 thorpej #ifdef DIAGNOSTIC
386 1.6 thorpej if (pp->pr_nidle == 0)
387 1.6 thorpej panic("pr_rmpage: nidle inconsistent");
388 1.20 thorpej if (pp->pr_nitems < pp->pr_itemsperpage)
389 1.20 thorpej panic("pr_rmpage: nitems inconsistent");
390 1.6 thorpej #endif
391 1.6 thorpej pp->pr_nidle--;
392 1.6 thorpej }
393 1.7 thorpej
394 1.20 thorpej pp->pr_nitems -= pp->pr_itemsperpage;
395 1.20 thorpej
396 1.7 thorpej /*
397 1.101 thorpej * Unlink the page from the pool and queue it for release.
398 1.7 thorpej */
399 1.88 chs LIST_REMOVE(ph, ph_pagelist);
400 1.91 yamt if ((pp->pr_roflags & PR_PHINPAGE) == 0)
401 1.91 yamt SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
402 1.101 thorpej LIST_INSERT_HEAD(pq, ph, ph_pagelist);
403 1.101 thorpej
404 1.7 thorpej pp->pr_npages--;
405 1.7 thorpej pp->pr_npagefree++;
406 1.6 thorpej
407 1.88 chs pool_update_curpage(pp);
408 1.3 pk }
409 1.3 pk
410 1.3 pk /*
411 1.94 simonb * Initialize the pool subsystem.
412 1.94 simonb */
413 1.94 simonb void
414 1.117 yamt pool_subsystem_init(void)
415 1.94 simonb {
416 1.192 rmind size_t size;
417 1.191 para int idx;
418 1.94 simonb
419 1.134 ad mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
420 1.179 mlelstv mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
421 1.134 ad cv_init(&pool_busy, "poolbusy");
422 1.134 ad
423 1.191 para /*
424 1.191 para * Initialize the private page header pools and the cache
425 1.191 para * magazine pools.
426 1.191 para */
427 1.191 para for (idx = 0; idx < PHPOOL_MAX; idx++) {
428 1.191 para static char phpool_names[PHPOOL_MAX][6+1+6+1];
429 1.191 para int nelem;
430 1.191 para size_t sz;
431 1.191 para
432 1.191 para nelem = PHPOOL_FREELIST_NELEM(idx);
433 1.191 para snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
434 1.191 para "phpool-%d", nelem);
435 1.191 para sz = sizeof(struct pool_item_header);
436 1.191 para if (nelem) {
437 1.191 para sz = offsetof(struct pool_item_header,
438 1.191 para ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
439 1.191 para }
440 1.191 para pool_init(&phpool[idx], sz, 0, 0, 0,
441 1.191 para phpool_names[idx], &pool_allocator_meta, IPL_VM);
442 1.117 yamt }
443 1.191 para #ifdef POOL_SUBPAGE
444 1.191 para pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
445 1.191 para PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
446 1.191 para #endif
447 1.191 para
448 1.191 para size = sizeof(pcg_t) +
449 1.191 para (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
450 1.191 para pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
451 1.191 para "pcgnormal", &pool_allocator_meta, IPL_VM);
452 1.191 para
453 1.191 para size = sizeof(pcg_t) +
454 1.191 para (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
455 1.191 para pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
456 1.191 para "pcglarge", &pool_allocator_meta, IPL_VM);
457 1.134 ad
458 1.156 ad pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
459 1.191 para 0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
460 1.134 ad
461 1.156 ad pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
462 1.191 para 0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
463 1.94 simonb }
464 1.94 simonb
465 1.94 simonb /*
466 1.3 pk * Initialize the given pool resource structure.
467 1.3 pk *
468 1.3 pk * We export this routine to allow other kernel parts to declare
469 1.195 rmind * static pools that must be initialized before kmem(9) is available.
470 1.3 pk */
471 1.3 pk void
472 1.42 thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
473 1.129 ad const char *wchan, struct pool_allocator *palloc, int ipl)
474 1.3 pk {
475 1.116 simonb struct pool *pp1;
476 1.204 maxv size_t trysize, phsize, prsize;
477 1.134 ad int off, slack;
478 1.3 pk
479 1.116 simonb #ifdef DEBUG
480 1.198 christos if (__predict_true(!cold))
481 1.198 christos mutex_enter(&pool_head_lock);
482 1.116 simonb /*
483 1.116 simonb * Check that the pool hasn't already been initialised and
484 1.116 simonb * added to the list of all pools.
485 1.116 simonb */
486 1.145 ad TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
487 1.116 simonb if (pp == pp1)
488 1.116 simonb panic("pool_init: pool %s already initialised",
489 1.116 simonb wchan);
490 1.116 simonb }
491 1.198 christos if (__predict_true(!cold))
492 1.198 christos mutex_exit(&pool_head_lock);
493 1.116 simonb #endif
494 1.116 simonb
495 1.66 thorpej if (palloc == NULL)
496 1.66 thorpej palloc = &pool_allocator_kmem;
497 1.112 bjh21 #ifdef POOL_SUBPAGE
498 1.112 bjh21 if (size > palloc->pa_pagesz) {
499 1.112 bjh21 if (palloc == &pool_allocator_kmem)
500 1.112 bjh21 palloc = &pool_allocator_kmem_fullpage;
501 1.112 bjh21 else if (palloc == &pool_allocator_nointr)
502 1.112 bjh21 palloc = &pool_allocator_nointr_fullpage;
503 1.112 bjh21 }
504 1.66 thorpej #endif /* POOL_SUBPAGE */
505 1.180 mlelstv if (!cold)
506 1.180 mlelstv mutex_enter(&pool_allocator_lock);
507 1.178 elad if (palloc->pa_refcnt++ == 0) {
508 1.112 bjh21 if (palloc->pa_pagesz == 0)
509 1.66 thorpej palloc->pa_pagesz = PAGE_SIZE;
510 1.66 thorpej
511 1.66 thorpej TAILQ_INIT(&palloc->pa_list);
512 1.66 thorpej
513 1.134 ad mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
514 1.66 thorpej palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
515 1.66 thorpej palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
516 1.4 thorpej }
517 1.180 mlelstv if (!cold)
518 1.180 mlelstv mutex_exit(&pool_allocator_lock);
519 1.3 pk
520 1.3 pk if (align == 0)
521 1.3 pk align = ALIGN(1);
522 1.14 thorpej
523 1.204 maxv prsize = size;
524 1.204 maxv if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
525 1.204 maxv prsize = sizeof(struct pool_item);
526 1.3 pk
527 1.204 maxv prsize = roundup(prsize, align);
528 1.66 thorpej #ifdef DIAGNOSTIC
529 1.204 maxv if (prsize > palloc->pa_pagesz)
530 1.204 maxv panic("pool_init: pool item size (%zu) too large", prsize);
531 1.66 thorpej #endif
532 1.35 pk
533 1.3 pk /*
534 1.3 pk * Initialize the pool structure.
535 1.3 pk */
536 1.88 chs LIST_INIT(&pp->pr_emptypages);
537 1.88 chs LIST_INIT(&pp->pr_fullpages);
538 1.88 chs LIST_INIT(&pp->pr_partpages);
539 1.134 ad pp->pr_cache = NULL;
540 1.3 pk pp->pr_curpage = NULL;
541 1.3 pk pp->pr_npages = 0;
542 1.3 pk pp->pr_minitems = 0;
543 1.3 pk pp->pr_minpages = 0;
544 1.3 pk pp->pr_maxpages = UINT_MAX;
545 1.20 thorpej pp->pr_roflags = flags;
546 1.20 thorpej pp->pr_flags = 0;
547 1.204 maxv pp->pr_size = prsize;
548 1.3 pk pp->pr_align = align;
549 1.3 pk pp->pr_wchan = wchan;
550 1.66 thorpej pp->pr_alloc = palloc;
551 1.20 thorpej pp->pr_nitems = 0;
552 1.20 thorpej pp->pr_nout = 0;
553 1.20 thorpej pp->pr_hardlimit = UINT_MAX;
554 1.20 thorpej pp->pr_hardlimit_warning = NULL;
555 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = 0;
556 1.31 thorpej pp->pr_hardlimit_ratecap.tv_usec = 0;
557 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
558 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
559 1.68 thorpej pp->pr_drain_hook = NULL;
560 1.68 thorpej pp->pr_drain_hook_arg = NULL;
561 1.125 ad pp->pr_freecheck = NULL;
562 1.204 maxv pool_redzone_init(pp, size);
563 1.3 pk
564 1.3 pk /*
565 1.3 pk * Decide whether to put the page header off-page, to avoid
566 1.92 enami * wasting too large a part of the page or too big an item.
567 1.92 enami * Off-page page headers go into a splay tree, so we can match
568 1.92 enami * a returned item with its header based on the page address.
569 1.92 enami * We use 1/16 of the page size and about 8 times the item
570 1.92 enami * size as the thresholds (XXX: tune).
571 1.92 enami *
572 1.92 enami * However, we'll put the header into the page if we can put
573 1.92 enami * it without wasting any items.
574 1.92 enami *
575 1.92 enami * Silently enforce `0 <= ioff < align'.
576 1.3 pk */
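/*
 * Editor's worked example (illustrative, assuming 4 KB pages and a
 * page header of roughly 64 bytes): items smaller than
 * MIN(4096 / 16, 64 * 8) = 256 bytes keep the header in-page; so do
 * larger items whenever the header fits in the otherwise-wasted tail
 * of the page (the trysize comparison below); everything else gets an
 * off-page header from phpool.
 */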
577 1.92 enami pp->pr_itemoffset = ioff %= align;
578 1.92 enami /* See the comment below about reserved bytes. */
579 1.92 enami trysize = palloc->pa_pagesz - ((align - ioff) % align);
580 1.92 enami phsize = ALIGN(sizeof(struct pool_item_header));
581 1.201 para if (pp->pr_roflags & PR_PHINPAGE ||
582 1.201 para ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
583 1.97 yamt (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
584 1.201 para trysize / pp->pr_size == (trysize - phsize) / pp->pr_size))) {
585 1.3 pk /* Use the end of the page for the page header */
586 1.20 thorpej pp->pr_roflags |= PR_PHINPAGE;
587 1.92 enami pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
588 1.2 pk } else {
589 1.3 pk /* The page header will be taken from our page header pool */
590 1.3 pk pp->pr_phoffset = 0;
591 1.66 thorpej off = palloc->pa_pagesz;
592 1.88 chs SPLAY_INIT(&pp->pr_phtree);
593 1.2 pk }
594 1.1 pk
595 1.3 pk /*
596 1.3 pk * Alignment is to take place at `ioff' within the item. This means
597 1.3 pk * we must reserve up to `align - 1' bytes on the page to allow
598 1.3 pk * appropriate positioning of each item.
599 1.3 pk */
600 1.3 pk pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
601 1.43 thorpej KASSERT(pp->pr_itemsperpage != 0);
602 1.97 yamt if ((pp->pr_roflags & PR_NOTOUCH)) {
603 1.97 yamt int idx;
604 1.97 yamt
605 1.97 yamt for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
606 1.97 yamt idx++) {
607 1.97 yamt /* nothing */
608 1.97 yamt }
609 1.97 yamt if (idx >= PHPOOL_MAX) {
610 1.97 yamt /*
611 1.97 yamt * if you see this panic, consider tweaking
612 1.97 yamt * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
613 1.97 yamt */
614 1.97 yamt panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
615 1.97 yamt pp->pr_wchan, pp->pr_itemsperpage);
616 1.97 yamt }
617 1.97 yamt pp->pr_phpool = &phpool[idx];
618 1.97 yamt } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
619 1.97 yamt pp->pr_phpool = &phpool[0];
620 1.97 yamt }
621 1.97 yamt #if defined(DIAGNOSTIC)
622 1.97 yamt else {
623 1.97 yamt pp->pr_phpool = NULL;
624 1.97 yamt }
625 1.97 yamt #endif
626 1.3 pk
627 1.3 pk /*
628 1.3 pk * Use the slack between the chunks and the page header
629 1.3 pk * for "cache coloring".
630 1.3 pk */
631 1.3 pk slack = off - pp->pr_itemsperpage * pp->pr_size;
632 1.3 pk pp->pr_maxcolor = (slack / align) * align;
633 1.3 pk pp->pr_curcolor = 0;
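	/*
	 * Editor's worked example (illustrative): with off = 4032,
	 * pr_size = 440 and align = 8, itemsperpage is 9, so slack is
	 * 4032 - 9 * 440 = 72 and pr_maxcolor = 72.  Successive pages
	 * then start their items at offsets 0, 8, 16, ..., 72, so equal
	 * item indices on different pages do not all land on the same
	 * cache lines.
	 */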
634 1.3 pk
635 1.3 pk pp->pr_nget = 0;
636 1.3 pk pp->pr_nfail = 0;
637 1.3 pk pp->pr_nput = 0;
638 1.3 pk pp->pr_npagealloc = 0;
639 1.3 pk pp->pr_npagefree = 0;
640 1.1 pk pp->pr_hiwat = 0;
641 1.8 thorpej pp->pr_nidle = 0;
642 1.134 ad pp->pr_refcnt = 0;
643 1.3 pk
644 1.157 ad mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
645 1.134 ad cv_init(&pp->pr_cv, wchan);
646 1.134 ad pp->pr_ipl = ipl;
647 1.1 pk
648 1.145 ad /* Insert into the list of all pools. */
649 1.181 mlelstv if (!cold)
650 1.134 ad mutex_enter(&pool_head_lock);
651 1.145 ad TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
652 1.145 ad if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
653 1.145 ad break;
654 1.145 ad }
655 1.145 ad if (pp1 == NULL)
656 1.145 ad TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
657 1.145 ad else
658 1.145 ad TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
659 1.181 mlelstv if (!cold)
660 1.134 ad mutex_exit(&pool_head_lock);
661 1.134 ad
662 1.167 skrll /* Insert this into the list of pools using this allocator. */
663 1.181 mlelstv if (!cold)
664 1.134 ad mutex_enter(&palloc->pa_lock);
665 1.145 ad TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
666 1.181 mlelstv if (!cold)
667 1.134 ad mutex_exit(&palloc->pa_lock);
668 1.1 pk }
669 1.1 pk
670 1.1 pk /*
671 1.1 pk * De-commission a pool resource.
672 1.1 pk */
673 1.1 pk void
674 1.42 thorpej pool_destroy(struct pool *pp)
675 1.1 pk {
676 1.101 thorpej struct pool_pagelist pq;
677 1.3 pk struct pool_item_header *ph;
678 1.43 thorpej
679 1.101 thorpej /* Remove from global pool list */
680 1.134 ad mutex_enter(&pool_head_lock);
681 1.134 ad while (pp->pr_refcnt != 0)
682 1.134 ad cv_wait(&pool_busy, &pool_head_lock);
683 1.145 ad TAILQ_REMOVE(&pool_head, pp, pr_poollist);
684 1.101 thorpej if (drainpp == pp)
685 1.101 thorpej drainpp = NULL;
686 1.134 ad mutex_exit(&pool_head_lock);
687 1.101 thorpej
688 1.101 thorpej /* Remove this pool from its allocator's list of pools. */
689 1.134 ad mutex_enter(&pp->pr_alloc->pa_lock);
690 1.66 thorpej TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
691 1.134 ad mutex_exit(&pp->pr_alloc->pa_lock);
692 1.66 thorpej
693 1.178 elad mutex_enter(&pool_allocator_lock);
694 1.178 elad if (--pp->pr_alloc->pa_refcnt == 0)
695 1.178 elad mutex_destroy(&pp->pr_alloc->pa_lock);
696 1.178 elad mutex_exit(&pool_allocator_lock);
697 1.178 elad
698 1.134 ad mutex_enter(&pp->pr_lock);
699 1.101 thorpej
700 1.134 ad KASSERT(pp->pr_cache == NULL);
701 1.3 pk
702 1.3 pk #ifdef DIAGNOSTIC
703 1.20 thorpej if (pp->pr_nout != 0) {
704 1.80 provos panic("pool_destroy: pool busy: still out: %u",
705 1.20 thorpej pp->pr_nout);
706 1.3 pk }
707 1.3 pk #endif
708 1.1 pk
709 1.101 thorpej KASSERT(LIST_EMPTY(&pp->pr_fullpages));
710 1.101 thorpej KASSERT(LIST_EMPTY(&pp->pr_partpages));
711 1.101 thorpej
712 1.3 pk /* Remove all pages */
713 1.101 thorpej LIST_INIT(&pq);
714 1.88 chs while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
715 1.101 thorpej pr_rmpage(pp, ph, &pq);
716 1.101 thorpej
717 1.134 ad mutex_exit(&pp->pr_lock);
718 1.3 pk
719 1.101 thorpej pr_pagelist_free(pp, &pq);
720 1.134 ad cv_destroy(&pp->pr_cv);
721 1.134 ad mutex_destroy(&pp->pr_lock);
722 1.1 pk }
723 1.1 pk
724 1.68 thorpej void
725 1.68 thorpej pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
726 1.68 thorpej {
727 1.68 thorpej
728 1.68 thorpej /* XXX no locking -- must be used just after pool_init() */
729 1.68 thorpej #ifdef DIAGNOSTIC
730 1.68 thorpej if (pp->pr_drain_hook != NULL)
731 1.68 thorpej panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
732 1.68 thorpej #endif
733 1.68 thorpej pp->pr_drain_hook = fn;
734 1.68 thorpej pp->pr_drain_hook_arg = arg;
735 1.68 thorpej }
736 1.68 thorpej
737 1.88 chs static struct pool_item_header *
738 1.128 christos pool_alloc_item_header(struct pool *pp, void *storage, int flags)
739 1.55 thorpej {
740 1.55 thorpej struct pool_item_header *ph;
741 1.55 thorpej
742 1.55 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0)
743 1.128 christos ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
744 1.134 ad else
745 1.97 yamt ph = pool_get(pp->pr_phpool, flags);
746 1.55 thorpej
747 1.55 thorpej return (ph);
748 1.55 thorpej }
749 1.1 pk
750 1.1 pk /*
751 1.134 ad * Grab an item from the pool.
752 1.1 pk */
753 1.3 pk void *
754 1.56 sommerfe pool_get(struct pool *pp, int flags)
755 1.1 pk {
756 1.1 pk struct pool_item *pi;
757 1.3 pk struct pool_item_header *ph;
758 1.55 thorpej void *v;
759 1.1 pk
760 1.2 pk #ifdef DIAGNOSTIC
761 1.184 rmind if (pp->pr_itemsperpage == 0)
762 1.184 rmind panic("pool_get: pool '%s': pr_itemsperpage is zero, "
763 1.184 rmind "pool not initialized?", pp->pr_wchan);
764 1.185 rmind if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
765 1.185 rmind !cold && panicstr == NULL)
766 1.184 rmind panic("pool '%s' is IPL_NONE, but called from "
767 1.184 rmind "interrupt context\n", pp->pr_wchan);
768 1.184 rmind #endif
769 1.155 ad if (flags & PR_WAITOK) {
770 1.154 yamt ASSERT_SLEEPABLE();
771 1.155 ad }
772 1.1 pk
773 1.134 ad mutex_enter(&pp->pr_lock);
774 1.20 thorpej startover:
775 1.20 thorpej /*
776 1.20 thorpej * Check to see if we've reached the hard limit. If we have,
777 1.20 thorpej * and we can wait, then wait until an item has been returned to
778 1.20 thorpej * the pool.
779 1.20 thorpej */
780 1.20 thorpej #ifdef DIAGNOSTIC
781 1.34 thorpej if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
782 1.134 ad mutex_exit(&pp->pr_lock);
783 1.20 thorpej panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
784 1.20 thorpej }
785 1.20 thorpej #endif
786 1.34 thorpej if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
787 1.68 thorpej if (pp->pr_drain_hook != NULL) {
788 1.68 thorpej /*
789 1.68 thorpej * Since the drain hook is going to free things
790 1.68 thorpej * back to the pool, unlock, call the hook, re-lock,
791 1.68 thorpej * and check the hardlimit condition again.
792 1.68 thorpej */
793 1.134 ad mutex_exit(&pp->pr_lock);
794 1.68 thorpej (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
795 1.134 ad mutex_enter(&pp->pr_lock);
796 1.68 thorpej if (pp->pr_nout < pp->pr_hardlimit)
797 1.68 thorpej goto startover;
798 1.68 thorpej }
799 1.68 thorpej
800 1.29 sommerfe if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
801 1.20 thorpej /*
802 1.20 thorpej * XXX: A warning isn't logged in this case. Should
803 1.20 thorpej * it be?
804 1.20 thorpej */
805 1.20 thorpej pp->pr_flags |= PR_WANTED;
806 1.134 ad cv_wait(&pp->pr_cv, &pp->pr_lock);
807 1.20 thorpej goto startover;
808 1.20 thorpej }
809 1.31 thorpej
810 1.31 thorpej /*
811 1.31 thorpej * Log a message that the hard limit has been hit.
812 1.31 thorpej */
813 1.31 thorpej if (pp->pr_hardlimit_warning != NULL &&
814 1.31 thorpej ratecheck(&pp->pr_hardlimit_warning_last,
815 1.31 thorpej &pp->pr_hardlimit_ratecap))
816 1.31 thorpej log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
817 1.21 thorpej
818 1.21 thorpej pp->pr_nfail++;
819 1.21 thorpej
820 1.134 ad mutex_exit(&pp->pr_lock);
821 1.20 thorpej return (NULL);
822 1.20 thorpej }
823 1.20 thorpej
824 1.3 pk /*
825 1.3 pk * The convention we use is that if `curpage' is not NULL, then
826 1.3 pk * it points at a non-empty bucket. In particular, `curpage'
827 1.3 pk * never points at a page header which has PR_PHINPAGE set and
828 1.3 pk * has no items in its bucket.
829 1.3 pk */
830 1.20 thorpej if ((ph = pp->pr_curpage) == NULL) {
831 1.113 yamt int error;
832 1.113 yamt
833 1.20 thorpej #ifdef DIAGNOSTIC
834 1.20 thorpej if (pp->pr_nitems != 0) {
835 1.134 ad mutex_exit(&pp->pr_lock);
836 1.20 thorpej printf("pool_get: %s: curpage NULL, nitems %u\n",
837 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
838 1.80 provos panic("pool_get: nitems inconsistent");
839 1.20 thorpej }
840 1.20 thorpej #endif
841 1.20 thorpej
842 1.21 thorpej /*
843 1.21 thorpej * Call the back-end page allocator for more memory.
844 1.21 thorpej * Release the pool lock, as the back-end page allocator
845 1.21 thorpej * may block.
846 1.21 thorpej */
847 1.113 yamt error = pool_grow(pp, flags);
848 1.113 yamt if (error != 0) {
849 1.21 thorpej /*
850 1.55 thorpej * We were unable to allocate a page or item
851 1.55 thorpej * header, but we released the lock during
852 1.55 thorpej * allocation, so perhaps items were freed
853 1.55 thorpej * back to the pool. Check for this case.
854 1.21 thorpej */
855 1.21 thorpej if (pp->pr_curpage != NULL)
856 1.21 thorpej goto startover;
857 1.15 pk
858 1.117 yamt pp->pr_nfail++;
859 1.134 ad mutex_exit(&pp->pr_lock);
860 1.117 yamt return (NULL);
861 1.1 pk }
862 1.3 pk
863 1.20 thorpej /* Start the allocation process over. */
864 1.20 thorpej goto startover;
865 1.3 pk }
866 1.97 yamt if (pp->pr_roflags & PR_NOTOUCH) {
867 1.97 yamt #ifdef DIAGNOSTIC
868 1.97 yamt if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
869 1.134 ad mutex_exit(&pp->pr_lock);
870 1.97 yamt panic("pool_get: %s: page empty", pp->pr_wchan);
871 1.97 yamt }
872 1.97 yamt #endif
873 1.97 yamt v = pr_item_notouch_get(pp, ph);
874 1.97 yamt } else {
875 1.102 chs v = pi = LIST_FIRST(&ph->ph_itemlist);
876 1.97 yamt if (__predict_false(v == NULL)) {
877 1.134 ad mutex_exit(&pp->pr_lock);
878 1.97 yamt panic("pool_get: %s: page empty", pp->pr_wchan);
879 1.97 yamt }
880 1.20 thorpej #ifdef DIAGNOSTIC
881 1.97 yamt if (__predict_false(pp->pr_nitems == 0)) {
882 1.134 ad mutex_exit(&pp->pr_lock);
883 1.97 yamt printf("pool_get: %s: items on itemlist, nitems %u\n",
884 1.97 yamt pp->pr_wchan, pp->pr_nitems);
885 1.97 yamt panic("pool_get: nitems inconsistent");
886 1.97 yamt }
887 1.65 enami #endif
888 1.56 sommerfe
889 1.65 enami #ifdef DIAGNOSTIC
890 1.97 yamt if (__predict_false(pi->pi_magic != PI_MAGIC)) {
891 1.97 yamt panic("pool_get(%s): free list modified: "
892 1.97 yamt "magic=%x; page %p; item addr %p\n",
893 1.97 yamt pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
894 1.97 yamt }
895 1.3 pk #endif
896 1.3 pk
897 1.97 yamt /*
898 1.97 yamt * Remove from item list.
899 1.97 yamt */
900 1.102 chs LIST_REMOVE(pi, pi_list);
901 1.97 yamt }
902 1.20 thorpej pp->pr_nitems--;
903 1.20 thorpej pp->pr_nout++;
904 1.6 thorpej if (ph->ph_nmissing == 0) {
905 1.6 thorpej #ifdef DIAGNOSTIC
906 1.34 thorpej if (__predict_false(pp->pr_nidle == 0))
907 1.6 thorpej panic("pool_get: nidle inconsistent");
908 1.6 thorpej #endif
909 1.6 thorpej pp->pr_nidle--;
910 1.88 chs
911 1.88 chs /*
912 1.88 chs * This page was previously empty. Move it to the list of
913 1.88 chs * partially-full pages. This page is already curpage.
914 1.88 chs */
915 1.88 chs LIST_REMOVE(ph, ph_pagelist);
916 1.88 chs LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
917 1.6 thorpej }
918 1.3 pk ph->ph_nmissing++;
919 1.97 yamt if (ph->ph_nmissing == pp->pr_itemsperpage) {
920 1.21 thorpej #ifdef DIAGNOSTIC
921 1.97 yamt if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
922 1.102 chs !LIST_EMPTY(&ph->ph_itemlist))) {
923 1.134 ad mutex_exit(&pp->pr_lock);
924 1.21 thorpej panic("pool_get: %s: nmissing inconsistent",
925 1.21 thorpej pp->pr_wchan);
926 1.21 thorpej }
927 1.21 thorpej #endif
928 1.3 pk /*
929 1.88 chs * This page is now full. Move it to the full list
930 1.88 chs * and select a new current page.
931 1.3 pk */
932 1.88 chs LIST_REMOVE(ph, ph_pagelist);
933 1.88 chs LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
934 1.88 chs pool_update_curpage(pp);
935 1.1 pk }
936 1.3 pk
937 1.3 pk pp->pr_nget++;
938 1.20 thorpej
939 1.20 thorpej /*
940 1.20 thorpej * If we have a low water mark and we are now below that low
941 1.20 thorpej * water mark, add more items to the pool.
942 1.20 thorpej */
943 1.53 thorpej if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
944 1.20 thorpej /*
945 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
946 1.20 thorpej * to try again in a second or so? The latter could break
947 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
948 1.20 thorpej */
949 1.20 thorpej }
950 1.20 thorpej
951 1.134 ad mutex_exit(&pp->pr_lock);
952 1.125 ad KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
953 1.125 ad FREECHECK_OUT(&pp->pr_freecheck, v);
954 1.204 maxv pool_redzone_fill(pp, v);
955 1.1 pk return (v);
956 1.1 pk }
957 1.1 pk
958 1.1 pk /*
959 1.43 thorpej * Internal version of pool_put(). Pool is already locked/entered.
960 1.1 pk */
961 1.43 thorpej static void
962 1.101 thorpej pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
963 1.1 pk {
964 1.1 pk struct pool_item *pi = v;
965 1.3 pk struct pool_item_header *ph;
966 1.3 pk
967 1.134 ad KASSERT(mutex_owned(&pp->pr_lock));
968 1.204 maxv pool_redzone_check(pp, v);
969 1.125 ad FREECHECK_IN(&pp->pr_freecheck, v);
970 1.134 ad LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
971 1.61 chs
972 1.30 thorpej #ifdef DIAGNOSTIC
973 1.34 thorpej if (__predict_false(pp->pr_nout == 0)) {
974 1.30 thorpej printf("pool %s: putting with none out\n",
975 1.30 thorpej pp->pr_wchan);
976 1.30 thorpej panic("pool_put");
977 1.30 thorpej }
978 1.30 thorpej #endif
979 1.3 pk
980 1.121 yamt if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
981 1.3 pk panic("pool_put: %s: page header missing", pp->pr_wchan);
982 1.3 pk }
983 1.28 thorpej
984 1.3 pk /*
985 1.3 pk * Return to item list.
986 1.3 pk */
987 1.97 yamt if (pp->pr_roflags & PR_NOTOUCH) {
988 1.97 yamt pr_item_notouch_put(pp, ph, v);
989 1.97 yamt } else {
990 1.2 pk #ifdef DIAGNOSTIC
991 1.97 yamt pi->pi_magic = PI_MAGIC;
992 1.3 pk #endif
993 1.32 chs #ifdef DEBUG
994 1.97 yamt {
995 1.97 yamt int i, *ip = v;
996 1.32 chs
997 1.97 yamt for (i = 0; i < pp->pr_size / sizeof(int); i++) {
998 1.97 yamt *ip++ = PI_MAGIC;
999 1.97 yamt }
1000 1.32 chs }
1001 1.32 chs #endif
1002 1.32 chs
1003 1.102 chs LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1004 1.97 yamt }
1005 1.79 thorpej KDASSERT(ph->ph_nmissing != 0);
1006 1.3 pk ph->ph_nmissing--;
1007 1.3 pk pp->pr_nput++;
1008 1.20 thorpej pp->pr_nitems++;
1009 1.20 thorpej pp->pr_nout--;
1010 1.3 pk
1011 1.3 pk /* Cancel "pool empty" condition if it exists */
1012 1.3 pk if (pp->pr_curpage == NULL)
1013 1.3 pk pp->pr_curpage = ph;
1014 1.3 pk
1015 1.3 pk if (pp->pr_flags & PR_WANTED) {
1016 1.3 pk pp->pr_flags &= ~PR_WANTED;
1017 1.134 ad cv_broadcast(&pp->pr_cv);
1018 1.3 pk }
1019 1.3 pk
1020 1.3 pk /*
1021 1.88 chs * If this page is now empty, do one of two things:
1022 1.21 thorpej *
1023 1.88 chs * (1) If we have more pages than the page high water mark,
1024 1.96 thorpej * free the page back to the system. ONLY CONSIDER
1025 1.90 thorpej * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1026 1.90 thorpej * CLAIM.
1027 1.21 thorpej *
1028 1.88 chs * (2) Otherwise, move the page to the empty page list.
1029 1.88 chs *
1030 1.88 chs * Either way, select a new current page (so we use a partially-full
1031 1.88 chs * page if one is available).
1032 1.3 pk */
1033 1.3 pk if (ph->ph_nmissing == 0) {
1034 1.6 thorpej pp->pr_nidle++;
1035 1.90 thorpej if (pp->pr_npages > pp->pr_minpages &&
1036 1.152 yamt pp->pr_npages > pp->pr_maxpages) {
1037 1.101 thorpej pr_rmpage(pp, ph, pq);
1038 1.3 pk } else {
1039 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1040 1.88 chs LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1041 1.3 pk
1042 1.21 thorpej /*
1043 1.21 thorpej * Update the timestamp on the page. A page must
1044 1.21 thorpej * be idle for some period of time before it can
1045 1.21 thorpej * be reclaimed by the pagedaemon. This minimizes
1046 1.21 thorpej * ping-pong'ing for memory.
1047 1.151 yamt *
1048 1.151 yamt * note for 64-bit time_t: truncating to 32-bit is not
1049 1.151 yamt * a problem for our usage.
1050 1.21 thorpej */
1051 1.151 yamt ph->ph_time = time_uptime;
1052 1.1 pk }
1053 1.88 chs pool_update_curpage(pp);
1054 1.1 pk }
1055 1.88 chs
1056 1.21 thorpej /*
1057 1.88 chs * If the page was previously completely full, move it to the
1058 1.88 chs * partially-full list and make it the current page. The next
1059 1.88 chs * allocation will get the item from this page, instead of
1060 1.88 chs * further fragmenting the pool.
1061 1.21 thorpej */
1062 1.21 thorpej else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1063 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1064 1.88 chs LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1065 1.21 thorpej pp->pr_curpage = ph;
1066 1.21 thorpej }
1067 1.43 thorpej }
1068 1.43 thorpej
1069 1.56 sommerfe void
1070 1.56 sommerfe pool_put(struct pool *pp, void *v)
1071 1.56 sommerfe {
1072 1.101 thorpej struct pool_pagelist pq;
1073 1.101 thorpej
1074 1.101 thorpej LIST_INIT(&pq);
1075 1.56 sommerfe
1076 1.134 ad mutex_enter(&pp->pr_lock);
1077 1.101 thorpej pool_do_put(pp, v, &pq);
1078 1.134 ad mutex_exit(&pp->pr_lock);
1079 1.56 sommerfe
1080 1.102 chs pr_pagelist_free(pp, &pq);
1081 1.56 sommerfe }
1082 1.57 sommerfe
1083 1.74 thorpej /*
1084 1.113 yamt * pool_grow: grow a pool by a page.
1085 1.113 yamt *
1086 1.113 yamt * => called with pool locked.
1087 1.113 yamt * => unlock and relock the pool.
1088 1.113 yamt * => return with pool locked.
1089 1.113 yamt */
1090 1.113 yamt
1091 1.113 yamt static int
1092 1.113 yamt pool_grow(struct pool *pp, int flags)
1093 1.113 yamt {
1094 1.113 yamt struct pool_item_header *ph = NULL;
1095 1.113 yamt char *cp;
1096 1.113 yamt
1097 1.134 ad mutex_exit(&pp->pr_lock);
1098 1.113 yamt cp = pool_allocator_alloc(pp, flags);
1099 1.113 yamt if (__predict_true(cp != NULL)) {
1100 1.113 yamt ph = pool_alloc_item_header(pp, cp, flags);
1101 1.113 yamt }
1102 1.113 yamt if (__predict_false(cp == NULL || ph == NULL)) {
1103 1.113 yamt if (cp != NULL) {
1104 1.113 yamt pool_allocator_free(pp, cp);
1105 1.113 yamt }
1106 1.134 ad mutex_enter(&pp->pr_lock);
1107 1.113 yamt return ENOMEM;
1108 1.113 yamt }
1109 1.113 yamt
1110 1.134 ad mutex_enter(&pp->pr_lock);
1111 1.113 yamt pool_prime_page(pp, cp, ph);
1112 1.113 yamt pp->pr_npagealloc++;
1113 1.113 yamt return 0;
1114 1.113 yamt }
1115 1.113 yamt
1116 1.113 yamt /*
1117 1.74 thorpej * Add N items to the pool.
1118 1.74 thorpej */
1119 1.74 thorpej int
1120 1.74 thorpej pool_prime(struct pool *pp, int n)
1121 1.74 thorpej {
1122 1.75 simonb int newpages;
1123 1.113 yamt int error = 0;
1124 1.74 thorpej
1125 1.134 ad mutex_enter(&pp->pr_lock);
1126 1.74 thorpej
1127 1.74 thorpej newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1128 1.74 thorpej
1129 1.74 thorpej while (newpages-- > 0) {
1130 1.113 yamt error = pool_grow(pp, PR_NOWAIT);
1131 1.113 yamt if (error) {
1132 1.74 thorpej break;
1133 1.74 thorpej }
1134 1.74 thorpej pp->pr_minpages++;
1135 1.74 thorpej }
1136 1.74 thorpej
1137 1.74 thorpej if (pp->pr_minpages >= pp->pr_maxpages)
1138 1.74 thorpej pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1139 1.74 thorpej
1140 1.134 ad mutex_exit(&pp->pr_lock);
1141 1.113 yamt return error;
1142 1.74 thorpej }
1143 1.55 thorpej
1144 1.55 thorpej /*
1145 1.3 pk * Add a page worth of items to the pool.
1146 1.21 thorpej *
1147 1.21 thorpej * Note, we must be called with the pool descriptor LOCKED.
1148 1.3 pk */
1149 1.55 thorpej static void
1150 1.128 christos pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1151 1.3 pk {
1152 1.3 pk struct pool_item *pi;
1153 1.128 christos void *cp = storage;
1154 1.125 ad const unsigned int align = pp->pr_align;
1155 1.125 ad const unsigned int ioff = pp->pr_itemoffset;
1156 1.55 thorpej int n;
1157 1.36 pk
1158 1.134 ad KASSERT(mutex_owned(&pp->pr_lock));
1159 1.91 yamt
1160 1.66 thorpej #ifdef DIAGNOSTIC
1161 1.121 yamt if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1162 1.150 skrll ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1163 1.36 pk panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1164 1.66 thorpej #endif
1165 1.3 pk
1166 1.3 pk /*
1167 1.3 pk * Insert page header.
1168 1.3 pk */
1169 1.88 chs LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1170 1.102 chs LIST_INIT(&ph->ph_itemlist);
1171 1.3 pk ph->ph_page = storage;
1172 1.3 pk ph->ph_nmissing = 0;
1173 1.151 yamt ph->ph_time = time_uptime;
1174 1.88 chs if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1175 1.88 chs SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1176 1.3 pk
1177 1.6 thorpej pp->pr_nidle++;
1178 1.6 thorpej
1179 1.3 pk /*
1180 1.3 pk * Color this page.
1181 1.3 pk */
1182 1.141 yamt ph->ph_off = pp->pr_curcolor;
1183 1.141 yamt cp = (char *)cp + ph->ph_off;
1184 1.3 pk if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1185 1.3 pk pp->pr_curcolor = 0;
1186 1.3 pk
1187 1.3 pk /*
1188 1.3 pk * Adjust storage to apply alignment at `pr_itemoffset' within each item.
1189 1.3 pk */
1190 1.3 pk if (ioff != 0)
1191 1.128 christos cp = (char *)cp + align - ioff;
1192 1.3 pk
1193 1.125 ad KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1194 1.125 ad
1195 1.3 pk /*
1196 1.3 pk * Insert remaining chunks on the bucket list.
1197 1.3 pk */
1198 1.3 pk n = pp->pr_itemsperpage;
1199 1.20 thorpej pp->pr_nitems += n;
1200 1.3 pk
1201 1.97 yamt if (pp->pr_roflags & PR_NOTOUCH) {
1202 1.141 yamt pr_item_notouch_init(pp, ph);
1203 1.97 yamt } else {
1204 1.97 yamt while (n--) {
1205 1.97 yamt pi = (struct pool_item *)cp;
1206 1.78 thorpej
1207 1.97 yamt KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1208 1.3 pk
1209 1.97 yamt /* Insert on page list */
1210 1.102 chs LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1211 1.3 pk #ifdef DIAGNOSTIC
1212 1.97 yamt pi->pi_magic = PI_MAGIC;
1213 1.3 pk #endif
1214 1.128 christos cp = (char *)cp + pp->pr_size;
1215 1.125 ad
1216 1.125 ad KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1217 1.97 yamt }
1218 1.3 pk }
1219 1.3 pk
1220 1.3 pk /*
1221 1.3 pk * If the pool was depleted, point at the new page.
1222 1.3 pk */
1223 1.3 pk if (pp->pr_curpage == NULL)
1224 1.3 pk pp->pr_curpage = ph;
1225 1.3 pk
1226 1.3 pk if (++pp->pr_npages > pp->pr_hiwat)
1227 1.3 pk pp->pr_hiwat = pp->pr_npages;
1228 1.3 pk }
1229 1.3 pk
1230 1.20 thorpej /*
1231 1.52 thorpej * Used by pool_get() when nitems drops below the low water mark; it
1232 1.88 chs * grows the pool to catch pr_nitems up with the low water mark.
1233 1.20 thorpej *
1234 1.21 thorpej * Note 1, we never wait for memory here, we let the caller decide what to do.
1235 1.20 thorpej *
1236 1.73 thorpej * Note 2, we must be called with the pool already locked, and we return
1237 1.20 thorpej * with it locked.
1238 1.20 thorpej */
1239 1.20 thorpej static int
1240 1.42 thorpej pool_catchup(struct pool *pp)
1241 1.20 thorpej {
1242 1.20 thorpej int error = 0;
1243 1.20 thorpej
1244 1.54 thorpej while (POOL_NEEDS_CATCHUP(pp)) {
1245 1.113 yamt error = pool_grow(pp, PR_NOWAIT);
1246 1.113 yamt if (error) {
1247 1.20 thorpej break;
1248 1.20 thorpej }
1249 1.20 thorpej }
1250 1.113 yamt return error;
1251 1.20 thorpej }
1252 1.20 thorpej
1253 1.88 chs static void
1254 1.88 chs pool_update_curpage(struct pool *pp)
1255 1.88 chs {
1256 1.88 chs
1257 1.88 chs pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1258 1.88 chs if (pp->pr_curpage == NULL) {
1259 1.88 chs pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1260 1.88 chs }
1261 1.168 yamt KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
1262 1.168 yamt (pp->pr_curpage != NULL && pp->pr_nitems > 0));
1263 1.88 chs }
1264 1.88 chs
1265 1.3 pk void
1266 1.42 thorpej pool_setlowat(struct pool *pp, int n)
1267 1.3 pk {
1268 1.15 pk
1269 1.134 ad mutex_enter(&pp->pr_lock);
1270 1.21 thorpej
1271 1.3 pk pp->pr_minitems = n;
1272 1.15 pk pp->pr_minpages = (n == 0)
1273 1.15 pk ? 0
1274 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1275 1.20 thorpej
1276 1.20 thorpej /* Make sure we're caught up with the newly-set low water mark. */
1277 1.75 simonb if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1278 1.20 thorpej /*
1279 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1280 1.20 thorpej * to try again in a second or so? The latter could break
1281 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1282 1.20 thorpej */
1283 1.20 thorpej }
1284 1.21 thorpej
1285 1.134 ad mutex_exit(&pp->pr_lock);
1286 1.3 pk }
1287 1.3 pk
1288 1.3 pk void
1289 1.42 thorpej pool_sethiwat(struct pool *pp, int n)
1290 1.3 pk {
1291 1.15 pk
1292 1.134 ad mutex_enter(&pp->pr_lock);
1293 1.21 thorpej
1294 1.15 pk pp->pr_maxpages = (n == 0)
1295 1.15 pk ? 0
1296 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1297 1.21 thorpej
1298 1.134 ad mutex_exit(&pp->pr_lock);
1299 1.3 pk }
1300 1.3 pk
1301 1.20 thorpej void
1302 1.42 thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1303 1.20 thorpej {
1304 1.20 thorpej
1305 1.134 ad mutex_enter(&pp->pr_lock);
1306 1.20 thorpej
1307 1.20 thorpej pp->pr_hardlimit = n;
1308 1.20 thorpej pp->pr_hardlimit_warning = warnmess;
1309 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1310 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
1311 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
1312 1.20 thorpej
1313 1.20 thorpej /*
1314 1.21 thorpej * In-line version of pool_sethiwat(), because we don't want to
1315 1.21 thorpej * release the lock.
1316 1.20 thorpej */
1317 1.20 thorpej pp->pr_maxpages = (n == 0)
1318 1.20 thorpej ? 0
1319 1.20 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1320 1.21 thorpej
1321 1.134 ad mutex_exit(&pp->pr_lock);
1322 1.20 thorpej }
1323 1.3 pk
1324 1.3 pk /*
1325 1.3 pk * Release all complete pages that have not been used recently.
1326 1.184 rmind *
1327 1.197 jym * Must not be called from interrupt context.
1328 1.3 pk */
1329 1.66 thorpej int
1330 1.56 sommerfe pool_reclaim(struct pool *pp)
1331 1.3 pk {
1332 1.3 pk struct pool_item_header *ph, *phnext;
1333 1.61 chs struct pool_pagelist pq;
1334 1.151 yamt uint32_t curtime;
1335 1.134 ad bool klock;
1336 1.134 ad int rv;
1337 1.3 pk
1338 1.197 jym KASSERT(!cpu_intr_p() && !cpu_softintr_p());
1339 1.184 rmind
1340 1.68 thorpej if (pp->pr_drain_hook != NULL) {
1341 1.68 thorpej /*
1342 1.68 thorpej * The drain hook must be called with the pool unlocked.
1343 1.68 thorpej */
1344 1.68 thorpej (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1345 1.68 thorpej }
1346 1.68 thorpej
1347 1.134 ad /*
1348 1.157 ad * XXXSMP Take the kernel lock, because we do not want to
1349 1.157 ad * cause non-MPSAFE code to block.
1350 1.134 ad */
1351 1.134 ad if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
1352 1.134 ad pp->pr_ipl == IPL_SOFTSERIAL) {
1353 1.134 ad KERNEL_LOCK(1, NULL);
1354 1.134 ad klock = true;
1355 1.134 ad } else
1356 1.134 ad klock = false;
1357 1.134 ad
1358 1.134 ad /* Reclaim items from the pool's cache (if any). */
1359 1.134 ad if (pp->pr_cache != NULL)
1360 1.134 ad pool_cache_invalidate(pp->pr_cache);
1361 1.134 ad
1362 1.134 ad if (mutex_tryenter(&pp->pr_lock) == 0) {
1363 1.134 ad if (klock) {
1364 1.134 ad KERNEL_UNLOCK_ONE(NULL);
1365 1.134 ad }
1366 1.66 thorpej return (0);
1367 1.134 ad }
1368 1.68 thorpej
1369 1.88 chs LIST_INIT(&pq);
1370 1.43 thorpej
1371 1.151 yamt curtime = time_uptime;
1372 1.21 thorpej
1373 1.88 chs for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1374 1.88 chs phnext = LIST_NEXT(ph, ph_pagelist);
1375 1.3 pk
1376 1.3 pk /* Check our minimum page claim */
1377 1.3 pk if (pp->pr_npages <= pp->pr_minpages)
1378 1.3 pk break;
1379 1.3 pk
1380 1.88 chs KASSERT(ph->ph_nmissing == 0);
1381 1.191 para if (curtime - ph->ph_time < pool_inactive_time)
1382 1.88 chs continue;
1383 1.21 thorpej
1384 1.88 chs /*
1385 1.88 chs * If freeing this page would put us below
1386 1.88 chs * the low water mark, stop now.
1387 1.88 chs */
1388 1.88 chs if ((pp->pr_nitems - pp->pr_itemsperpage) <
1389 1.88 chs pp->pr_minitems)
1390 1.88 chs break;
1391 1.21 thorpej
1392 1.88 chs pr_rmpage(pp, ph, &pq);
1393 1.3 pk }
1394 1.3 pk
1395 1.134 ad mutex_exit(&pp->pr_lock);
1396 1.134 ad
1397 1.134 ad if (LIST_EMPTY(&pq))
1398 1.134 ad rv = 0;
1399 1.134 ad else {
1400 1.134 ad pr_pagelist_free(pp, &pq);
1401 1.134 ad rv = 1;
1402 1.134 ad }
1403 1.134 ad
1404 1.134 ad if (klock) {
1405 1.134 ad KERNEL_UNLOCK_ONE(NULL);
1406 1.134 ad }
1407 1.66 thorpej
1408 1.134 ad return (rv);
1409 1.3 pk }
1410 1.3 pk
1411 1.3 pk /*
1412 1.197 jym * Drain pools, one at a time. The drained pool is returned in *ppp.
1413 1.131 ad *
1414 1.134 ad * Note, must never be called from interrupt context.
1415 1.3 pk */
1416 1.197 jym bool
1417 1.197 jym pool_drain(struct pool **ppp)
1418 1.3 pk {
1419 1.197 jym bool reclaimed;
1420 1.3 pk struct pool *pp;
1421 1.134 ad
1422 1.145 ad KASSERT(!TAILQ_EMPTY(&pool_head));
1423 1.3 pk
1424 1.61 chs pp = NULL;
1425 1.134 ad
1426 1.134 ad /* Find next pool to drain, and add a reference. */
1427 1.134 ad mutex_enter(&pool_head_lock);
1428 1.134 ad do {
1429 1.134 ad if (drainpp == NULL) {
1430 1.145 ad drainpp = TAILQ_FIRST(&pool_head);
1431 1.134 ad }
1432 1.134 ad if (drainpp != NULL) {
1433 1.134 ad pp = drainpp;
1434 1.145 ad drainpp = TAILQ_NEXT(pp, pr_poollist);
1435 1.134 ad }
1436 1.134 ad /*
1437 1.134 ad * Skip completely idle pools. We depend on at least
1438 1.134 ad * one pool in the system being active.
1439 1.134 ad */
1440 1.134 ad } while (pp == NULL || pp->pr_npages == 0);
1441 1.134 ad pp->pr_refcnt++;
1442 1.134 ad mutex_exit(&pool_head_lock);
1443 1.134 ad
1444 1.134 ad /* Drain the cache (if any) and the pool. */
1445 1.186 pooka reclaimed = pool_reclaim(pp);
1446 1.134 ad
1447 1.134 ad /* Finally, unlock the pool. */
1448 1.134 ad mutex_enter(&pool_head_lock);
1449 1.134 ad pp->pr_refcnt--;
1450 1.134 ad cv_broadcast(&pool_busy);
1451 1.134 ad mutex_exit(&pool_head_lock);
1452 1.186 pooka
1453 1.197 jym if (ppp != NULL)
1454 1.197 jym *ppp = pp;
1455 1.197 jym
1456 1.186 pooka return reclaimed;
1457 1.3 pk }
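/*
 * Illustrative sketch (editor's addition): how a memory-pressure loop
 * such as the pagedaemon might cycle through pools with pool_drain().
 * The iteration bound is hypothetical.
 */
#if 0
static void
drain_some_pools(void)
{
	struct pool *pp;
	int i;

	for (i = 0; i < 16; i++) {
		if (pool_drain(&pp))
			break;	/* pp freed at least one page back */
	}
}
#endif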
1458 1.3 pk
1459 1.3 pk /*
1460 1.3 pk * Diagnostic helpers.
1461 1.3 pk */
1462 1.21 thorpej
1463 1.25 thorpej void
1464 1.108 yamt pool_printall(const char *modif, void (*pr)(const char *, ...))
1465 1.108 yamt {
1466 1.108 yamt struct pool *pp;
1467 1.108 yamt
1468 1.145 ad TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
1469 1.108 yamt pool_printit(pp, modif, pr);
1470 1.108 yamt }
1471 1.108 yamt }
1472 1.108 yamt
1473 1.108 yamt void
1474 1.42 thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1475 1.25 thorpej {
1476 1.25 thorpej
1477 1.25 thorpej if (pp == NULL) {
1478 1.25 thorpej (*pr)("Must specify a pool to print.\n");
1479 1.25 thorpej return;
1480 1.25 thorpej }
1481 1.25 thorpej
1482 1.25 thorpej pool_print1(pp, modif, pr);
1483 1.25 thorpej }
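
/*
 * Illustrative note (assumed DDB syntax): the routines above are
 * reached from the kernel debugger, e.g. "show pool" and
 * "show all pools", and the modifier characters parsed by
 * pool_print1() below select extra output: 'l' (log), 'p' (page
 * lists) and 'c' (cache groups).
 */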
1484 1.25 thorpej
1485 1.21 thorpej static void
1486 1.124 yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1487 1.97 yamt void (*pr)(const char *, ...))
1488 1.88 chs {
1489 1.88 chs struct pool_item_header *ph;
1490 1.88 chs #ifdef DIAGNOSTIC
1491 1.88 chs struct pool_item *pi;
1492 1.88 chs #endif
1493 1.88 chs
1494 1.88 chs LIST_FOREACH(ph, pl, ph_pagelist) {
1495 1.151 yamt (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
1496 1.151 yamt ph->ph_page, ph->ph_nmissing, ph->ph_time);
1497 1.88 chs #ifdef DIAGNOSTIC
1498 1.97 yamt if (!(pp->pr_roflags & PR_NOTOUCH)) {
1499 1.102 chs LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1500 1.97 yamt if (pi->pi_magic != PI_MAGIC) {
1501 1.97 yamt (*pr)("\t\t\titem %p, magic 0x%x\n",
1502 1.97 yamt pi, pi->pi_magic);
1503 1.97 yamt }
1504 1.88 chs }
1505 1.88 chs }
1506 1.88 chs #endif
1507 1.88 chs }
1508 1.88 chs }
1509 1.88 chs
1510 1.88 chs static void
1511 1.42 thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1512 1.3 pk {
1513 1.25 thorpej struct pool_item_header *ph;
1514 1.134 ad pool_cache_t pc;
1515 1.134 ad pcg_t *pcg;
1516 1.134 ad pool_cache_cpu_t *cc;
1517 1.134 ad uint64_t cpuhit, cpumiss;
1518 1.44 thorpej int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1519 1.25 thorpej char c;
1520 1.25 thorpej
1521 1.25 thorpej while ((c = *modif++) != '\0') {
1522 1.25 thorpej if (c == 'l')
1523 1.25 thorpej print_log = 1;
1524 1.25 thorpej if (c == 'p')
1525 1.25 thorpej print_pagelist = 1;
1526 1.44 thorpej if (c == 'c')
1527 1.44 thorpej print_cache = 1;
1528 1.25 thorpej }
1529 1.25 thorpej
1530 1.134 ad if ((pc = pp->pr_cache) != NULL) {
1531 1.134 ad (*pr)("POOL CACHE");
1532 1.134 ad } else {
1533 1.134 ad (*pr)("POOL");
1534 1.134 ad }
1535 1.134 ad
1536 1.134 ad (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1537 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1538 1.25 thorpej pp->pr_roflags);
1539 1.66 thorpej (*pr)("\talloc %p\n", pp->pr_alloc);
1540 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1541 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1542 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1543 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1544 1.25 thorpej
1545 1.134 ad (*pr)("\tnget %lu, nfail %lu, nput %lu\n",
1546 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1547 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1548 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1549 1.25 thorpej
1550 1.25 thorpej if (print_pagelist == 0)
1551 1.25 thorpej goto skip_pagelist;
1552 1.25 thorpej
1553 1.88 chs if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1554 1.88 chs (*pr)("\n\tempty page list:\n");
1555 1.97 yamt pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1556 1.88 chs if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1557 1.88 chs (*pr)("\n\tfull page list:\n");
1558 1.97 yamt pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1559 1.88 chs if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1560 1.88 chs (*pr)("\n\tpartial-page list:\n");
1561 1.97 yamt pool_print_pagelist(pp, &pp->pr_partpages, pr);
1562 1.88 chs
1563 1.25 thorpej if (pp->pr_curpage == NULL)
1564 1.25 thorpej (*pr)("\tno current page\n");
1565 1.25 thorpej else
1566 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1567 1.25 thorpej
1568 1.25 thorpej skip_pagelist:
1569 1.25 thorpej if (print_log == 0)
1570 1.25 thorpej goto skip_log;
1571 1.25 thorpej
1572 1.25 thorpej (*pr)("\n");
1573 1.3 pk
1574 1.25 thorpej skip_log:
1575 1.44 thorpej
1576 1.102 chs #define PR_GROUPLIST(pcg) \
1577 1.102 chs (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1578 1.142 ad for (i = 0; i < pcg->pcg_size; i++) { \
1579 1.102 chs if (pcg->pcg_objects[i].pcgo_pa != \
1580 1.102 chs POOL_PADDR_INVALID) { \
1581 1.102 chs (*pr)("\t\t\t%p, 0x%llx\n", \
1582 1.102 chs pcg->pcg_objects[i].pcgo_va, \
1583 1.102 chs (unsigned long long) \
1584 1.102 chs pcg->pcg_objects[i].pcgo_pa); \
1585 1.102 chs } else { \
1586 1.102 chs (*pr)("\t\t\t%p\n", \
1587 1.102 chs pcg->pcg_objects[i].pcgo_va); \
1588 1.102 chs } \
1589 1.102 chs }
1590 1.102 chs
1591 1.134 ad if (pc != NULL) {
1592 1.134 ad cpuhit = 0;
1593 1.134 ad cpumiss = 0;
1594 1.183 ad for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
1595 1.134 ad if ((cc = pc->pc_cpus[i]) == NULL)
1596 1.134 ad continue;
1597 1.134 ad cpuhit += cc->cc_hits;
1598 1.134 ad cpumiss += cc->cc_misses;
1599 1.134 ad }
1600 1.134 ad (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
1601 1.134 ad (*pr)("\tcache layer hits %llu misses %llu\n",
1602 1.134 ad pc->pc_hits, pc->pc_misses);
1603 1.134 ad (*pr)("\tcache layer entry uncontended %llu contended %llu\n",
1604 1.134 ad pc->pc_hits + pc->pc_misses - pc->pc_contended,
1605 1.134 ad pc->pc_contended);
1606 1.134 ad (*pr)("\tcache layer empty groups %u full groups %u\n",
1607 1.134 ad pc->pc_nempty, pc->pc_nfull);
1608 1.134 ad if (print_cache) {
1609 1.134 ad (*pr)("\tfull cache groups:\n");
1610 1.134 ad for (pcg = pc->pc_fullgroups; pcg != NULL;
1611 1.134 ad pcg = pcg->pcg_next) {
1612 1.134 ad PR_GROUPLIST(pcg);
1613 1.134 ad }
1614 1.134 ad (*pr)("\tempty cache groups:\n");
1615 1.134 ad for (pcg = pc->pc_emptygroups; pcg != NULL;
1616 1.134 ad pcg = pcg->pcg_next) {
1617 1.134 ad PR_GROUPLIST(pcg);
1618 1.134 ad }
1619 1.103 chs }
1620 1.44 thorpej }
1621 1.102 chs #undef PR_GROUPLIST
1622 1.88 chs }
1623 1.88 chs
1624 1.88 chs static int
1625 1.88 chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1626 1.88 chs {
1627 1.88 chs struct pool_item *pi;
1628 1.128 christos void *page;
1629 1.88 chs int n;
1630 1.88 chs
1631 1.121 yamt if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1632 1.128 christos page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1633 1.121 yamt if (page != ph->ph_page &&
1634 1.121 yamt (pp->pr_roflags & PR_PHINPAGE) != 0) {
1635 1.121 yamt if (label != NULL)
1636 1.121 yamt printf("%s: ", label);
1637 1.121 yamt printf("pool(%p:%s): page inconsistency: page %p;"
1638 1.121 yamt " at page head addr %p (p %p)\n", pp,
1639 1.121 yamt pp->pr_wchan, ph->ph_page,
1640 1.121 yamt ph, page);
1641 1.121 yamt return 1;
1642 1.121 yamt }
1643 1.88 chs }
1644 1.3 pk
1645 1.97 yamt if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1646 1.97 yamt return 0;
1647 1.97 yamt
1648 1.102 chs for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1649 1.88 chs pi != NULL;
1650 1.102 chs pi = LIST_NEXT(pi,pi_list), n++) {
1651 1.88 chs
1652 1.88 chs #ifdef DIAGNOSTIC
1653 1.88 chs if (pi->pi_magic != PI_MAGIC) {
1654 1.88 chs if (label != NULL)
1655 1.88 chs printf("%s: ", label);
1656 1.88 chs printf("pool(%s): free list modified: magic=%x;"
1657 1.121 yamt " page %p; item ordinal %d; addr %p\n",
1658 1.88 chs pp->pr_wchan, pi->pi_magic, ph->ph_page,
1659 1.121 yamt n, pi);
1660 1.88 chs panic("pool");
1661 1.88 chs }
1662 1.88 chs #endif
1663 1.121 yamt if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1664 1.121 yamt continue;
1665 1.121 yamt }
1666 1.128 christos page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1667 1.88 chs if (page == ph->ph_page)
1668 1.88 chs continue;
1669 1.88 chs
1670 1.88 chs if (label != NULL)
1671 1.88 chs printf("%s: ", label);
1672 1.88 chs printf("pool(%p:%s): page inconsistency: page %p;"
1673 1.88 chs " item ordinal %d; addr %p (p %p)\n", pp,
1674 1.88 chs pp->pr_wchan, ph->ph_page,
1675 1.88 chs n, pi, page);
1676 1.88 chs return 1;
1677 1.88 chs }
1678 1.88 chs return 0;
1679 1.3 pk }
1680 1.3 pk
1681 1.88 chs
1682 1.3 pk int
1683 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1684 1.3 pk {
1685 1.3 pk struct pool_item_header *ph;
1686 1.3 pk int r = 0;
1687 1.3 pk
1688 1.134 ad mutex_enter(&pp->pr_lock);
1689 1.88 chs LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1690 1.88 chs r = pool_chk_page(pp, label, ph);
1691 1.88 chs if (r) {
1692 1.88 chs goto out;
1693 1.88 chs }
1694 1.88 chs }
1695 1.88 chs LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1696 1.88 chs r = pool_chk_page(pp, label, ph);
1697 1.88 chs if (r) {
1698 1.3 pk goto out;
1699 1.3 pk }
1700 1.88 chs }
1701 1.88 chs LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1702 1.88 chs r = pool_chk_page(pp, label, ph);
1703 1.88 chs if (r) {
1704 1.3 pk goto out;
1705 1.3 pk }
1706 1.3 pk }
1707 1.88 chs
1708 1.3 pk out:
1709 1.134 ad mutex_exit(&pp->pr_lock);
1710 1.3 pk return (r);
1711 1.43 thorpej }
1712 1.43 thorpej
1713 1.43 thorpej /*
1714 1.43 thorpej * pool_cache_init:
1715 1.43 thorpej *
1716 1.43 thorpej * Initialize a pool cache.
1717 1.134 ad */
1718 1.134 ad pool_cache_t
1719 1.134 ad pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
1720 1.134 ad const char *wchan, struct pool_allocator *palloc, int ipl,
1721 1.134 ad int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
1722 1.134 ad {
1723 1.134 ad pool_cache_t pc;
1724 1.134 ad
1725 1.134 ad pc = pool_get(&cache_pool, PR_WAITOK);
1726 1.134 ad if (pc == NULL)
1727 1.134 ad return NULL;
1728 1.134 ad
1729 1.134 ad pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
1730 1.134 ad palloc, ipl, ctor, dtor, arg);
1731 1.134 ad
1732 1.134 ad return pc;
1733 1.134 ad }
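
/*
 * Illustrative sketch: creating a cache of constructed objects.
 * "struct frob", frob_ctor() and frob_dtor() are hypothetical; the
 * ctor/dtor signatures are the ones pool_cache_init() expects.
 */
#if 0
struct frob {
	kmutex_t f_lock;
	int	 f_state;
};

static pool_cache_t frob_cache;

static int
frob_ctor(void *arg, void *obj, int flags)
{
	struct frob *f = obj;

	/* Run once per object as it enters the cache. */
	mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
	f->f_state = 0;
	return 0;
}

static void
frob_dtor(void *arg, void *obj)
{
	struct frob *f = obj;

	/* Run once per object as it leaves the cache for good. */
	mutex_destroy(&f->f_lock);
}

static void
frob_subsystem_init(void)
{
	frob_cache = pool_cache_init(sizeof(struct frob), 0, 0, 0,
	    "frobpl", NULL, IPL_NONE, frob_ctor, frob_dtor, NULL);
}
#endif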
1734 1.134 ad
1735 1.134 ad /*
1736 1.134 ad * pool_cache_bootstrap:
1737 1.43 thorpej *
1738 1.134 ad * Kernel-private version of pool_cache_init(). The caller
1739 1.134 ad * provides initial storage.
1740 1.43 thorpej */
1741 1.43 thorpej void
1742 1.134 ad pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
1743 1.134 ad u_int align_offset, u_int flags, const char *wchan,
1744 1.134 ad struct pool_allocator *palloc, int ipl,
1745 1.134 ad int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
1746 1.43 thorpej void *arg)
1747 1.43 thorpej {
1748 1.134 ad CPU_INFO_ITERATOR cii;
1749 1.145 ad pool_cache_t pc1;
1750 1.134 ad struct cpu_info *ci;
1751 1.134 ad struct pool *pp;
1752 1.134 ad
1753 1.134 ad pp = &pc->pc_pool;
1754 1.134 ad if (palloc == NULL && ipl == IPL_NONE)
1755 1.134 ad palloc = &pool_allocator_nointr;
1756 1.134 ad pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
1757 1.157 ad mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
1758 1.43 thorpej
1759 1.134 ad if (ctor == NULL) {
1760 1.134 ad ctor = (int (*)(void *, void *, int))nullop;
1761 1.134 ad }
1762 1.134 ad if (dtor == NULL) {
1763 1.134 ad dtor = (void (*)(void *, void *))nullop;
1764 1.134 ad }
1765 1.43 thorpej
1766 1.134 ad pc->pc_emptygroups = NULL;
1767 1.134 ad pc->pc_fullgroups = NULL;
1768 1.134 ad pc->pc_partgroups = NULL;
1769 1.43 thorpej pc->pc_ctor = ctor;
1770 1.43 thorpej pc->pc_dtor = dtor;
1771 1.43 thorpej pc->pc_arg = arg;
1772 1.134 ad pc->pc_hits = 0;
1773 1.48 thorpej pc->pc_misses = 0;
1774 1.134 ad pc->pc_nempty = 0;
1775 1.134 ad pc->pc_npart = 0;
1776 1.134 ad pc->pc_nfull = 0;
1777 1.134 ad pc->pc_contended = 0;
1778 1.134 ad pc->pc_refcnt = 0;
1779 1.136 yamt pc->pc_freecheck = NULL;
1780 1.134 ad
1781 1.142 ad if ((flags & PR_LARGECACHE) != 0) {
1782 1.142 ad pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
1783 1.163 ad pc->pc_pcgpool = &pcg_large_pool;
1784 1.142 ad } else {
1785 1.142 ad pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
1786 1.163 ad pc->pc_pcgpool = &pcg_normal_pool;
1787 1.142 ad }
1788 1.142 ad
1789 1.134 ad /* Allocate per-CPU caches. */
1790 1.134 ad memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
1791 1.134 ad pc->pc_ncpu = 0;
1792 1.139 ad if (ncpu < 2) {
1793 1.137 ad /* XXX For sparc: boot CPU is not attached yet. */
1794 1.137 ad pool_cache_cpu_init1(curcpu(), pc);
1795 1.137 ad } else {
1796 1.137 ad for (CPU_INFO_FOREACH(cii, ci)) {
1797 1.137 ad pool_cache_cpu_init1(ci, pc);
1798 1.137 ad }
1799 1.134 ad }
1800 1.145 ad
1801 1.145 ad /* Add to list of all pools. */
1802 1.145 ad if (__predict_true(!cold))
1803 1.134 ad mutex_enter(&pool_head_lock);
1804 1.145 ad TAILQ_FOREACH(pc1, &pool_cache_head, pc_cachelist) {
1805 1.145 ad if (strcmp(pc1->pc_pool.pr_wchan, pc->pc_pool.pr_wchan) > 0)
1806 1.145 ad break;
1807 1.145 ad }
1808 1.145 ad if (pc1 == NULL)
1809 1.145 ad TAILQ_INSERT_TAIL(&pool_cache_head, pc, pc_cachelist);
1810 1.145 ad else
1811 1.145 ad TAILQ_INSERT_BEFORE(pc1, pc, pc_cachelist);
1812 1.145 ad if (__predict_true(!cold))
1813 1.134 ad mutex_exit(&pool_head_lock);
1814 1.145 ad
1815 1.145 ad membar_sync();
1816 1.145 ad pp->pr_cache = pc;
1817 1.43 thorpej }
1818 1.43 thorpej
1819 1.43 thorpej /*
1820 1.43 thorpej * pool_cache_destroy:
1821 1.43 thorpej *
1822 1.43 thorpej * Destroy a pool cache.
1823 1.43 thorpej */
1824 1.43 thorpej void
1825 1.134 ad pool_cache_destroy(pool_cache_t pc)
1826 1.43 thorpej {
1827 1.191 para
1828 1.191 para pool_cache_bootstrap_destroy(pc);
1829 1.191 para pool_put(&cache_pool, pc);
1830 1.191 para }
1831 1.191 para
1832 1.191 para /*
1833 1.191 para * pool_cache_bootstrap_destroy:
1834 1.191 para *
1835 1.191 para * Destroy a pool cache; the caller-provided storage is not freed.
1836 1.191 para */
1837 1.191 para void
1838 1.191 para pool_cache_bootstrap_destroy(pool_cache_t pc)
1839 1.191 para {
1840 1.134 ad struct pool *pp = &pc->pc_pool;
1841 1.175 jym u_int i;
1842 1.134 ad
1843 1.134 ad /* Remove it from the global list. */
1844 1.134 ad mutex_enter(&pool_head_lock);
1845 1.134 ad while (pc->pc_refcnt != 0)
1846 1.134 ad cv_wait(&pool_busy, &pool_head_lock);
1847 1.145 ad TAILQ_REMOVE(&pool_cache_head, pc, pc_cachelist);
1848 1.134 ad mutex_exit(&pool_head_lock);
1849 1.43 thorpej
1850 1.43 thorpej /* First, invalidate the entire cache. */
1851 1.43 thorpej pool_cache_invalidate(pc);
1852 1.43 thorpej
1853 1.134 ad /* Disassociate it from the pool. */
1854 1.134 ad mutex_enter(&pp->pr_lock);
1855 1.134 ad pp->pr_cache = NULL;
1856 1.134 ad mutex_exit(&pp->pr_lock);
1857 1.134 ad
1858 1.134 ad /* Destroy per-CPU data */
1859 1.183 ad for (i = 0; i < __arraycount(pc->pc_cpus); i++)
1860 1.175 jym pool_cache_invalidate_cpu(pc, i);
1861 1.134 ad
1862 1.134 ad /* Finally, destroy it. */
1863 1.134 ad mutex_destroy(&pc->pc_lock);
1864 1.134 ad pool_destroy(pp);
1865 1.134 ad }
1866 1.134 ad
1867 1.134 ad /*
1868 1.134 ad * pool_cache_cpu_init1:
1869 1.134 ad *
1870 1.134 ad * Called for each pool_cache whenever a new CPU is attached.
1871 1.134 ad */
1872 1.134 ad static void
1873 1.134 ad pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
1874 1.134 ad {
1875 1.134 ad pool_cache_cpu_t *cc;
1876 1.137 ad int index;
1877 1.134 ad
1878 1.137 ad index = ci->ci_index;
1879 1.137 ad
1880 1.183 ad KASSERT(index < __arraycount(pc->pc_cpus));
1881 1.134 ad
1882 1.137 ad if ((cc = pc->pc_cpus[index]) != NULL) {
1883 1.137 ad KASSERT(cc->cc_cpuindex == index);
1884 1.134 ad return;
1885 1.134 ad }
1886 1.134 ad
1887 1.134 ad /*
1888 1.134 ad * The first CPU is 'free'. This needs to be the case for
1889 1.134 ad * bootstrap - we may not be able to allocate yet.
1890 1.134 ad */
1891 1.134 ad if (pc->pc_ncpu == 0) {
1892 1.134 ad cc = &pc->pc_cpu0;
1893 1.134 ad pc->pc_ncpu = 1;
1894 1.134 ad } else {
1895 1.134 ad mutex_enter(&pc->pc_lock);
1896 1.134 ad pc->pc_ncpu++;
1897 1.134 ad mutex_exit(&pc->pc_lock);
1898 1.134 ad cc = pool_get(&cache_cpu_pool, PR_WAITOK);
1899 1.134 ad }
1900 1.134 ad
1901 1.134 ad cc->cc_ipl = pc->pc_pool.pr_ipl;
1902 1.134 ad cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
1903 1.134 ad cc->cc_cache = pc;
1904 1.137 ad cc->cc_cpuindex = index;
1905 1.134 ad cc->cc_hits = 0;
1906 1.134 ad cc->cc_misses = 0;
1907 1.169 yamt cc->cc_current = __UNCONST(&pcg_dummy);
1908 1.169 yamt cc->cc_previous = __UNCONST(&pcg_dummy);
1909 1.134 ad
1910 1.137 ad pc->pc_cpus[index] = cc;
1911 1.43 thorpej }
1912 1.43 thorpej
1913 1.134 ad /*
1914 1.134 ad * pool_cache_cpu_init:
1915 1.134 ad *
1916 1.134 ad * Called whenever a new CPU is attached.
1917 1.134 ad */
1918 1.134 ad void
1919 1.134 ad pool_cache_cpu_init(struct cpu_info *ci)
1920 1.43 thorpej {
1921 1.134 ad pool_cache_t pc;
1922 1.134 ad
1923 1.134 ad mutex_enter(&pool_head_lock);
1924 1.145 ad TAILQ_FOREACH(pc, &pool_cache_head, pc_cachelist) {
1925 1.134 ad pc->pc_refcnt++;
1926 1.134 ad mutex_exit(&pool_head_lock);
1927 1.43 thorpej
1928 1.134 ad pool_cache_cpu_init1(ci, pc);
1929 1.43 thorpej
1930 1.134 ad mutex_enter(&pool_head_lock);
1931 1.134 ad pc->pc_refcnt--;
1932 1.134 ad cv_broadcast(&pool_busy);
1933 1.134 ad }
1934 1.134 ad mutex_exit(&pool_head_lock);
1935 1.43 thorpej }
1936 1.43 thorpej
1937 1.134 ad /*
1938 1.134 ad * pool_cache_reclaim:
1939 1.134 ad *
1940 1.134 ad * Reclaim memory from a pool cache.
1941 1.134 ad */
1942 1.134 ad bool
1943 1.134 ad pool_cache_reclaim(pool_cache_t pc)
1944 1.43 thorpej {
1945 1.43 thorpej
1946 1.134 ad return pool_reclaim(&pc->pc_pool);
1947 1.134 ad }
1948 1.43 thorpej
1949 1.136 yamt static void
1950 1.136 yamt pool_cache_destruct_object1(pool_cache_t pc, void *object)
1951 1.136 yamt {
1952 1.136 yamt
1953 1.136 yamt (*pc->pc_dtor)(pc->pc_arg, object);
1954 1.136 yamt pool_put(&pc->pc_pool, object);
1955 1.136 yamt }
1956 1.136 yamt
1957 1.134 ad /*
1958 1.134 ad * pool_cache_destruct_object:
1959 1.134 ad *
1960 1.134 ad * Force destruction of an object and its release back into
1961 1.134 ad * the pool.
1962 1.134 ad */
1963 1.134 ad void
1964 1.134 ad pool_cache_destruct_object(pool_cache_t pc, void *object)
1965 1.134 ad {
1966 1.134 ad
1967 1.136 yamt FREECHECK_IN(&pc->pc_freecheck, object);
1968 1.136 yamt
1969 1.136 yamt pool_cache_destruct_object1(pc, object);
1970 1.43 thorpej }
1971 1.43 thorpej
1972 1.134 ad /*
1973 1.134 ad * pool_cache_invalidate_groups:
1974 1.134 ad *
1975 1.134 ad * Invalidate a chain of groups and destruct all objects.
1976 1.134 ad */
1977 1.102 chs static void
1978 1.134 ad pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
1979 1.102 chs {
1980 1.134 ad void *object;
1981 1.134 ad pcg_t *next;
1982 1.134 ad int i;
1983 1.134 ad
1984 1.134 ad for (; pcg != NULL; pcg = next) {
1985 1.134 ad next = pcg->pcg_next;
1986 1.134 ad
1987 1.134 ad for (i = 0; i < pcg->pcg_avail; i++) {
1988 1.134 ad object = pcg->pcg_objects[i].pcgo_va;
1989 1.136 yamt pool_cache_destruct_object1(pc, object);
1990 1.134 ad }
1991 1.102 chs
1992 1.142 ad if (pcg->pcg_size == PCG_NOBJECTS_LARGE) {
1993 1.142 ad pool_put(&pcg_large_pool, pcg);
1994 1.142 ad } else {
1995 1.142 ad KASSERT(pcg->pcg_size == PCG_NOBJECTS_NORMAL);
1996 1.142 ad pool_put(&pcg_normal_pool, pcg);
1997 1.142 ad }
1998 1.102 chs }
1999 1.102 chs }
2000 1.102 chs
2001 1.43 thorpej /*
2002 1.134 ad * pool_cache_invalidate:
2003 1.43 thorpej *
2004 1.134 ad * Invalidate a pool cache (destruct and release all of the
2005 1.134 ad * cached objects). Does not reclaim objects from the pool.
2006 1.176 thorpej *
2007 1.176 thorpej * Note: For pool caches that provide constructed objects, there
2008 1.176 thorpej * is an assumption that another level of synchronization is occurring
2009 1.176 thorpej * between the input to the constructor and the cache invalidation.
2010 1.196 jym *
2011 1.196 jym * Invalidation is a costly process and should not be called from
2012 1.196 jym * interrupt context.
2013 1.43 thorpej */
2014 1.134 ad void
2015 1.134 ad pool_cache_invalidate(pool_cache_t pc)
2016 1.134 ad {
2017 1.196 jym uint64_t where;
2018 1.134 ad pcg_t *full, *empty, *part;
2019 1.196 jym
2020 1.196 jym KASSERT(!cpu_intr_p() && !cpu_softintr_p());
2021 1.176 thorpej
2022 1.177 jym if (ncpu < 2 || !mp_online) {
2023 1.176 thorpej /*
2024 1.176 thorpej * We might be called early enough in the boot process
2025 1.176 thorpej * for the CPU data structures to not be fully initialized.
2026 1.196 jym * In this case, transfer the content of the local CPU's
2027 1.196 jym * cache back into the global cache, as only this CPU is
2028 1.196 jym * currently running.
2029 1.176 thorpej */
2030 1.196 jym pool_cache_transfer(pc);
2031 1.176 thorpej } else {
2032 1.176 thorpej /*
2033 1.196 jym * Signal all CPUs that they must transfer their local
2034 1.196 jym * cache back to the global pool, then wait for the xcall
2035 1.196 jym * to complete.
2036 1.176 thorpej */
2037 1.196 jym where = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
2038 1.196 jym pc, NULL);
2039 1.176 thorpej xc_wait(where);
2040 1.176 thorpej }
2041 1.196 jym
2042 1.196 jym /* Empty pool caches, then invalidate objects */
2043 1.134 ad mutex_enter(&pc->pc_lock);
2044 1.134 ad full = pc->pc_fullgroups;
2045 1.134 ad empty = pc->pc_emptygroups;
2046 1.134 ad part = pc->pc_partgroups;
2047 1.134 ad pc->pc_fullgroups = NULL;
2048 1.134 ad pc->pc_emptygroups = NULL;
2049 1.134 ad pc->pc_partgroups = NULL;
2050 1.134 ad pc->pc_nfull = 0;
2051 1.134 ad pc->pc_nempty = 0;
2052 1.134 ad pc->pc_npart = 0;
2053 1.134 ad mutex_exit(&pc->pc_lock);
2054 1.134 ad
2055 1.134 ad pool_cache_invalidate_groups(pc, full);
2056 1.134 ad pool_cache_invalidate_groups(pc, empty);
2057 1.134 ad pool_cache_invalidate_groups(pc, part);
2058 1.134 ad }
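
/*
 * Illustrative sketch (hypothetical module teardown): invalidating a
 * cache before destroying it, so that all cached constructed objects
 * are destructed. The explicit call is optional here since
 * pool_cache_destroy() invalidates internally, as seen above.
 */
#if 0
static void
frob_subsystem_fini(void)
{
	pool_cache_invalidate(frob_cache);
	pool_cache_destroy(frob_cache);
}
#endif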
2059 1.134 ad
2060 1.175 jym /*
2061 1.175 jym * pool_cache_invalidate_cpu:
2062 1.175 jym *
2063 1.175 jym * Invalidate all CPU-bound cached objects in the pool cache, the CPU
2064 1.175 jym * being identified by its associated index.
2065 1.175 jym * It is the caller's responsibility to ensure that no operation is
2066 1.175 jym * taking place on this pool cache while doing this invalidation.
2067 1.175 jym * WARNING: as no inter-CPU locking is enforced, trying to invalidate
2068 1.175 jym * pool cached objects from a CPU other than the one currently running
2069 1.175 jym * may result in undefined behaviour.
2070 1.175 jym */
2071 1.175 jym static void
2072 1.175 jym pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
2073 1.175 jym {
2074 1.175 jym pool_cache_cpu_t *cc;
2075 1.175 jym pcg_t *pcg;
2076 1.175 jym
2077 1.175 jym if ((cc = pc->pc_cpus[index]) == NULL)
2078 1.175 jym return;
2079 1.175 jym
2080 1.175 jym if ((pcg = cc->cc_current) != &pcg_dummy) {
2081 1.175 jym pcg->pcg_next = NULL;
2082 1.175 jym pool_cache_invalidate_groups(pc, pcg);
2083 1.175 jym }
2084 1.175 jym if ((pcg = cc->cc_previous) != &pcg_dummy) {
2085 1.175 jym pcg->pcg_next = NULL;
2086 1.175 jym pool_cache_invalidate_groups(pc, pcg);
2087 1.175 jym }
2088 1.175 jym if (cc != &pc->pc_cpu0)
2089 1.175 jym pool_put(&cache_cpu_pool, cc);
2090 1.175 jym
2091 1.175 jym }
2092 1.175 jym
2093 1.134 ad void
2094 1.134 ad pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
2095 1.134 ad {
2096 1.134 ad
2097 1.134 ad pool_set_drain_hook(&pc->pc_pool, fn, arg);
2098 1.134 ad }
2099 1.134 ad
2100 1.134 ad void
2101 1.134 ad pool_cache_setlowat(pool_cache_t pc, int n)
2102 1.134 ad {
2103 1.134 ad
2104 1.134 ad pool_setlowat(&pc->pc_pool, n);
2105 1.134 ad }
2106 1.134 ad
2107 1.134 ad void
2108 1.134 ad pool_cache_sethiwat(pool_cache_t pc, int n)
2109 1.134 ad {
2110 1.134 ad
2111 1.134 ad pool_sethiwat(&pc->pc_pool, n);
2112 1.134 ad }
2113 1.134 ad
2114 1.134 ad void
2115 1.134 ad pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
2116 1.134 ad {
2117 1.134 ad
2118 1.134 ad pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
2119 1.134 ad }
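
/*
 * Illustrative sketch (hypothetical cache and numbers): typical tuning
 * through the wrappers above. The ratecap limits how often the
 * hard-limit warning is logged, in seconds.
 */
#if 0
	pool_cache_setlowat(frob_cache, 16);	/* keep >= 16 items ready */
	pool_cache_sethiwat(frob_cache, 1024);	/* cap idle pages kept */
	pool_cache_sethardlimit(frob_cache, 4096,
	    "WARNING: frob_cache limit reached", 60);
#endif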
2120 1.134 ad
2121 1.162 ad static bool __noinline
2122 1.162 ad pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
2123 1.134 ad paddr_t *pap, int flags)
2124 1.43 thorpej {
2125 1.134 ad pcg_t *pcg, *cur;
2126 1.134 ad uint64_t ncsw;
2127 1.134 ad pool_cache_t pc;
2128 1.43 thorpej void *object;
2129 1.58 thorpej
2130 1.168 yamt KASSERT(cc->cc_current->pcg_avail == 0);
2131 1.168 yamt KASSERT(cc->cc_previous->pcg_avail == 0);
2132 1.168 yamt
2133 1.134 ad pc = cc->cc_cache;
2134 1.134 ad cc->cc_misses++;
2135 1.43 thorpej
2136 1.134 ad /*
2137 1.134 ad * Nothing was available locally. Try and grab a group
2138 1.134 ad * from the cache.
2139 1.134 ad */
2140 1.162 ad if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2141 1.134 ad ncsw = curlwp->l_ncsw;
2142 1.134 ad mutex_enter(&pc->pc_lock);
2143 1.134 ad pc->pc_contended++;
2144 1.43 thorpej
2145 1.134 ad /*
2146 1.134 ad * If we context switched while locking, then
2147 1.134 ad * our view of the per-CPU data is invalid:
2148 1.134 ad * retry.
2149 1.134 ad */
2150 1.134 ad if (curlwp->l_ncsw != ncsw) {
2151 1.134 ad mutex_exit(&pc->pc_lock);
2152 1.162 ad return true;
2153 1.43 thorpej }
2154 1.102 chs }
2155 1.43 thorpej
2156 1.162 ad if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
2157 1.43 thorpej /*
2158 1.134 ad * If there's a full group, release our empty
2159 1.134 ad * group back to the cache. Install the full
2160 1.134 ad * group as cc_current and return.
2161 1.43 thorpej */
2162 1.162 ad if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
2163 1.134 ad KASSERT(cur->pcg_avail == 0);
2164 1.134 ad cur->pcg_next = pc->pc_emptygroups;
2165 1.134 ad pc->pc_emptygroups = cur;
2166 1.134 ad pc->pc_nempty++;
2167 1.87 thorpej }
2168 1.142 ad KASSERT(pcg->pcg_avail == pcg->pcg_size);
2169 1.134 ad cc->cc_current = pcg;
2170 1.134 ad pc->pc_fullgroups = pcg->pcg_next;
2171 1.134 ad pc->pc_hits++;
2172 1.134 ad pc->pc_nfull--;
2173 1.134 ad mutex_exit(&pc->pc_lock);
2174 1.162 ad return true;
2175 1.134 ad }
2176 1.134 ad
2177 1.134 ad /*
2178 1.134 ad * Nothing available locally or in cache. Take the slow
2179 1.134 ad * path: fetch a new object from the pool and construct
2180 1.134 ad * it.
2181 1.134 ad */
2182 1.134 ad pc->pc_misses++;
2183 1.134 ad mutex_exit(&pc->pc_lock);
2184 1.162 ad splx(s);
2185 1.134 ad
2186 1.134 ad object = pool_get(&pc->pc_pool, flags);
2187 1.134 ad *objectp = object;
2188 1.162 ad if (__predict_false(object == NULL))
2189 1.162 ad return false;
2190 1.125 ad
2191 1.162 ad if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
2192 1.134 ad pool_put(&pc->pc_pool, object);
2193 1.134 ad *objectp = NULL;
2194 1.162 ad return false;
2195 1.43 thorpej }
2196 1.43 thorpej
2197 1.134 ad KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
2198 1.134 ad (pc->pc_pool.pr_align - 1)) == 0);
2199 1.43 thorpej
2200 1.134 ad if (pap != NULL) {
2201 1.134 ad #ifdef POOL_VTOPHYS
2202 1.134 ad *pap = POOL_VTOPHYS(object);
2203 1.134 ad #else
2204 1.134 ad *pap = POOL_PADDR_INVALID;
2205 1.134 ad #endif
2206 1.102 chs }
2207 1.43 thorpej
2208 1.125 ad FREECHECK_OUT(&pc->pc_freecheck, object);
2209 1.204 maxv pool_redzone_fill(&pc->pc_pool, object);
2210 1.162 ad return false;
2211 1.43 thorpej }
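
/*
 * Note on the overall scheme: each CPU holds two groups ("magazines"),
 * cc_current and cc_previous. The fast paths in pool_cache_get_paddr()
 * and pool_cache_put_paddr() touch only these, with interrupts blocked
 * by splvm(). Only when both groups are empty (on get) or both are
 * full (on put) do the *_slow() routines run, exchanging whole groups
 * with the global full/empty lists under pc_lock, in the style of the
 * magazine allocator described by Bonwick and Adams.
 */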
2212 1.43 thorpej
2213 1.43 thorpej /*
2214 1.134 ad * pool_cache_get{,_paddr}:
2215 1.43 thorpej *
2216 1.134 ad * Get an object from a pool cache (optionally returning
2217 1.134 ad * the physical address of the object).
2218 1.43 thorpej */
2219 1.134 ad void *
2220 1.134 ad pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
2221 1.43 thorpej {
2222 1.134 ad pool_cache_cpu_t *cc;
2223 1.134 ad pcg_t *pcg;
2224 1.134 ad void *object;
2225 1.60 thorpej int s;
2226 1.43 thorpej
2227 1.184 rmind KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()) ||
2228 1.185 rmind (pc->pc_pool.pr_ipl != IPL_NONE || cold || panicstr != NULL),
2229 1.190 jym "pool '%s' is IPL_NONE, but called from interrupt context\n",
2230 1.190 jym pc->pc_pool.pr_wchan);
2231 1.184 rmind
2232 1.155 ad if (flags & PR_WAITOK) {
2233 1.154 yamt ASSERT_SLEEPABLE();
2234 1.155 ad }
2235 1.125 ad
2236 1.162 ad /* Lock out interrupts and disable preemption. */
2237 1.162 ad s = splvm();
2238 1.165 yamt while (/* CONSTCOND */ true) {
2239 1.134 ad /* Try and allocate an object from the current group. */
2240 1.162 ad cc = pc->pc_cpus[curcpu()->ci_index];
2241 1.162 ad KASSERT(cc->cc_cache == pc);
2242 1.134 ad pcg = cc->cc_current;
2243 1.162 ad if (__predict_true(pcg->pcg_avail > 0)) {
2244 1.134 ad object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
2245 1.162 ad if (__predict_false(pap != NULL))
2246 1.134 ad *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
2247 1.148 yamt #if defined(DIAGNOSTIC)
2248 1.134 ad pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
2249 1.163 ad KASSERT(pcg->pcg_avail < pcg->pcg_size);
2250 1.134 ad KASSERT(object != NULL);
2251 1.163 ad #endif
2252 1.134 ad cc->cc_hits++;
2253 1.162 ad splx(s);
2254 1.134 ad FREECHECK_OUT(&pc->pc_freecheck, object);
2255 1.204 maxv pool_redzone_fill(&pc->pc_pool, object);
2256 1.134 ad return object;
2257 1.43 thorpej }
2258 1.43 thorpej
2259 1.43 thorpej /*
2260 1.134 ad * That failed. If the previous group isn't empty, swap
2261 1.134 ad * it with the current group and allocate from there.
2262 1.43 thorpej */
2263 1.134 ad pcg = cc->cc_previous;
2264 1.162 ad if (__predict_true(pcg->pcg_avail > 0)) {
2265 1.134 ad cc->cc_previous = cc->cc_current;
2266 1.134 ad cc->cc_current = pcg;
2267 1.134 ad continue;
2268 1.43 thorpej }
2269 1.43 thorpej
2270 1.134 ad /*
2271 1.134 ad * Can't allocate from either group: try the slow path.
2272 1.134 ad * If get_slow() allocated an object for us, or if
2273 1.162 ad * no more objects are available, it will return false.
2274 1.134 ad * Otherwise, we need to retry.
2275 1.134 ad */
2276 1.165 yamt if (!pool_cache_get_slow(cc, s, &object, pap, flags))
2277 1.165 yamt break;
2278 1.165 yamt }
2279 1.43 thorpej
2280 1.134 ad return object;
2281 1.51 thorpej }
2282 1.51 thorpej
2283 1.162 ad static bool __noinline
2284 1.162 ad pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
2285 1.51 thorpej {
2286 1.200 pooka struct lwp *l = curlwp;
2287 1.163 ad pcg_t *pcg, *cur;
2288 1.134 ad uint64_t ncsw;
2289 1.134 ad pool_cache_t pc;
2290 1.51 thorpej
2291 1.168 yamt KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
2292 1.168 yamt KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
2293 1.168 yamt
2294 1.134 ad pc = cc->cc_cache;
2295 1.171 ad pcg = NULL;
2296 1.134 ad cc->cc_misses++;
2297 1.200 pooka ncsw = l->l_ncsw;
2298 1.43 thorpej
2299 1.171 ad /*
2300 1.171 ad * If there are no empty groups in the cache then allocate one
2301 1.171 ad * while still unlocked.
2302 1.171 ad */
2303 1.171 ad if (__predict_false(pc->pc_emptygroups == NULL)) {
2304 1.171 ad if (__predict_true(!pool_cache_disable)) {
2305 1.171 ad pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
2306 1.171 ad }
2307 1.200 pooka /*
2308 1.200 pooka * If pool_get() blocked, then our view of
2309 1.200 pooka * the per-CPU data is invalid: retry.
2310 1.200 pooka */
2311 1.200 pooka if (__predict_false(l->l_ncsw != ncsw)) {
2312 1.200 pooka if (pcg != NULL) {
2313 1.200 pooka pool_put(pc->pc_pcgpool, pcg);
2314 1.200 pooka }
2315 1.200 pooka return true;
2316 1.200 pooka }
2317 1.171 ad if (__predict_true(pcg != NULL)) {
2318 1.171 ad pcg->pcg_avail = 0;
2319 1.171 ad pcg->pcg_size = pc->pc_pcgsize;
2320 1.171 ad }
2321 1.171 ad }
2322 1.171 ad
2323 1.162 ad /* Lock the cache. */
2324 1.162 ad if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
2325 1.134 ad mutex_enter(&pc->pc_lock);
2326 1.134 ad pc->pc_contended++;
2327 1.162 ad
2328 1.163 ad /*
2329 1.163 ad * If we context switched while locking, then our view of
2330 1.163 ad * the per-CPU data is invalid: retry.
2331 1.163 ad */
2332 1.200 pooka if (__predict_false(l->l_ncsw != ncsw)) {
2333 1.163 ad mutex_exit(&pc->pc_lock);
2334 1.171 ad if (pcg != NULL) {
2335 1.171 ad pool_put(pc->pc_pcgpool, pcg);
2336 1.171 ad }
2337 1.163 ad return true;
2338 1.163 ad }
2339 1.162 ad }
2340 1.102 chs
2341 1.163 ad /* If we did not allocate an empty group above, take one from the cache. */
2342 1.171 ad if (pcg == NULL && pc->pc_emptygroups != NULL) {
2343 1.171 ad pcg = pc->pc_emptygroups;
2344 1.163 ad pc->pc_emptygroups = pcg->pcg_next;
2345 1.163 ad pc->pc_nempty--;
2346 1.134 ad }
2347 1.130 ad
2348 1.162 ad /*
2349 1.162 ad * If there's an empty group, release our full group back
2350 1.162 ad * to the cache. Install the empty group on the local CPU
2351 1.162 ad * and return.
2352 1.162 ad */
2353 1.163 ad if (pcg != NULL) {
2354 1.134 ad KASSERT(pcg->pcg_avail == 0);
2355 1.162 ad if (__predict_false(cc->cc_previous == &pcg_dummy)) {
2356 1.146 ad cc->cc_previous = pcg;
2357 1.146 ad } else {
2358 1.162 ad cur = cc->cc_current;
2359 1.162 ad if (__predict_true(cur != &pcg_dummy)) {
2360 1.163 ad KASSERT(cur->pcg_avail == cur->pcg_size);
2361 1.146 ad cur->pcg_next = pc->pc_fullgroups;
2362 1.146 ad pc->pc_fullgroups = cur;
2363 1.146 ad pc->pc_nfull++;
2364 1.146 ad }
2365 1.146 ad cc->cc_current = pcg;
2366 1.146 ad }
2367 1.163 ad pc->pc_hits++;
2368 1.134 ad mutex_exit(&pc->pc_lock);
2369 1.162 ad return true;
2370 1.102 chs }
2371 1.105 christos
2372 1.134 ad /*
2373 1.162 ad * Nothing available locally or in cache, and we didn't
2374 1.162 ad * allocate an empty group. Take the slow path and destroy
2375 1.162 ad * the object here and now.
2376 1.134 ad */
2377 1.134 ad pc->pc_misses++;
2378 1.134 ad mutex_exit(&pc->pc_lock);
2379 1.162 ad splx(s);
2380 1.162 ad pool_cache_destruct_object(pc, object);
2381 1.105 christos
2382 1.162 ad return false;
2383 1.134 ad }
2384 1.102 chs
2385 1.43 thorpej /*
2386 1.134 ad * pool_cache_put{,_paddr}:
2387 1.43 thorpej *
2388 1.134 ad * Put an object back to the pool cache (optionally caching the
2389 1.134 ad * physical address of the object).
2390 1.43 thorpej */
2391 1.101 thorpej void
2392 1.134 ad pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
2393 1.43 thorpej {
2394 1.134 ad pool_cache_cpu_t *cc;
2395 1.134 ad pcg_t *pcg;
2396 1.134 ad int s;
2397 1.101 thorpej
2398 1.172 yamt KASSERT(object != NULL);
2399 1.204 maxv pool_redzone_check(&pc->pc_pool, object);
2400 1.134 ad FREECHECK_IN(&pc->pc_freecheck, object);
2401 1.101 thorpej
2402 1.162 ad /* Lock out interrupts and disable preemption. */
2403 1.162 ad s = splvm();
2404 1.165 yamt while (/* CONSTCOND */ true) {
2405 1.134 ad /* If the current group isn't full, release it there. */
2406 1.162 ad cc = pc->pc_cpus[curcpu()->ci_index];
2407 1.162 ad KASSERT(cc->cc_cache == pc);
2408 1.134 ad pcg = cc->cc_current;
2409 1.162 ad if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2410 1.134 ad pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
2411 1.134 ad pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
2412 1.134 ad pcg->pcg_avail++;
2413 1.134 ad cc->cc_hits++;
2414 1.162 ad splx(s);
2415 1.134 ad return;
2416 1.134 ad }
2417 1.43 thorpej
2418 1.134 ad /*
2419 1.162 ad * That failed. If the previous group isn't full, swap
2420 1.134 ad * it with the current group and try again.
2421 1.134 ad */
2422 1.134 ad pcg = cc->cc_previous;
2423 1.162 ad if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
2424 1.134 ad cc->cc_previous = cc->cc_current;
2425 1.134 ad cc->cc_current = pcg;
2426 1.134 ad continue;
2427 1.134 ad }
2428 1.43 thorpej
2429 1.134 ad /*
2430 1.134 ad * Can't free to either group: try the slow path.
2431 1.134 ad * If put_slow() releases the object for us, it
2432 1.162 ad * will return false. Otherwise we need to retry.
2433 1.134 ad */
2434 1.165 yamt if (!pool_cache_put_slow(cc, s, object))
2435 1.165 yamt break;
2436 1.165 yamt }
2437 1.43 thorpej }
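
/*
 * Illustrative sketch of the common round trip through the fast paths
 * above. "frob_cache" and the caller are hypothetical;
 * pool_cache_get() and pool_cache_put() are the non-paddr wrappers
 * from <sys/pool.h>.
 */
#if 0
static int
frob_do_something(void)
{
	struct frob *f;

	f = pool_cache_get(frob_cache, PR_WAITOK);
	if (f == NULL)		/* possible if the ctor can fail */
		return ENOMEM;
	/* ... use f, leaving it in its constructed state ... */
	pool_cache_put(frob_cache, f);
	return 0;
}
#endif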
2438 1.43 thorpej
2439 1.43 thorpej /*
2440 1.196 jym * pool_cache_transfer:
2441 1.43 thorpej *
2442 1.134 ad * Transfer objects from the per-CPU cache to the global cache.
2443 1.134 ad * Run within a cross-call thread.
2444 1.43 thorpej */
2445 1.43 thorpej static void
2446 1.196 jym pool_cache_transfer(pool_cache_t pc)
2447 1.43 thorpej {
2448 1.134 ad pool_cache_cpu_t *cc;
2449 1.134 ad pcg_t *prev, *cur, **list;
2450 1.162 ad int s;
2451 1.134 ad
2452 1.162 ad s = splvm();
2453 1.162 ad mutex_enter(&pc->pc_lock);
2454 1.162 ad cc = pc->pc_cpus[curcpu()->ci_index];
2455 1.134 ad cur = cc->cc_current;
2456 1.169 yamt cc->cc_current = __UNCONST(&pcg_dummy);
2457 1.134 ad prev = cc->cc_previous;
2458 1.169 yamt cc->cc_previous = __UNCONST(&pcg_dummy);
2459 1.162 ad if (cur != &pcg_dummy) {
2460 1.142 ad if (cur->pcg_avail == cur->pcg_size) {
2461 1.134 ad list = &pc->pc_fullgroups;
2462 1.134 ad pc->pc_nfull++;
2463 1.134 ad } else if (cur->pcg_avail == 0) {
2464 1.134 ad list = &pc->pc_emptygroups;
2465 1.134 ad pc->pc_nempty++;
2466 1.134 ad } else {
2467 1.134 ad list = &pc->pc_partgroups;
2468 1.134 ad pc->pc_npart++;
2469 1.134 ad }
2470 1.134 ad cur->pcg_next = *list;
2471 1.134 ad *list = cur;
2472 1.134 ad }
2473 1.162 ad if (prev != &pcg_dummy) {
2474 1.142 ad if (prev->pcg_avail == prev->pcg_size) {
2475 1.134 ad list = &pc->pc_fullgroups;
2476 1.134 ad pc->pc_nfull++;
2477 1.134 ad } else if (prev->pcg_avail == 0) {
2478 1.134 ad list = &pc->pc_emptygroups;
2479 1.134 ad pc->pc_nempty++;
2480 1.134 ad } else {
2481 1.134 ad list = &pc->pc_partgroups;
2482 1.134 ad pc->pc_npart++;
2483 1.134 ad }
2484 1.134 ad prev->pcg_next = *list;
2485 1.134 ad *list = prev;
2486 1.134 ad }
2487 1.134 ad mutex_exit(&pc->pc_lock);
2488 1.134 ad splx(s);
2489 1.3 pk }
2490 1.66 thorpej
2491 1.66 thorpej /*
2492 1.66 thorpej * Pool backend allocators.
2493 1.66 thorpej *
2494 1.66 thorpej * Each pool has a backend allocator that handles allocation, deallocation,
2495 1.66 thorpej * and any additional draining that might be needed.
2496 1.66 thorpej *
2497 1.66 thorpej * We provide two standard allocators:
2498 1.66 thorpej *
2499 1.66 thorpej * pool_allocator_kmem - the default when no allocator is specified
2500 1.66 thorpej *
2501 1.66 thorpej * pool_allocator_nointr - used for pools that will not be accessed
2502 1.66 thorpej * in interrupt context.
2503 1.66 thorpej */
2504 1.66 thorpej void *pool_page_alloc(struct pool *, int);
2505 1.66 thorpej void pool_page_free(struct pool *, void *);
2506 1.66 thorpej
2507 1.112 bjh21 #ifdef POOL_SUBPAGE
2508 1.112 bjh21 struct pool_allocator pool_allocator_kmem_fullpage = {
2509 1.192 rmind .pa_alloc = pool_page_alloc,
2510 1.192 rmind .pa_free = pool_page_free,
2511 1.192 rmind .pa_pagesz = 0
2512 1.112 bjh21 };
2513 1.112 bjh21 #else
2514 1.66 thorpej struct pool_allocator pool_allocator_kmem = {
2515 1.191 para .pa_alloc = pool_page_alloc,
2516 1.191 para .pa_free = pool_page_free,
2517 1.191 para .pa_pagesz = 0
2518 1.66 thorpej };
2519 1.112 bjh21 #endif
2520 1.66 thorpej
2521 1.112 bjh21 #ifdef POOL_SUBPAGE
2522 1.112 bjh21 struct pool_allocator pool_allocator_nointr_fullpage = {
2523 1.194 para .pa_alloc = pool_page_alloc,
2524 1.194 para .pa_free = pool_page_free,
2525 1.192 rmind .pa_pagesz = 0
2526 1.112 bjh21 };
2527 1.112 bjh21 #else
2528 1.66 thorpej struct pool_allocator pool_allocator_nointr = {
2529 1.191 para .pa_alloc = pool_page_alloc,
2530 1.191 para .pa_free = pool_page_free,
2531 1.191 para .pa_pagesz = 0
2532 1.66 thorpej };
2533 1.112 bjh21 #endif
2534 1.66 thorpej
2535 1.66 thorpej #ifdef POOL_SUBPAGE
2536 1.66 thorpej void *pool_subpage_alloc(struct pool *, int);
2537 1.66 thorpej void pool_subpage_free(struct pool *, void *);
2538 1.66 thorpej
2539 1.112 bjh21 struct pool_allocator pool_allocator_kmem = {
2540 1.193 he .pa_alloc = pool_subpage_alloc,
2541 1.193 he .pa_free = pool_subpage_free,
2542 1.193 he .pa_pagesz = POOL_SUBPAGE
2543 1.112 bjh21 };
2544 1.112 bjh21
2545 1.112 bjh21 struct pool_allocator pool_allocator_nointr = {
2546 1.192 rmind .pa_alloc = pool_subpage_alloc,
2547 1.192 rmind .pa_free = pool_subpage_free,
2548 1.192 rmind .pa_pagesz = POOL_SUBPAGE
2549 1.66 thorpej };
2550 1.66 thorpej #endif /* POOL_SUBPAGE */
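
/*
 * Illustrative sketch: a pool may also supply its own backend
 * allocator. Everything named "example_*" is hypothetical; the only
 * contract is that pa_alloc/pa_free hand out and take back
 * pa_pagesz-sized chunks (0 selects the default page size).
 */
#if 0
static void *
example_page_alloc(struct pool *pp, int flags)
{
	/* e.g. delegate to the stock page allocator */
	return pool_page_alloc(pp, flags);
}

static void
example_page_free(struct pool *pp, void *v)
{
	pool_page_free(pp, v);
}

static struct pool_allocator example_allocator = {
	.pa_alloc = example_page_alloc,
	.pa_free = example_page_free,
	.pa_pagesz = 0,
};
#endif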
2551 1.66 thorpej
2552 1.117 yamt static void *
2553 1.117 yamt pool_allocator_alloc(struct pool *pp, int flags)
2554 1.66 thorpej {
2555 1.117 yamt struct pool_allocator *pa = pp->pr_alloc;
2556 1.66 thorpej void *res;
2557 1.66 thorpej
2558 1.117 yamt res = (*pa->pa_alloc)(pp, flags);
2559 1.117 yamt if (res == NULL && (flags & PR_WAITOK) == 0) {
2560 1.66 thorpej /*
2561 1.117 yamt * We only run the drain hook here if PR_NOWAIT.
2562 1.117 yamt * In other cases, the hook will be run in
2563 1.117 yamt * pool_reclaim().
2564 1.66 thorpej */
2565 1.117 yamt if (pp->pr_drain_hook != NULL) {
2566 1.117 yamt (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2567 1.117 yamt res = (*pa->pa_alloc)(pp, flags);
2568 1.66 thorpej }
2569 1.117 yamt }
2570 1.117 yamt return res;
2571 1.66 thorpej }
2572 1.66 thorpej
2573 1.117 yamt static void
2574 1.66 thorpej pool_allocator_free(struct pool *pp, void *v)
2575 1.66 thorpej {
2576 1.66 thorpej struct pool_allocator *pa = pp->pr_alloc;
2577 1.66 thorpej
2578 1.66 thorpej (*pa->pa_free)(pp, v);
2579 1.66 thorpej }
2580 1.66 thorpej
2581 1.66 thorpej void *
2582 1.124 yamt pool_page_alloc(struct pool *pp, int flags)
2583 1.66 thorpej {
2584 1.192 rmind const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2585 1.191 para vmem_addr_t va;
2586 1.192 rmind int ret;
2587 1.191 para
2588 1.192 rmind ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
2589 1.192 rmind vflags | VM_INSTANTFIT, &va);
2590 1.66 thorpej
2591 1.192 rmind return ret ? NULL : (void *)va;
2592 1.66 thorpej }
2593 1.66 thorpej
2594 1.66 thorpej void
2595 1.124 yamt pool_page_free(struct pool *pp, void *v)
2596 1.66 thorpej {
2597 1.66 thorpej
2598 1.191 para uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
2599 1.98 yamt }
2600 1.98 yamt
2601 1.98 yamt static void *
2602 1.124 yamt pool_page_alloc_meta(struct pool *pp, int flags)
2603 1.98 yamt {
2604 1.192 rmind const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
2605 1.192 rmind vmem_addr_t va;
2606 1.192 rmind int ret;
2607 1.191 para
2608 1.192 rmind ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
2609 1.192 rmind vflags | VM_INSTANTFIT, &va);
2610 1.98 yamt
2611 1.192 rmind return ret ? NULL : (void *)va;
2612 1.98 yamt }
2613 1.98 yamt
2614 1.98 yamt static void
2615 1.124 yamt pool_page_free_meta(struct pool *pp, void *v)
2616 1.98 yamt {
2617 1.98 yamt
2618 1.192 rmind vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
2619 1.66 thorpej }
2620 1.66 thorpej
2621 1.204 maxv #ifdef POOL_REDZONE
2622 1.204 maxv #if defined(_LP64)
2623 1.204 maxv # define PRIME 0x9e37fffffffc0000UL
2624 1.204 maxv #else /* defined(_LP64) */
2625 1.204 maxv # define PRIME 0x9e3779b1
2626 1.204 maxv #endif /* defined(_LP64) */
2627 1.204 maxv #define STATIC_BYTE 0xFE
2628 1.204 maxv CTASSERT(POOL_REDZONE_SIZE > 1);
2629 1.204 maxv
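/*
 * Map an address to a deterministic fill byte: multiplying by a large
 * prime scrambles the address, and the shift keeps only the top byte
 * of the product, so nearby addresses get different patterns.
 */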
2630 1.204 maxv static inline uint8_t
2631 1.204 maxv pool_pattern_generate(const void *p)
2632 1.204 maxv {
2633 1.204 maxv return (uint8_t)(((uintptr_t)p) * PRIME
2634 1.204 maxv >> ((sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT));
2635 1.204 maxv }
2636 1.204 maxv
2637 1.204 maxv static void
2638 1.204 maxv pool_redzone_init(struct pool *pp, size_t requested_size)
2639 1.204 maxv {
2640 1.204 maxv size_t nsz;
2641 1.204 maxv
2642 1.204 maxv if (pp->pr_roflags & PR_NOTOUCH) {
2643 1.204 maxv pp->pr_reqsize = 0;
2644 1.204 maxv pp->pr_redzone = false;
2645 1.204 maxv return;
2646 1.204 maxv }
2647 1.204 maxv
2648 1.204 maxv /*
2649 1.204 maxv * We may have extended the requested size earlier; check if
2650 1.204 maxv * there's naturally space in the padding for a red zone.
2651 1.204 maxv */
2652 1.204 maxv if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
2653 1.204 maxv pp->pr_reqsize = requested_size;
2654 1.204 maxv pp->pr_redzone = true;
2655 1.204 maxv return;
2656 1.204 maxv }
2657 1.204 maxv
2658 1.204 maxv /*
2659 1.204 maxv * No space in the natural padding; check if we can extend a
2660 1.204 maxv * bit the size of the pool.
2661 1.204 maxv */
2662 1.204 maxv nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
2663 1.204 maxv if (nsz <= pp->pr_alloc->pa_pagesz) {
2664 1.204 maxv /* Ok, we can */
2665 1.204 maxv pp->pr_size = nsz;
2666 1.204 maxv pp->pr_reqsize = requested_size;
2667 1.204 maxv pp->pr_redzone = true;
2668 1.204 maxv } else {
2669 1.204 maxv /* No space for a red zone... snif :'( */
2670 1.204 maxv pp->pr_reqsize = 0;
2671 1.204 maxv pp->pr_redzone = false;
2672 1.204 maxv printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
2673 1.204 maxv }
2674 1.204 maxv }
2675 1.204 maxv
2676 1.204 maxv static void
2677 1.204 maxv pool_redzone_fill(struct pool *pp, void *p)
2678 1.204 maxv {
2679 1.204 maxv uint8_t *cp, pat;
2680 1.204 maxv const uint8_t *ep;
2681 1.204 maxv
2682 1.204 maxv if (!pp->pr_redzone)
2683 1.204 maxv return;
2684 1.204 maxv
2685 1.204 maxv cp = (uint8_t *)p + pp->pr_reqsize;
2686 1.204 maxv ep = cp + POOL_REDZONE_SIZE;
2687 1.204 maxv
2688 1.204 maxv /*
2689 1.204 maxv * We really don't want the first byte of the red zone to be '\0';
2690 1.204 maxv * an off-by-one in a string may not be properly detected.
2691 1.204 maxv */
2692 1.204 maxv pat = pool_pattern_generate(cp);
2693 1.204 maxv *cp = (pat == '\0') ? STATIC_BYTE: pat;
2694 1.204 maxv cp++;
2695 1.204 maxv
2696 1.204 maxv while (cp < ep) {
2697 1.204 maxv *cp = pool_pattern_generate(cp);
2698 1.204 maxv cp++;
2699 1.204 maxv }
2700 1.204 maxv }
2701 1.204 maxv
2702 1.204 maxv static void
2703 1.204 maxv pool_redzone_check(struct pool *pp, void *p)
2704 1.204 maxv {
2705 1.204 maxv uint8_t *cp, pat, expected;
2706 1.204 maxv const uint8_t *ep;
2707 1.204 maxv
2708 1.204 maxv if (!pp->pr_redzone)
2709 1.204 maxv return;
2710 1.204 maxv
2711 1.204 maxv cp = (uint8_t *)p + pp->pr_reqsize;
2712 1.204 maxv ep = cp + POOL_REDZONE_SIZE;
2713 1.204 maxv
2714 1.204 maxv pat = pool_pattern_generate(cp);
2715 1.204 maxv expected = (pat == '\0') ? STATIC_BYTE: pat;
2716 1.204 maxv if (expected != *cp) {
2717 1.204 maxv panic("%s: %p: 0x%02x != 0x%02x\n",
2718 1.204 maxv __func__, cp, *cp, expected);
2719 1.204 maxv }
2720 1.204 maxv cp++;
2721 1.204 maxv
2722 1.204 maxv while (cp < ep) {
2723 1.204 maxv expected = pool_pattern_generate(cp);
2724 1.204 maxv if (*cp != expected) {
2725 1.204 maxv panic("%s: %p: 0x%02x != 0x%02x\n",
2726 1.204 maxv __func__, cp, *cp, expected);
2727 1.204 maxv }
2728 1.204 maxv cp++;
2729 1.204 maxv }
2730 1.204 maxv }
2731 1.204 maxv
2732 1.204 maxv #endif /* POOL_REDZONE */
2733 1.204 maxv
2734 1.204 maxv
2735 1.66 thorpej #ifdef POOL_SUBPAGE
2736 1.66 thorpej /* Sub-page allocator, for machines with large hardware pages. */
2737 1.66 thorpej void *
2738 1.66 thorpej pool_subpage_alloc(struct pool *pp, int flags)
2739 1.66 thorpej {
2740 1.134 ad return pool_get(&psppool, flags);
2741 1.66 thorpej }
2742 1.66 thorpej
2743 1.66 thorpej void
2744 1.66 thorpej pool_subpage_free(struct pool *pp, void *v)
2745 1.66 thorpej {
2746 1.66 thorpej pool_put(&psppool, v);
2747 1.66 thorpej }
2748 1.66 thorpej
2749 1.112 bjh21 #endif /* POOL_SUBPAGE */
2750 1.141 yamt
2751 1.141 yamt #if defined(DDB)
2752 1.141 yamt static bool
2753 1.141 yamt pool_in_page(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2754 1.141 yamt {
2755 1.141 yamt
2756 1.141 yamt return (uintptr_t)ph->ph_page <= addr &&
2757 1.141 yamt addr < (uintptr_t)ph->ph_page + pp->pr_alloc->pa_pagesz;
2758 1.141 yamt }
2759 1.141 yamt
2760 1.143 yamt static bool
2761 1.143 yamt pool_in_item(struct pool *pp, void *item, uintptr_t addr)
2762 1.143 yamt {
2763 1.143 yamt
2764 1.143 yamt return (uintptr_t)item <= addr && addr < (uintptr_t)item + pp->pr_size;
2765 1.143 yamt }
2766 1.143 yamt
2767 1.143 yamt static bool
2768 1.143 yamt pool_in_cg(struct pool *pp, struct pool_cache_group *pcg, uintptr_t addr)
2769 1.143 yamt {
2770 1.143 yamt int i;
2771 1.143 yamt
2772 1.143 yamt if (pcg == NULL) {
2773 1.143 yamt return false;
2774 1.143 yamt }
2775 1.144 yamt for (i = 0; i < pcg->pcg_avail; i++) {
2776 1.143 yamt if (pool_in_item(pp, pcg->pcg_objects[i].pcgo_va, addr)) {
2777 1.143 yamt return true;
2778 1.143 yamt }
2779 1.143 yamt }
2780 1.143 yamt return false;
2781 1.143 yamt }
2782 1.143 yamt
2783 1.143 yamt static bool
2784 1.143 yamt pool_allocated(struct pool *pp, struct pool_item_header *ph, uintptr_t addr)
2785 1.143 yamt {
2786 1.143 yamt
2787 1.143 yamt if ((pp->pr_roflags & PR_NOTOUCH) != 0) {
2788 1.143 yamt unsigned int idx = pr_item_notouch_index(pp, ph, (void *)addr);
2789 1.143 yamt pool_item_bitmap_t *bitmap =
2790 1.143 yamt ph->ph_bitmap + (idx / BITMAP_SIZE);
2791 1.143 yamt pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
2792 1.143 yamt
2793 1.143 yamt return (*bitmap & mask) == 0;
2794 1.143 yamt } else {
2795 1.143 yamt struct pool_item *pi;
2796 1.143 yamt
2797 1.143 yamt LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
2798 1.143 yamt if (pool_in_item(pp, pi, addr)) {
2799 1.143 yamt return false;
2800 1.143 yamt }
2801 1.143 yamt }
2802 1.143 yamt return true;
2803 1.143 yamt }
2804 1.143 yamt }
2805 1.143 yamt
2806 1.141 yamt void
2807 1.141 yamt pool_whatis(uintptr_t addr, void (*pr)(const char *, ...))
2808 1.141 yamt {
2809 1.141 yamt struct pool *pp;
2810 1.141 yamt
2811 1.145 ad TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2812 1.141 yamt struct pool_item_header *ph;
2813 1.141 yamt uintptr_t item;
2814 1.143 yamt bool allocated = true;
2815 1.143 yamt bool incache = false;
2816 1.143 yamt bool incpucache = false;
2817 1.143 yamt char cpucachestr[32];
2818 1.141 yamt
2819 1.141 yamt if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
2820 1.141 yamt LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
2821 1.141 yamt if (pool_in_page(pp, ph, addr)) {
2822 1.141 yamt goto found;
2823 1.141 yamt }
2824 1.141 yamt }
2825 1.141 yamt LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
2826 1.141 yamt if (pool_in_page(pp, ph, addr)) {
2827 1.143 yamt allocated =
2828 1.143 yamt pool_allocated(pp, ph, addr);
2829 1.143 yamt goto found;
2830 1.143 yamt }
2831 1.143 yamt }
2832 1.143 yamt LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
2833 1.143 yamt if (pool_in_page(pp, ph, addr)) {
2834 1.143 yamt allocated = false;
2835 1.141 yamt goto found;
2836 1.141 yamt }
2837 1.141 yamt }
2838 1.141 yamt continue;
2839 1.141 yamt } else {
2840 1.141 yamt ph = pr_find_pagehead_noalign(pp, (void *)addr);
2841 1.141 yamt if (ph == NULL || !pool_in_page(pp, ph, addr)) {
2842 1.141 yamt continue;
2843 1.141 yamt }
2844 1.143 yamt allocated = pool_allocated(pp, ph, addr);
2845 1.141 yamt }
2846 1.141 yamt found:
2847 1.143 yamt if (allocated && pp->pr_cache) {
2848 1.143 yamt pool_cache_t pc = pp->pr_cache;
2849 1.143 yamt struct pool_cache_group *pcg;
2850 1.143 yamt int i;
2851 1.143 yamt
2852 1.143 yamt for (pcg = pc->pc_fullgroups; pcg != NULL;
2853 1.143 yamt pcg = pcg->pcg_next) {
2854 1.143 yamt if (pool_in_cg(pp, pcg, addr)) {
2855 1.143 yamt incache = true;
2856 1.143 yamt goto print;
2857 1.143 yamt }
2858 1.143 yamt }
2859 1.183 ad for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
2860 1.143 yamt pool_cache_cpu_t *cc;
2861 1.143 yamt
2862 1.143 yamt if ((cc = pc->pc_cpus[i]) == NULL) {
2863 1.143 yamt continue;
2864 1.143 yamt }
2865 1.143 yamt if (pool_in_cg(pp, cc->cc_current, addr) ||
2866 1.143 yamt pool_in_cg(pp, cc->cc_previous, addr)) {
2867 1.143 yamt struct cpu_info *ci =
2868 1.170 ad cpu_lookup(i);
2869 1.143 yamt
2870 1.143 yamt incpucache = true;
2871 1.143 yamt snprintf(cpucachestr,
2872 1.143 yamt sizeof(cpucachestr),
2873 1.143 yamt "cached by CPU %u",
2874 1.153 martin ci->ci_index);
2875 1.143 yamt goto print;
2876 1.143 yamt }
2877 1.143 yamt }
2878 1.143 yamt }
2879 1.143 yamt print:
2880 1.141 yamt item = (uintptr_t)ph->ph_page + ph->ph_off;
2881 1.141 yamt item = item + rounddown(addr - item, pp->pr_size);
2882 1.143 yamt (*pr)("%p is %p+%zu in POOL '%s' (%s)\n",
2883 1.141 yamt (void *)addr, (void *)item, (size_t)(addr - item),
2884 1.143 yamt pp->pr_wchan,
2885 1.143 yamt incpucache ? cpucachestr :
2886 1.143 yamt incache ? "cached" : allocated ? "allocated" : "free");
2887 1.141 yamt }
2888 1.141 yamt }
2889 1.141 yamt #endif /* defined(DDB) */
2890 1.203 joerg
2891 1.203 joerg static int
2892 1.203 joerg pool_sysctl(SYSCTLFN_ARGS)
2893 1.203 joerg {
2894 1.203 joerg struct pool_sysctl data;
2895 1.203 joerg struct pool *pp;
2896 1.203 joerg struct pool_cache *pc;
2897 1.203 joerg pool_cache_cpu_t *cc;
2898 1.203 joerg int error;
2899 1.203 joerg size_t i, written;
2900 1.203 joerg
2901 1.203 joerg if (oldp == NULL) {
2902 1.203 joerg *oldlenp = 0;
2903 1.203 joerg TAILQ_FOREACH(pp, &pool_head, pr_poollist)
2904 1.203 joerg *oldlenp += sizeof(data);
2905 1.203 joerg return 0;
2906 1.203 joerg }
2907 1.203 joerg
2908 1.203 joerg memset(&data, 0, sizeof(data));
2909 1.203 joerg error = 0;
2910 1.203 joerg written = 0;
2911 1.203 joerg TAILQ_FOREACH(pp, &pool_head, pr_poollist) {
2912 1.203 joerg if (written + sizeof(data) > *oldlenp)
2913 1.203 joerg break;
2914 1.203 joerg strlcpy(data.pr_wchan, pp->pr_wchan, sizeof(data.pr_wchan));
2915 1.203 joerg data.pr_pagesize = pp->pr_alloc->pa_pagesz;
2916 1.203 joerg data.pr_flags = pp->pr_roflags | pp->pr_flags;
2917 1.203 joerg #define COPY(field) data.field = pp->field
2918 1.203 joerg COPY(pr_size);
2919 1.203 joerg
2920 1.203 joerg COPY(pr_itemsperpage);
2921 1.203 joerg COPY(pr_nitems);
2922 1.203 joerg COPY(pr_nout);
2923 1.203 joerg COPY(pr_hardlimit);
2924 1.203 joerg COPY(pr_npages);
2925 1.203 joerg COPY(pr_minpages);
2926 1.203 joerg COPY(pr_maxpages);
2927 1.203 joerg
2928 1.203 joerg COPY(pr_nget);
2929 1.203 joerg COPY(pr_nfail);
2930 1.203 joerg COPY(pr_nput);
2931 1.203 joerg COPY(pr_npagealloc);
2932 1.203 joerg COPY(pr_npagefree);
2933 1.203 joerg COPY(pr_hiwat);
2934 1.203 joerg COPY(pr_nidle);
2935 1.203 joerg #undef COPY
2936 1.203 joerg
2937 1.203 joerg data.pr_cache_nmiss_pcpu = 0;
2938 1.203 joerg data.pr_cache_nhit_pcpu = 0;
2939 1.203 joerg if (pp->pr_cache) {
2940 1.203 joerg pc = pp->pr_cache;
2941 1.203 joerg data.pr_cache_meta_size = pc->pc_pcgsize;
2942 1.203 joerg data.pr_cache_nfull = pc->pc_nfull;
2943 1.203 joerg data.pr_cache_npartial = pc->pc_npart;
2944 1.203 joerg data.pr_cache_nempty = pc->pc_nempty;
2945 1.203 joerg data.pr_cache_ncontended = pc->pc_contended;
2946 1.203 joerg data.pr_cache_nmiss_global = pc->pc_misses;
2947 1.203 joerg data.pr_cache_nhit_global = pc->pc_hits;
2948 1.203 joerg for (i = 0; i < pc->pc_ncpu; ++i) {
2949 1.203 joerg cc = pc->pc_cpus[i];
2950 1.203 joerg if (cc == NULL)
2951 1.203 joerg continue;
2952 1.206 knakahar data.pr_cache_nmiss_pcpu += cc->cc_misses;
2953 1.206 knakahar data.pr_cache_nhit_pcpu += cc->cc_hits;
2954 1.203 joerg }
2955 1.203 joerg } else {
2956 1.203 joerg data.pr_cache_meta_size = 0;
2957 1.203 joerg data.pr_cache_nfull = 0;
2958 1.203 joerg data.pr_cache_npartial = 0;
2959 1.203 joerg data.pr_cache_nempty = 0;
2960 1.203 joerg data.pr_cache_ncontended = 0;
2961 1.203 joerg data.pr_cache_nmiss_global = 0;
2962 1.203 joerg data.pr_cache_nhit_global = 0;
2963 1.203 joerg }
2964 1.203 joerg
2965 1.203 joerg error = sysctl_copyout(l, &data, oldp, sizeof(data));
2966 1.203 joerg if (error)
2967 1.203 joerg break;
2968 1.203 joerg written += sizeof(data);
2969 1.203 joerg oldp = (char *)oldp + sizeof(data);
2970 1.203 joerg }
2971 1.203 joerg
2972 1.203 joerg *oldlenp = written;
2973 1.203 joerg return error;
2974 1.203 joerg }
2975 1.203 joerg
2976 1.203 joerg SYSCTL_SETUP(sysctl_pool_setup, "sysctl kern.pool setup")
2977 1.203 joerg {
2978 1.203 joerg const struct sysctlnode *rnode = NULL;
2979 1.203 joerg
2980 1.203 joerg sysctl_createv(clog, 0, NULL, &rnode,
2981 1.203 joerg CTLFLAG_PERMANENT,
2982 1.203 joerg CTLTYPE_STRUCT, "pool",
2983 1.203 joerg SYSCTL_DESCR("Get pool statistics"),
2984 1.203 joerg pool_sysctl, 0, NULL, 0,
2985 1.203 joerg CTL_KERN, CTL_CREATE, CTL_EOL);
2986 1.203 joerg }
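
/*
 * Illustrative sketch (userland, not kernel): reading the kern.pool
 * node created above. vmstat(1) consumes it in a similar way; the
 * error handling here is abridged.
 */
#if 0
#include <sys/sysctl.h>
#include <sys/pool.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct pool_sysctl *data;
	size_t i, len;

	/* First call sizes the buffer, second call fills it. */
	if (sysctlbyname("kern.pool", NULL, &len, NULL, 0) == -1)
		return 1;
	data = malloc(len);
	if (data == NULL ||
	    sysctlbyname("kern.pool", data, &len, NULL, 0) == -1)
		return 1;
	for (i = 0; i < len / sizeof(*data); i++)
		printf("%s: %llu gets\n", data[i].pr_wchan,
		    (unsigned long long)data[i].pr_nget);
	free(data);
	return 0;
}
#endif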
2987