1 1.50.2.9 nathanw /* $NetBSD: subr_pool.c,v 1.50.2.9 2002/08/01 02:46:24 nathanw Exp $ */
2 1.1 pk
3 1.1 pk /*-
4 1.43 thorpej * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 1.1 pk * All rights reserved.
6 1.1 pk *
7 1.1 pk * This code is derived from software contributed to The NetBSD Foundation
8 1.20 thorpej * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 1.20 thorpej * Simulation Facility, NASA Ames Research Center.
10 1.1 pk *
11 1.1 pk * Redistribution and use in source and binary forms, with or without
12 1.1 pk * modification, are permitted provided that the following conditions
13 1.1 pk * are met:
14 1.1 pk * 1. Redistributions of source code must retain the above copyright
15 1.1 pk * notice, this list of conditions and the following disclaimer.
16 1.1 pk * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 pk * notice, this list of conditions and the following disclaimer in the
18 1.1 pk * documentation and/or other materials provided with the distribution.
19 1.1 pk * 3. All advertising materials mentioning features or use of this software
20 1.1 pk * must display the following acknowledgement:
21 1.13 christos * This product includes software developed by the NetBSD
22 1.13 christos * Foundation, Inc. and its contributors.
23 1.1 pk * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.1 pk * contributors may be used to endorse or promote products derived
25 1.1 pk * from this software without specific prior written permission.
26 1.1 pk *
27 1.1 pk * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.1 pk * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.1 pk * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.1 pk * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.1 pk * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.1 pk * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.1 pk * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.1 pk * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.1 pk * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.1 pk * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.1 pk * POSSIBILITY OF SUCH DAMAGE.
38 1.1 pk */
39 1.50.2.5 nathanw
40 1.50.2.5 nathanw #include <sys/cdefs.h>
41 1.50.2.9 nathanw __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.50.2.9 2002/08/01 02:46:24 nathanw Exp $");
42 1.24 scottr
43 1.25 thorpej #include "opt_pool.h"
44 1.24 scottr #include "opt_poollog.h"
45 1.28 thorpej #include "opt_lockdebug.h"
46 1.1 pk
47 1.1 pk #include <sys/param.h>
48 1.1 pk #include <sys/systm.h>
49 1.1 pk #include <sys/proc.h>
50 1.1 pk #include <sys/errno.h>
51 1.1 pk #include <sys/kernel.h>
52 1.1 pk #include <sys/malloc.h>
53 1.1 pk #include <sys/lock.h>
54 1.1 pk #include <sys/pool.h>
55 1.20 thorpej #include <sys/syslog.h>
56 1.3 pk
57 1.3 pk #include <uvm/uvm.h>
58 1.3 pk
59 1.1 pk /*
60 1.1 pk * Pool resource management utility.
61 1.3 pk *
62 1.3 pk * Memory is allocated in pages which are split into pieces according
63 1.3 pk * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64 1.3 pk * in the pool structure and the individual pool items are on a linked list
65 1.3 pk * headed by `ph_itemlist' in each page header. The memory for building
66 1.3 pk * the page list is either taken from the allocated pages themselves (for
67 1.3 pk * small pool items) or taken from an internal pool of page headers (`phpool').
68 1.1 pk */
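/*
 * A minimal usage sketch ("struct foo" and the pool name "foopl" are
 * hypothetical; passing NULL for the allocator selects the default
 * back-end chosen in pool_init()):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *
 *	void *v = pool_get(&foo_pool, PR_WAITOK);
 *	... use v ...
 *	pool_put(&foo_pool, v);
 */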
69 1.1 pk
70 1.3 pk /* List of all pools */
71 1.5 thorpej TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72 1.3 pk
73 1.3 pk /* Private pool for page header structures */
74 1.3 pk static struct pool phpool;
75 1.3 pk
76 1.50.2.4 nathanw #ifdef POOL_SUBPAGE
77 1.50.2.4 nathanw /* Pool of subpages for use by normal pools. */
78 1.50.2.4 nathanw static struct pool psppool;
79 1.50.2.4 nathanw #endif
80 1.50.2.4 nathanw
81 1.3 pk /* # of seconds to retain page after last use */
82 1.3 pk int pool_inactive_time = 10;
83 1.3 pk
84 1.3 pk /* Next candidate for drainage (see pool_drain()) */
85 1.23 thorpej static struct pool *drainpp;
86 1.23 thorpej
87 1.23 thorpej /* This spin lock protects both pool_head and drainpp. */
88 1.23 thorpej struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89 1.3 pk
90 1.3 pk struct pool_item_header {
91 1.3 pk /* Page headers */
92 1.3 pk TAILQ_ENTRY(pool_item_header)
93 1.3 pk ph_pagelist; /* pool page list */
94 1.3 pk TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95 1.3 pk LIST_ENTRY(pool_item_header)
96 1.3 pk ph_hashlist; /* Off-page page headers */
97 1.3 pk int ph_nmissing; /* # of chunks in use */
98 1.3 pk caddr_t ph_page; /* this page's address */
99 1.3 pk struct timeval ph_time; /* last referenced */
100 1.3 pk };
101 1.50.2.3 nathanw TAILQ_HEAD(pool_pagelist,pool_item_header);
102 1.3 pk
103 1.1 pk struct pool_item {
104 1.3 pk #ifdef DIAGNOSTIC
105 1.3 pk int pi_magic;
106 1.33 chs #endif
107 1.25 thorpej #define PI_MAGIC 0xdeadbeef
108 1.3 pk /* Other entries use only this list entry */
109 1.3 pk TAILQ_ENTRY(pool_item) pi_list;
110 1.3 pk };
111 1.3 pk
112 1.25 thorpej #define PR_HASH_INDEX(pp,addr) \
113 1.50.2.7 nathanw (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114 1.50.2.7 nathanw (PR_HASHTABSIZE - 1))
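/*
 * Example: with 4 KB pages (pa_pageshift == 12), an item whose page
 * starts at 0xc0402000 hashes to bucket
 * (0xc0402000 >> 12) & (PR_HASHTABSIZE - 1); PR_HASHTABSIZE (defined
 * in <sys/pool.h>) is assumed here to be a power of two.
 */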
115 1.3 pk
116 1.50.2.1 nathanw #define POOL_NEEDS_CATCHUP(pp) \
117 1.50.2.1 nathanw ((pp)->pr_nitems < (pp)->pr_minitems)
118 1.50.2.1 nathanw
119 1.43 thorpej /*
120 1.43 thorpej * Pool cache management.
121 1.43 thorpej *
122 1.43 thorpej * Pool caches provide a way for constructed objects to be cached by the
123 1.43 thorpej * pool subsystem. This can lead to performance improvements by avoiding
124 1.43 thorpej * needless object construction/destruction; it is deferred until absolutely
125 1.43 thorpej * necessary.
126 1.43 thorpej *
127 1.43 thorpej * Caches are grouped into cache groups. Each cache group references
128 1.43 thorpej * up to 16 constructed objects. When a cache allocates an object
129 1.43 thorpej * from the pool, it calls the object's constructor and places it into
130 1.43 thorpej * a cache group. When a cache group frees an object back to the pool,
131 1.43 thorpej * it first calls the object's destructor. This allows the object to
132 1.43 thorpej * persist in constructed form while freed to the cache.
133 1.43 thorpej *
134 1.43 thorpej * Multiple caches may exist for each pool. This allows a single
135 1.43 thorpej * object type to have multiple constructed forms. The pool references
136 1.43 thorpej * each cache, so that when a pool is drained by the pagedaemon, it can
137 1.43 thorpej * drain each individual cache as well. Each time a cache is drained,
138 1.43 thorpej * the most idle cache group is freed to the pool in its entirety.
139 1.43 thorpej *
140 1.43 thorpej  * Pool caches are laid on top of pools. By layering them, we can avoid
141 1.43 thorpej * the complexity of cache management for pools which would not benefit
142 1.43 thorpej * from it.
143 1.43 thorpej */
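/*
 * A hedged usage sketch, assuming the pool_cache_init()/
 * pool_cache_get()/pool_cache_put() interface declared in
 * <sys/pool.h>; "foo_pool", "foo_ctor" and "foo_dtor" are
 * hypothetical:
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	v = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, v);
 */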
144 1.43 thorpej
145 1.43 thorpej /* The cache group pool. */
146 1.43 thorpej static struct pool pcgpool;
147 1.43 thorpej
148 1.43 thorpej static void pool_cache_reclaim(struct pool_cache *);
149 1.3 pk
150 1.42 thorpej static int pool_catchup(struct pool *);
151 1.50.2.1 nathanw static void pool_prime_page(struct pool *, caddr_t,
152 1.50.2.1 nathanw struct pool_item_header *);
153 1.50.2.7 nathanw
154 1.50.2.7 nathanw void *pool_allocator_alloc(struct pool *, int);
155 1.50.2.7 nathanw void pool_allocator_free(struct pool *, void *);
156 1.3 pk
157 1.42 thorpej static void pool_print1(struct pool *, const char *,
158 1.42 thorpej void (*)(const char *, ...));
159 1.3 pk
160 1.3 pk /*
161 1.50.2.1 nathanw * Pool log entry. An array of these is allocated in pool_init().
162 1.3 pk */
163 1.3 pk struct pool_log {
164 1.3 pk const char *pl_file;
165 1.3 pk long pl_line;
166 1.3 pk int pl_action;
167 1.25 thorpej #define PRLOG_GET 1
168 1.25 thorpej #define PRLOG_PUT 2
169 1.3 pk void *pl_addr;
170 1.1 pk };
171 1.1 pk
172 1.3 pk /* Number of entries in pool log buffers */
173 1.17 thorpej #ifndef POOL_LOGSIZE
174 1.17 thorpej #define POOL_LOGSIZE 10
175 1.17 thorpej #endif
176 1.17 thorpej
177 1.17 thorpej int pool_logsize = POOL_LOGSIZE;
178 1.1 pk
179 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
180 1.42 thorpej static __inline void
181 1.42 thorpej pr_log(struct pool *pp, void *v, int action, const char *file, long line)
182 1.3 pk {
183 1.3 pk int n = pp->pr_curlogentry;
184 1.3 pk struct pool_log *pl;
185 1.3 pk
186 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
187 1.3 pk return;
188 1.3 pk
189 1.3 pk /*
190 1.3 pk * Fill in the current entry. Wrap around and overwrite
191 1.3 pk * the oldest entry if necessary.
192 1.3 pk */
193 1.3 pk pl = &pp->pr_log[n];
194 1.3 pk pl->pl_file = file;
195 1.3 pk pl->pl_line = line;
196 1.3 pk pl->pl_action = action;
197 1.3 pk pl->pl_addr = v;
198 1.3 pk if (++n >= pp->pr_logsize)
199 1.3 pk n = 0;
200 1.3 pk pp->pr_curlogentry = n;
201 1.3 pk }
202 1.3 pk
203 1.3 pk static void
204 1.42 thorpej pr_printlog(struct pool *pp, struct pool_item *pi,
205 1.42 thorpej void (*pr)(const char *, ...))
206 1.3 pk {
207 1.3 pk int i = pp->pr_logsize;
208 1.3 pk int n = pp->pr_curlogentry;
209 1.3 pk
210 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
211 1.3 pk return;
212 1.3 pk
213 1.3 pk /*
214 1.3 pk * Print all entries in this pool's log.
215 1.3 pk */
216 1.3 pk while (i-- > 0) {
217 1.3 pk struct pool_log *pl = &pp->pr_log[n];
218 1.3 pk if (pl->pl_action != 0) {
219 1.25 thorpej if (pi == NULL || pi == pl->pl_addr) {
220 1.25 thorpej (*pr)("\tlog entry %d:\n", i);
221 1.25 thorpej (*pr)("\t\taction = %s, addr = %p\n",
222 1.25 thorpej pl->pl_action == PRLOG_GET ? "get" : "put",
223 1.25 thorpej pl->pl_addr);
224 1.25 thorpej (*pr)("\t\tfile: %s at line %lu\n",
225 1.25 thorpej pl->pl_file, pl->pl_line);
226 1.25 thorpej }
227 1.3 pk }
228 1.3 pk if (++n >= pp->pr_logsize)
229 1.3 pk n = 0;
230 1.3 pk }
231 1.3 pk }
232 1.25 thorpej
233 1.42 thorpej static __inline void
234 1.42 thorpej pr_enter(struct pool *pp, const char *file, long line)
235 1.25 thorpej {
236 1.25 thorpej
237 1.34 thorpej if (__predict_false(pp->pr_entered_file != NULL)) {
238 1.25 thorpej printf("pool %s: reentrancy at file %s line %ld\n",
239 1.25 thorpej pp->pr_wchan, file, line);
240 1.25 thorpej printf(" previous entry at file %s line %ld\n",
241 1.25 thorpej pp->pr_entered_file, pp->pr_entered_line);
242 1.25 thorpej panic("pr_enter");
243 1.25 thorpej }
244 1.25 thorpej
245 1.25 thorpej pp->pr_entered_file = file;
246 1.25 thorpej pp->pr_entered_line = line;
247 1.25 thorpej }
248 1.25 thorpej
249 1.42 thorpej static __inline void
250 1.42 thorpej pr_leave(struct pool *pp)
251 1.25 thorpej {
252 1.25 thorpej
253 1.34 thorpej if (__predict_false(pp->pr_entered_file == NULL)) {
254 1.25 thorpej printf("pool %s not entered?\n", pp->pr_wchan);
255 1.25 thorpej panic("pr_leave");
256 1.25 thorpej }
257 1.25 thorpej
258 1.25 thorpej pp->pr_entered_file = NULL;
259 1.25 thorpej pp->pr_entered_line = 0;
260 1.25 thorpej }
261 1.25 thorpej
262 1.42 thorpej static __inline void
263 1.42 thorpej pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
264 1.25 thorpej {
265 1.25 thorpej
266 1.25 thorpej if (pp->pr_entered_file != NULL)
267 1.25 thorpej (*pr)("\n\tcurrently entered from file %s line %ld\n",
268 1.25 thorpej pp->pr_entered_file, pp->pr_entered_line);
269 1.25 thorpej }
270 1.3 pk #else
271 1.25 thorpej #define pr_log(pp, v, action, file, line)
272 1.25 thorpej #define pr_printlog(pp, pi, pr)
273 1.25 thorpej #define pr_enter(pp, file, line)
274 1.25 thorpej #define pr_leave(pp)
275 1.25 thorpej #define pr_enter_check(pp, pr)
276 1.50.2.1 nathanw #endif /* POOL_DIAGNOSTIC */
277 1.3 pk
278 1.3 pk /*
279 1.3 pk * Return the pool page header based on page address.
280 1.3 pk */
281 1.42 thorpej static __inline struct pool_item_header *
282 1.42 thorpej pr_find_pagehead(struct pool *pp, caddr_t page)
283 1.3 pk {
284 1.3 pk struct pool_item_header *ph;
285 1.3 pk
286 1.20 thorpej if ((pp->pr_roflags & PR_PHINPAGE) != 0)
287 1.3 pk return ((struct pool_item_header *)(page + pp->pr_phoffset));
288 1.3 pk
289 1.3 pk for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
290 1.3 pk ph != NULL;
291 1.3 pk ph = LIST_NEXT(ph, ph_hashlist)) {
292 1.3 pk if (ph->ph_page == page)
293 1.3 pk return (ph);
294 1.3 pk }
295 1.3 pk return (NULL);
296 1.3 pk }
297 1.3 pk
298 1.3 pk /*
299 1.3 pk * Remove a page from the pool.
300 1.3 pk */
301 1.42 thorpej static __inline void
302 1.50.2.3 nathanw pr_rmpage(struct pool *pp, struct pool_item_header *ph,
303 1.50.2.3 nathanw struct pool_pagelist *pq)
304 1.3 pk {
305 1.50.2.3 nathanw int s;
306 1.3 pk
307 1.3 pk /*
308 1.7 thorpej * If the page was idle, decrement the idle page count.
309 1.3 pk */
310 1.6 thorpej if (ph->ph_nmissing == 0) {
311 1.6 thorpej #ifdef DIAGNOSTIC
312 1.6 thorpej if (pp->pr_nidle == 0)
313 1.6 thorpej panic("pr_rmpage: nidle inconsistent");
314 1.20 thorpej if (pp->pr_nitems < pp->pr_itemsperpage)
315 1.20 thorpej panic("pr_rmpage: nitems inconsistent");
316 1.6 thorpej #endif
317 1.6 thorpej pp->pr_nidle--;
318 1.6 thorpej }
319 1.7 thorpej
320 1.20 thorpej pp->pr_nitems -= pp->pr_itemsperpage;
321 1.20 thorpej
322 1.7 thorpej /*
323 1.50.2.3 nathanw * Unlink a page from the pool and release it (or queue it for release).
324 1.7 thorpej */
325 1.7 thorpej TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
326 1.50.2.3 nathanw if (pq) {
327 1.50.2.3 nathanw TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
328 1.50.2.3 nathanw } else {
329 1.50.2.7 nathanw pool_allocator_free(pp, ph->ph_page);
330 1.50.2.3 nathanw if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
331 1.50.2.3 nathanw LIST_REMOVE(ph, ph_hashlist);
332 1.50.2.3 nathanw s = splhigh();
333 1.50.2.3 nathanw pool_put(&phpool, ph);
334 1.50.2.3 nathanw splx(s);
335 1.50.2.3 nathanw }
336 1.50.2.3 nathanw }
337 1.7 thorpej pp->pr_npages--;
338 1.7 thorpej pp->pr_npagefree++;
339 1.6 thorpej
340 1.3 pk if (pp->pr_curpage == ph) {
341 1.3 pk /*
342 1.3 pk * Find a new non-empty page header, if any.
343 1.3 pk * Start search from the page head, to increase the
344 1.3 pk * chance for "high water" pages to be freed.
345 1.3 pk */
346 1.50.2.3 nathanw TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
347 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
348 1.3 pk break;
349 1.3 pk
350 1.3 pk pp->pr_curpage = ph;
351 1.21 thorpej }
352 1.3 pk }
353 1.3 pk
354 1.3 pk /*
355 1.3 pk * Initialize the given pool resource structure.
356 1.3 pk *
357 1.3 pk * We export this routine to allow other kernel parts to declare
358 1.3 pk * static pools that must be initialized before malloc() is available.
359 1.3 pk */
360 1.3 pk void
361 1.42 thorpej pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
362 1.50.2.7 nathanw const char *wchan, struct pool_allocator *palloc)
363 1.3 pk {
364 1.16 briggs int off, slack, i;
365 1.3 pk
366 1.25 thorpej #ifdef POOL_DIAGNOSTIC
367 1.25 thorpej /*
368 1.25 thorpej * Always log if POOL_DIAGNOSTIC is defined.
369 1.25 thorpej */
370 1.25 thorpej if (pool_logsize != 0)
371 1.25 thorpej flags |= PR_LOGGING;
372 1.25 thorpej #endif
373 1.25 thorpej
374 1.50.2.7 nathanw #ifdef POOL_SUBPAGE
375 1.3 pk /*
376 1.50.2.7 nathanw * XXX We don't provide a real `nointr' back-end
377 1.50.2.7 nathanw * yet; all sub-pages come from a kmem back-end.
378 1.50.2.7 nathanw 	 * Maybe some day...
379 1.3 pk */
380 1.50.2.7 nathanw if (palloc == NULL) {
381 1.50.2.7 nathanw extern struct pool_allocator pool_allocator_kmem_subpage;
382 1.50.2.7 nathanw palloc = &pool_allocator_kmem_subpage;
383 1.50.2.7 nathanw }
384 1.50.2.7 nathanw /*
385 1.50.2.7 nathanw * We'll assume any user-specified back-end allocator
386 1.50.2.7 nathanw 	 * will deal with sub-pages, or simply doesn't care.
387 1.50.2.7 nathanw */
388 1.50.2.7 nathanw #else
389 1.50.2.7 nathanw if (palloc == NULL)
390 1.50.2.7 nathanw palloc = &pool_allocator_kmem;
391 1.50.2.7 nathanw #endif /* POOL_SUBPAGE */
392 1.50.2.7 nathanw if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
393 1.50.2.7 nathanw if (palloc->pa_pagesz == 0) {
394 1.50.2.4 nathanw #ifdef POOL_SUBPAGE
395 1.50.2.7 nathanw if (palloc == &pool_allocator_kmem)
396 1.50.2.7 nathanw palloc->pa_pagesz = PAGE_SIZE;
397 1.50.2.7 nathanw else
398 1.50.2.7 nathanw palloc->pa_pagesz = POOL_SUBPAGE;
399 1.50.2.4 nathanw #else
400 1.50.2.7 nathanw palloc->pa_pagesz = PAGE_SIZE;
401 1.50.2.7 nathanw #endif /* POOL_SUBPAGE */
402 1.50.2.7 nathanw }
403 1.50.2.7 nathanw
404 1.50.2.7 nathanw TAILQ_INIT(&palloc->pa_list);
405 1.50.2.7 nathanw
406 1.50.2.7 nathanw simple_lock_init(&palloc->pa_slock);
407 1.50.2.7 nathanw palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
408 1.50.2.7 nathanw palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
409 1.50.2.7 nathanw palloc->pa_flags |= PA_INITIALIZED;
410 1.4 thorpej }
411 1.3 pk
412 1.3 pk if (align == 0)
413 1.3 pk align = ALIGN(1);
414 1.14 thorpej
415 1.14 thorpej if (size < sizeof(struct pool_item))
416 1.14 thorpej size = sizeof(struct pool_item);
417 1.3 pk
418 1.50.2.9 nathanw size = roundup(size, align);
419 1.50.2.7 nathanw #ifdef DIAGNOSTIC
420 1.50.2.7 nathanw if (size > palloc->pa_pagesz)
421 1.35 pk panic("pool_init: pool item size (%lu) too large",
422 1.35 pk (u_long)size);
423 1.50.2.7 nathanw #endif
424 1.35 pk
425 1.3 pk /*
426 1.3 pk * Initialize the pool structure.
427 1.3 pk */
428 1.3 pk TAILQ_INIT(&pp->pr_pagelist);
429 1.43 thorpej TAILQ_INIT(&pp->pr_cachelist);
430 1.3 pk pp->pr_curpage = NULL;
431 1.3 pk pp->pr_npages = 0;
432 1.3 pk pp->pr_minitems = 0;
433 1.3 pk pp->pr_minpages = 0;
434 1.3 pk pp->pr_maxpages = UINT_MAX;
435 1.20 thorpej pp->pr_roflags = flags;
436 1.20 thorpej pp->pr_flags = 0;
437 1.35 pk pp->pr_size = size;
438 1.3 pk pp->pr_align = align;
439 1.3 pk pp->pr_wchan = wchan;
440 1.50.2.7 nathanw pp->pr_alloc = palloc;
441 1.20 thorpej pp->pr_nitems = 0;
442 1.20 thorpej pp->pr_nout = 0;
443 1.20 thorpej pp->pr_hardlimit = UINT_MAX;
444 1.20 thorpej pp->pr_hardlimit_warning = NULL;
445 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = 0;
446 1.31 thorpej pp->pr_hardlimit_ratecap.tv_usec = 0;
447 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
448 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
449 1.50.2.7 nathanw pp->pr_drain_hook = NULL;
450 1.50.2.7 nathanw pp->pr_drain_hook_arg = NULL;
451 1.3 pk
452 1.3 pk /*
453 1.3 pk * Decide whether to put the page header off page to avoid
454 1.3 pk * wasting too large a part of the page. Off-page page headers
455 1.3 pk * go on a hash table, so we can match a returned item
456 1.3 pk * with its header based on the page address.
457 1.3 pk * We use 1/16 of the page size as the threshold (XXX: tune)
458 1.3 pk */
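	/*
	 * Example: with pa_pagesz == 4096 the threshold is 256 bytes, so
	 * items of up to 255 bytes keep their header in-page (PR_PHINPAGE),
	 * while larger items get an off-page header allocated from phpool.
	 */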
459 1.50.2.7 nathanw if (pp->pr_size < palloc->pa_pagesz/16) {
460 1.3 pk /* Use the end of the page for the page header */
461 1.20 thorpej pp->pr_roflags |= PR_PHINPAGE;
462 1.50.2.7 nathanw pp->pr_phoffset = off = palloc->pa_pagesz -
463 1.50.2.7 nathanw ALIGN(sizeof(struct pool_item_header));
464 1.2 pk } else {
465 1.3 pk /* The page header will be taken from our page header pool */
466 1.3 pk pp->pr_phoffset = 0;
467 1.50.2.7 nathanw off = palloc->pa_pagesz;
468 1.16 briggs for (i = 0; i < PR_HASHTABSIZE; i++) {
469 1.16 briggs LIST_INIT(&pp->pr_hashtab[i]);
470 1.16 briggs }
471 1.2 pk }
472 1.1 pk
473 1.3 pk /*
474 1.3 pk * Alignment is to take place at `ioff' within the item. This means
475 1.3 pk * we must reserve up to `align - 1' bytes on the page to allow
476 1.3 pk * appropriate positioning of each item.
477 1.3 pk *
478 1.3 pk * Silently enforce `0 <= ioff < align'.
479 1.3 pk */
480 1.3 pk pp->pr_itemoffset = ioff = ioff % align;
481 1.3 pk pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
482 1.43 thorpej KASSERT(pp->pr_itemsperpage != 0);
483 1.3 pk
484 1.3 pk /*
485 1.3 pk * Use the slack between the chunks and the page header
486 1.3 pk * for "cache coloring".
487 1.3 pk */
488 1.3 pk slack = off - pp->pr_itemsperpage * pp->pr_size;
489 1.3 pk pp->pr_maxcolor = (slack / align) * align;
490 1.3 pk pp->pr_curcolor = 0;
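	/*
	 * Worked example (numbers are illustrative): with a 4096-byte
	 * page, an in-page header of, say, 48 bytes (so off == 4048),
	 * pr_size == 256, pr_align == 32 and ioff == 0, we get
	 * itemsperpage == 15, slack == 4048 - 15*256 == 208, hence
	 * pr_maxcolor == 192; successive pages start their first item
	 * at offsets 0, 32, 64, ..., 192 before wrapping back to 0.
	 */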
491 1.3 pk
492 1.3 pk pp->pr_nget = 0;
493 1.3 pk pp->pr_nfail = 0;
494 1.3 pk pp->pr_nput = 0;
495 1.3 pk pp->pr_npagealloc = 0;
496 1.3 pk pp->pr_npagefree = 0;
497 1.1 pk pp->pr_hiwat = 0;
498 1.8 thorpej pp->pr_nidle = 0;
499 1.3 pk
500 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
501 1.25 thorpej if (flags & PR_LOGGING) {
502 1.25 thorpej if (kmem_map == NULL ||
503 1.25 thorpej (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
504 1.25 thorpej M_TEMP, M_NOWAIT)) == NULL)
505 1.20 thorpej pp->pr_roflags &= ~PR_LOGGING;
506 1.3 pk pp->pr_curlogentry = 0;
507 1.3 pk pp->pr_logsize = pool_logsize;
508 1.3 pk }
509 1.50.2.1 nathanw #endif
510 1.25 thorpej
511 1.25 thorpej pp->pr_entered_file = NULL;
512 1.25 thorpej pp->pr_entered_line = 0;
513 1.3 pk
514 1.21 thorpej simple_lock_init(&pp->pr_slock);
515 1.1 pk
516 1.3 pk /*
517 1.43 thorpej * Initialize private page header pool and cache magazine pool if we
518 1.43 thorpej * haven't done so yet.
519 1.23 thorpej * XXX LOCKING.
520 1.3 pk */
521 1.3 pk if (phpool.pr_size == 0) {
522 1.50.2.4 nathanw #ifdef POOL_SUBPAGE
523 1.50.2.4 nathanw pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
524 1.50.2.7 nathanw "phpool", &pool_allocator_kmem);
525 1.50.2.4 nathanw pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
526 1.50.2.7 nathanw PR_RECURSIVE, "psppool", &pool_allocator_kmem);
527 1.50.2.4 nathanw #else
528 1.3 pk pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
529 1.50.2.7 nathanw 0, "phpool", NULL);
530 1.50.2.4 nathanw #endif
531 1.43 thorpej pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
532 1.50.2.7 nathanw 0, "pcgpool", NULL);
533 1.1 pk }
534 1.1 pk
535 1.23 thorpej /* Insert into the list of all pools. */
536 1.23 thorpej simple_lock(&pool_head_slock);
537 1.23 thorpej TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
538 1.23 thorpej simple_unlock(&pool_head_slock);
539 1.50.2.7 nathanw
540 1.50.2.7 nathanw /* Insert this into the list of pools using this allocator. */
541 1.50.2.7 nathanw simple_lock(&palloc->pa_slock);
542 1.50.2.7 nathanw TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
543 1.50.2.7 nathanw simple_unlock(&palloc->pa_slock);
544 1.1 pk }
545 1.1 pk
546 1.1 pk /*
547 1.1 pk  * De-commission a pool resource.
548 1.1 pk */
549 1.1 pk void
550 1.42 thorpej pool_destroy(struct pool *pp)
551 1.1 pk {
552 1.3 pk struct pool_item_header *ph;
553 1.43 thorpej struct pool_cache *pc;
554 1.43 thorpej
555 1.50.2.7 nathanw /* Locking order: pool_allocator -> pool */
556 1.50.2.7 nathanw simple_lock(&pp->pr_alloc->pa_slock);
557 1.50.2.7 nathanw TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
558 1.50.2.7 nathanw simple_unlock(&pp->pr_alloc->pa_slock);
559 1.50.2.7 nathanw
560 1.43 thorpej /* Destroy all caches for this pool. */
561 1.43 thorpej while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
562 1.43 thorpej pool_cache_destroy(pc);
563 1.3 pk
564 1.3 pk #ifdef DIAGNOSTIC
565 1.20 thorpej if (pp->pr_nout != 0) {
566 1.25 thorpej pr_printlog(pp, NULL, printf);
567 1.20 thorpej 		panic("pool_destroy: pool busy: still out: %u",
568 1.20 thorpej pp->pr_nout);
569 1.3 pk }
570 1.3 pk #endif
571 1.1 pk
572 1.3 pk /* Remove all pages */
573 1.50.2.7 nathanw while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
574 1.50.2.7 nathanw pr_rmpage(pp, ph, NULL);
575 1.3 pk
576 1.3 pk /* Remove from global pool list */
577 1.23 thorpej simple_lock(&pool_head_slock);
578 1.3 pk TAILQ_REMOVE(&pool_head, pp, pr_poollist);
579 1.50.2.3 nathanw if (drainpp == pp) {
580 1.50.2.3 nathanw drainpp = NULL;
581 1.50.2.3 nathanw }
582 1.23 thorpej simple_unlock(&pool_head_slock);
583 1.3 pk
584 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
585 1.20 thorpej if ((pp->pr_roflags & PR_LOGGING) != 0)
586 1.3 pk free(pp->pr_log, M_TEMP);
587 1.50.2.1 nathanw #endif
588 1.50.2.7 nathanw }
589 1.2 pk
590 1.50.2.7 nathanw void
591 1.50.2.7 nathanw pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
592 1.50.2.7 nathanw {
593 1.50.2.7 nathanw
594 1.50.2.7 nathanw /* XXX no locking -- must be used just after pool_init() */
595 1.50.2.7 nathanw #ifdef DIAGNOSTIC
596 1.50.2.7 nathanw if (pp->pr_drain_hook != NULL)
597 1.50.2.7 nathanw panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
598 1.50.2.7 nathanw #endif
599 1.50.2.7 nathanw pp->pr_drain_hook = fn;
600 1.50.2.7 nathanw pp->pr_drain_hook_arg = arg;
601 1.1 pk }
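/*
 * A hedged usage sketch ("foo_pool" and "foo_drain" are hypothetical):
 * a subsystem that can give memory back registers its callback right
 * after pool_init(), matching the void (*fn)(void *, int) signature
 * above.
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... release idle items back to foo_pool ...
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */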
602 1.1 pk
603 1.50.2.1 nathanw static __inline struct pool_item_header *
604 1.50.2.1 nathanw pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
605 1.50.2.1 nathanw {
606 1.50.2.1 nathanw struct pool_item_header *ph;
607 1.50.2.1 nathanw int s;
608 1.50.2.1 nathanw
609 1.50.2.1 nathanw LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
610 1.50.2.1 nathanw
611 1.50.2.1 nathanw if ((pp->pr_roflags & PR_PHINPAGE) != 0)
612 1.50.2.1 nathanw ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
613 1.50.2.1 nathanw else {
614 1.50.2.1 nathanw s = splhigh();
615 1.50.2.1 nathanw ph = pool_get(&phpool, flags);
616 1.50.2.1 nathanw splx(s);
617 1.50.2.1 nathanw }
618 1.50.2.1 nathanw
619 1.50.2.1 nathanw return (ph);
620 1.50.2.1 nathanw }
621 1.1 pk
622 1.1 pk /*
623 1.3 pk * Grab an item from the pool; must be called at appropriate spl level
624 1.1 pk */
625 1.3 pk void *
626 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
627 1.42 thorpej _pool_get(struct pool *pp, int flags, const char *file, long line)
628 1.50.2.1 nathanw #else
629 1.50.2.1 nathanw pool_get(struct pool *pp, int flags)
630 1.50.2.1 nathanw #endif
631 1.1 pk {
632 1.1 pk struct pool_item *pi;
633 1.3 pk struct pool_item_header *ph;
634 1.50.2.1 nathanw void *v;
635 1.1 pk
636 1.2 pk #ifdef DIAGNOSTIC
637 1.50.2.8 nathanw if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
638 1.37 sommerfe (flags & PR_WAITOK) != 0))
639 1.50.2.9 nathanw panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
640 1.1 pk
641 1.50.2.1 nathanw #ifdef LOCKDEBUG
642 1.50.2.1 nathanw if (flags & PR_WAITOK)
643 1.50.2.1 nathanw simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
644 1.50.2.1 nathanw #endif
645 1.50.2.1 nathanw #endif /* DIAGNOSTIC */
646 1.50.2.1 nathanw
647 1.21 thorpej simple_lock(&pp->pr_slock);
648 1.25 thorpej pr_enter(pp, file, line);
649 1.20 thorpej
650 1.20 thorpej startover:
651 1.20 thorpej /*
652 1.20 thorpej * Check to see if we've reached the hard limit. If we have,
653 1.20 thorpej * and we can wait, then wait until an item has been returned to
654 1.20 thorpej * the pool.
655 1.20 thorpej */
656 1.20 thorpej #ifdef DIAGNOSTIC
657 1.34 thorpej if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
658 1.25 thorpej pr_leave(pp);
659 1.21 thorpej simple_unlock(&pp->pr_slock);
660 1.20 thorpej panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
661 1.20 thorpej }
662 1.20 thorpej #endif
663 1.34 thorpej if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
664 1.50.2.7 nathanw if (pp->pr_drain_hook != NULL) {
665 1.50.2.7 nathanw /*
666 1.50.2.7 nathanw * Since the drain hook is going to free things
667 1.50.2.7 nathanw * back to the pool, unlock, call the hook, re-lock,
668 1.50.2.7 nathanw * and check the hardlimit condition again.
669 1.50.2.7 nathanw */
670 1.50.2.7 nathanw pr_leave(pp);
671 1.50.2.7 nathanw simple_unlock(&pp->pr_slock);
672 1.50.2.7 nathanw (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
673 1.50.2.7 nathanw simple_lock(&pp->pr_slock);
674 1.50.2.7 nathanw pr_enter(pp, file, line);
675 1.50.2.7 nathanw if (pp->pr_nout < pp->pr_hardlimit)
676 1.50.2.7 nathanw goto startover;
677 1.50.2.7 nathanw }
678 1.50.2.7 nathanw
679 1.29 sommerfe if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
680 1.20 thorpej /*
681 1.20 thorpej * XXX: A warning isn't logged in this case. Should
682 1.20 thorpej * it be?
683 1.20 thorpej */
684 1.20 thorpej pp->pr_flags |= PR_WANTED;
685 1.25 thorpej pr_leave(pp);
686 1.40 sommerfe ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
687 1.25 thorpej pr_enter(pp, file, line);
688 1.20 thorpej goto startover;
689 1.20 thorpej }
690 1.31 thorpej
691 1.31 thorpej /*
692 1.31 thorpej * Log a message that the hard limit has been hit.
693 1.31 thorpej */
694 1.31 thorpej if (pp->pr_hardlimit_warning != NULL &&
695 1.31 thorpej ratecheck(&pp->pr_hardlimit_warning_last,
696 1.31 thorpej &pp->pr_hardlimit_ratecap))
697 1.31 thorpej log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
698 1.21 thorpej
699 1.21 thorpej pp->pr_nfail++;
700 1.21 thorpej
701 1.25 thorpej pr_leave(pp);
702 1.21 thorpej simple_unlock(&pp->pr_slock);
703 1.20 thorpej return (NULL);
704 1.20 thorpej }
705 1.20 thorpej
706 1.3 pk /*
707 1.3 pk * The convention we use is that if `curpage' is not NULL, then
708 1.3 pk * it points at a non-empty bucket. In particular, `curpage'
709 1.3 pk * never points at a page header which has PR_PHINPAGE set and
710 1.3 pk * has no items in its bucket.
711 1.3 pk */
712 1.20 thorpej if ((ph = pp->pr_curpage) == NULL) {
713 1.20 thorpej #ifdef DIAGNOSTIC
714 1.20 thorpej if (pp->pr_nitems != 0) {
715 1.21 thorpej simple_unlock(&pp->pr_slock);
716 1.20 thorpej printf("pool_get: %s: curpage NULL, nitems %u\n",
717 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
718 1.20 thorpej 			panic("pool_get: nitems inconsistent");
719 1.20 thorpej }
720 1.20 thorpej #endif
721 1.20 thorpej
722 1.21 thorpej /*
723 1.21 thorpej * Call the back-end page allocator for more memory.
724 1.21 thorpej * Release the pool lock, as the back-end page allocator
725 1.21 thorpej * may block.
726 1.21 thorpej */
727 1.25 thorpej pr_leave(pp);
728 1.21 thorpej simple_unlock(&pp->pr_slock);
729 1.50.2.7 nathanw v = pool_allocator_alloc(pp, flags);
730 1.50.2.1 nathanw if (__predict_true(v != NULL))
731 1.50.2.1 nathanw ph = pool_alloc_item_header(pp, v, flags);
732 1.21 thorpej simple_lock(&pp->pr_slock);
733 1.25 thorpej pr_enter(pp, file, line);
734 1.15 pk
735 1.50.2.1 nathanw if (__predict_false(v == NULL || ph == NULL)) {
736 1.50.2.1 nathanw if (v != NULL)
737 1.50.2.7 nathanw pool_allocator_free(pp, v);
738 1.50.2.1 nathanw
739 1.21 thorpej /*
740 1.50.2.1 nathanw * We were unable to allocate a page or item
741 1.50.2.1 nathanw * header, but we released the lock during
742 1.50.2.1 nathanw * allocation, so perhaps items were freed
743 1.50.2.1 nathanw * back to the pool. Check for this case.
744 1.21 thorpej */
745 1.21 thorpej if (pp->pr_curpage != NULL)
746 1.21 thorpej goto startover;
747 1.15 pk
748 1.3 pk if ((flags & PR_WAITOK) == 0) {
749 1.3 pk pp->pr_nfail++;
750 1.25 thorpej pr_leave(pp);
751 1.21 thorpej simple_unlock(&pp->pr_slock);
752 1.1 pk return (NULL);
753 1.3 pk }
754 1.3 pk
755 1.15 pk /*
756 1.15 pk * Wait for items to be returned to this pool.
757 1.21 thorpej *
758 1.20 thorpej * XXX: maybe we should wake up once a second and
759 1.20 thorpej * try again?
760 1.15 pk */
761 1.1 pk pp->pr_flags |= PR_WANTED;
762 1.50.2.7 nathanw /* PA_WANTED is already set on the allocator. */
763 1.25 thorpej pr_leave(pp);
764 1.40 sommerfe ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
765 1.25 thorpej pr_enter(pp, file, line);
766 1.20 thorpej goto startover;
767 1.1 pk }
768 1.3 pk
769 1.15 pk /* We have more memory; add it to the pool */
770 1.50.2.1 nathanw pool_prime_page(pp, v, ph);
771 1.15 pk pp->pr_npagealloc++;
772 1.15 pk
773 1.20 thorpej /* Start the allocation process over. */
774 1.20 thorpej goto startover;
775 1.3 pk }
776 1.3 pk
777 1.34 thorpej if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
778 1.25 thorpej pr_leave(pp);
779 1.21 thorpej simple_unlock(&pp->pr_slock);
780 1.3 pk panic("pool_get: %s: page empty", pp->pr_wchan);
781 1.21 thorpej }
782 1.20 thorpej #ifdef DIAGNOSTIC
783 1.34 thorpej if (__predict_false(pp->pr_nitems == 0)) {
784 1.25 thorpej pr_leave(pp);
785 1.21 thorpej simple_unlock(&pp->pr_slock);
786 1.20 thorpej printf("pool_get: %s: items on itemlist, nitems %u\n",
787 1.20 thorpej pp->pr_wchan, pp->pr_nitems);
788 1.20 thorpej 		panic("pool_get: nitems inconsistent");
789 1.20 thorpej }
790 1.50.2.6 nathanw #endif
791 1.50.2.1 nathanw
792 1.50.2.6 nathanw #ifdef POOL_DIAGNOSTIC
793 1.3 pk pr_log(pp, v, PRLOG_GET, file, line);
794 1.50.2.6 nathanw #endif
795 1.3 pk
796 1.50.2.6 nathanw #ifdef DIAGNOSTIC
797 1.34 thorpej if (__predict_false(pi->pi_magic != PI_MAGIC)) {
798 1.25 thorpej pr_printlog(pp, pi, printf);
799 1.3 pk panic("pool_get(%s): free list modified: magic=%x; page %p;"
800 1.3 pk 		    " item addr %p",
801 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
802 1.3 pk }
803 1.3 pk #endif
804 1.3 pk
805 1.3 pk /*
806 1.3 pk * Remove from item list.
807 1.3 pk */
808 1.3 pk TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
809 1.20 thorpej pp->pr_nitems--;
810 1.20 thorpej pp->pr_nout++;
811 1.6 thorpej if (ph->ph_nmissing == 0) {
812 1.6 thorpej #ifdef DIAGNOSTIC
813 1.34 thorpej if (__predict_false(pp->pr_nidle == 0))
814 1.6 thorpej panic("pool_get: nidle inconsistent");
815 1.6 thorpej #endif
816 1.6 thorpej pp->pr_nidle--;
817 1.6 thorpej }
818 1.3 pk ph->ph_nmissing++;
819 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
820 1.21 thorpej #ifdef DIAGNOSTIC
821 1.34 thorpej if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
822 1.25 thorpej pr_leave(pp);
823 1.21 thorpej simple_unlock(&pp->pr_slock);
824 1.21 thorpej panic("pool_get: %s: nmissing inconsistent",
825 1.21 thorpej pp->pr_wchan);
826 1.21 thorpej }
827 1.21 thorpej #endif
828 1.3 pk /*
829 1.3 pk * Find a new non-empty page header, if any.
830 1.3 pk * Start search from the page head, to increase
831 1.3 pk * the chance for "high water" pages to be freed.
832 1.3 pk *
833 1.21 thorpej * Migrate empty pages to the end of the list. This
834 1.21 thorpej * will speed the update of curpage as pages become
835 1.21 thorpej * idle. Empty pages intermingled with idle pages
836 1.21 thorpej * is no big deal. As soon as a page becomes un-empty,
837 1.21 thorpej * it will move back to the head of the list.
838 1.3 pk */
839 1.3 pk TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
840 1.21 thorpej TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
841 1.50.2.3 nathanw TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
842 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
843 1.3 pk break;
844 1.3 pk
845 1.3 pk pp->pr_curpage = ph;
846 1.1 pk }
847 1.3 pk
848 1.3 pk pp->pr_nget++;
849 1.20 thorpej
850 1.20 thorpej /*
851 1.20 thorpej * If we have a low water mark and we are now below that low
852 1.20 thorpej * water mark, add more items to the pool.
853 1.20 thorpej */
854 1.50.2.1 nathanw if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
855 1.20 thorpej /*
856 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
857 1.20 thorpej * to try again in a second or so? The latter could break
858 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
859 1.20 thorpej */
860 1.20 thorpej }
861 1.20 thorpej
862 1.25 thorpej pr_leave(pp);
863 1.21 thorpej simple_unlock(&pp->pr_slock);
864 1.1 pk return (v);
865 1.1 pk }
866 1.1 pk
867 1.1 pk /*
868 1.43 thorpej * Internal version of pool_put(). Pool is already locked/entered.
869 1.1 pk */
870 1.43 thorpej static void
871 1.50.2.1 nathanw pool_do_put(struct pool *pp, void *v)
872 1.1 pk {
873 1.1 pk struct pool_item *pi = v;
874 1.3 pk struct pool_item_header *ph;
875 1.3 pk caddr_t page;
876 1.21 thorpej int s;
877 1.3 pk
878 1.50.2.3 nathanw LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
879 1.50.2.3 nathanw
880 1.50.2.7 nathanw page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
881 1.1 pk
882 1.30 thorpej #ifdef DIAGNOSTIC
883 1.34 thorpej if (__predict_false(pp->pr_nout == 0)) {
884 1.30 thorpej printf("pool %s: putting with none out\n",
885 1.30 thorpej pp->pr_wchan);
886 1.30 thorpej panic("pool_put");
887 1.30 thorpej }
888 1.30 thorpej #endif
889 1.3 pk
890 1.34 thorpej if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
891 1.25 thorpej pr_printlog(pp, NULL, printf);
892 1.3 pk panic("pool_put: %s: page header missing", pp->pr_wchan);
893 1.3 pk }
894 1.28 thorpej
895 1.28 thorpej #ifdef LOCKDEBUG
896 1.28 thorpej /*
897 1.28 thorpej * Check if we're freeing a locked simple lock.
898 1.28 thorpej */
899 1.28 thorpej simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
900 1.28 thorpej #endif
901 1.3 pk
902 1.3 pk /*
903 1.3 pk * Return to item list.
904 1.3 pk */
905 1.2 pk #ifdef DIAGNOSTIC
906 1.3 pk pi->pi_magic = PI_MAGIC;
907 1.3 pk #endif
908 1.32 chs #ifdef DEBUG
909 1.32 chs {
910 1.32 chs int i, *ip = v;
911 1.32 chs
912 1.32 chs for (i = 0; i < pp->pr_size / sizeof(int); i++) {
913 1.32 chs *ip++ = PI_MAGIC;
914 1.32 chs }
915 1.32 chs }
916 1.32 chs #endif
917 1.32 chs
918 1.3 pk TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
919 1.3 pk ph->ph_nmissing--;
920 1.3 pk pp->pr_nput++;
921 1.20 thorpej pp->pr_nitems++;
922 1.20 thorpej pp->pr_nout--;
923 1.3 pk
924 1.3 pk /* Cancel "pool empty" condition if it exists */
925 1.3 pk if (pp->pr_curpage == NULL)
926 1.3 pk pp->pr_curpage = ph;
927 1.3 pk
928 1.3 pk if (pp->pr_flags & PR_WANTED) {
929 1.3 pk pp->pr_flags &= ~PR_WANTED;
930 1.15 pk if (ph->ph_nmissing == 0)
931 1.15 pk pp->pr_nidle++;
932 1.3 pk wakeup((caddr_t)pp);
933 1.3 pk return;
934 1.3 pk }
935 1.3 pk
936 1.3 pk /*
937 1.21 thorpej * If this page is now complete, do one of two things:
938 1.21 thorpej *
939 1.21 thorpej * (1) If we have more pages than the page high water
940 1.21 thorpej * mark, free the page back to the system.
941 1.21 thorpej *
942 1.21 thorpej * (2) Move it to the end of the page list, so that
943 1.21 thorpej * we minimize our chances of fragmenting the
944 1.21 thorpej * pool. Idle pages migrate to the end (along with
945 1.21 thorpej * completely empty pages, so that we find un-empty
946 1.21 thorpej * pages more quickly when we update curpage) of the
947 1.21 thorpej * list so they can be more easily swept up by
948 1.21 thorpej * the pagedaemon when pages are scarce.
949 1.3 pk */
950 1.3 pk if (ph->ph_nmissing == 0) {
951 1.6 thorpej pp->pr_nidle++;
952 1.50.2.7 nathanw if (pp->pr_npages > pp->pr_maxpages ||
953 1.50.2.7 nathanw (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
954 1.50.2.3 nathanw pr_rmpage(pp, ph, NULL);
955 1.3 pk } else {
956 1.3 pk TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
957 1.3 pk TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
958 1.3 pk
959 1.21 thorpej /*
960 1.21 thorpej * Update the timestamp on the page. A page must
961 1.21 thorpej * be idle for some period of time before it can
962 1.21 thorpej * be reclaimed by the pagedaemon. This minimizes
963 1.21 thorpej * ping-pong'ing for memory.
964 1.21 thorpej */
965 1.21 thorpej s = splclock();
966 1.21 thorpej ph->ph_time = mono_time;
967 1.21 thorpej splx(s);
968 1.21 thorpej
969 1.21 thorpej /*
970 1.21 thorpej * Update the current page pointer. Just look for
971 1.21 thorpej * the first page with any free items.
972 1.21 thorpej *
973 1.21 thorpej * XXX: Maybe we want an option to look for the
974 1.21 thorpej * page with the fewest available items, to minimize
975 1.21 thorpej * fragmentation?
976 1.21 thorpej */
977 1.50.2.3 nathanw TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
978 1.3 pk if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
979 1.3 pk break;
980 1.1 pk
981 1.3 pk pp->pr_curpage = ph;
982 1.1 pk }
983 1.1 pk }
984 1.21 thorpej /*
985 1.21 thorpej * If the page has just become un-empty, move it to the head of
986 1.21 thorpej * the list, and make it the current page. The next allocation
987 1.21 thorpej * will get the item from this page, instead of further fragmenting
988 1.21 thorpej * the pool.
989 1.21 thorpej */
990 1.21 thorpej else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
991 1.21 thorpej TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
992 1.21 thorpej TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
993 1.21 thorpej pp->pr_curpage = ph;
994 1.21 thorpej }
995 1.43 thorpej }
996 1.43 thorpej
997 1.43 thorpej /*
998 1.43 thorpej * Return resource to the pool; must be called at appropriate spl level
999 1.43 thorpej */
1000 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
1001 1.43 thorpej void
1002 1.43 thorpej _pool_put(struct pool *pp, void *v, const char *file, long line)
1003 1.43 thorpej {
1004 1.43 thorpej
1005 1.43 thorpej simple_lock(&pp->pr_slock);
1006 1.43 thorpej pr_enter(pp, file, line);
1007 1.43 thorpej
1008 1.50.2.1 nathanw pr_log(pp, v, PRLOG_PUT, file, line);
1009 1.50.2.1 nathanw
1010 1.50.2.1 nathanw pool_do_put(pp, v);
1011 1.21 thorpej
1012 1.25 thorpej pr_leave(pp);
1013 1.21 thorpej simple_unlock(&pp->pr_slock);
1014 1.1 pk }
1015 1.50.2.1 nathanw #undef pool_put
1016 1.50.2.1 nathanw #endif /* POOL_DIAGNOSTIC */
1017 1.50.2.1 nathanw
1018 1.50.2.1 nathanw void
1019 1.50.2.1 nathanw pool_put(struct pool *pp, void *v)
1020 1.50.2.1 nathanw {
1021 1.50.2.1 nathanw
1022 1.50.2.1 nathanw simple_lock(&pp->pr_slock);
1023 1.50.2.1 nathanw
1024 1.50.2.1 nathanw pool_do_put(pp, v);
1025 1.50.2.1 nathanw
1026 1.50.2.1 nathanw simple_unlock(&pp->pr_slock);
1027 1.50.2.1 nathanw }
1028 1.50.2.1 nathanw
1029 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
1030 1.50.2.1 nathanw #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1031 1.50.2.1 nathanw #endif
1032 1.1 pk
1033 1.1 pk /*
1034 1.3 pk * Add N items to the pool.
1035 1.1 pk */
1036 1.1 pk int
1037 1.50.2.1 nathanw pool_prime(struct pool *pp, int n)
1038 1.1 pk {
1039 1.50.2.1 nathanw struct pool_item_header *ph;
1040 1.3 pk caddr_t cp;
1041 1.50.2.7 nathanw int newpages;
1042 1.1 pk
1043 1.21 thorpej simple_lock(&pp->pr_slock);
1044 1.21 thorpej
1045 1.50.2.1 nathanw newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1046 1.3 pk
1047 1.3 pk while (newpages-- > 0) {
1048 1.50.2.1 nathanw simple_unlock(&pp->pr_slock);
1049 1.50.2.7 nathanw cp = pool_allocator_alloc(pp, PR_NOWAIT);
1050 1.50.2.1 nathanw if (__predict_true(cp != NULL))
1051 1.50.2.1 nathanw ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1052 1.50.2.1 nathanw simple_lock(&pp->pr_slock);
1053 1.2 pk
1054 1.50.2.1 nathanw if (__predict_false(cp == NULL || ph == NULL)) {
1055 1.50.2.1 nathanw if (cp != NULL)
1056 1.50.2.7 nathanw pool_allocator_free(pp, cp);
1057 1.50.2.1 nathanw break;
1058 1.1 pk }
1059 1.1 pk
1060 1.50.2.1 nathanw pool_prime_page(pp, cp, ph);
1061 1.26 thorpej pp->pr_npagealloc++;
1062 1.3 pk pp->pr_minpages++;
1063 1.1 pk }
1064 1.3 pk
1065 1.3 pk if (pp->pr_minpages >= pp->pr_maxpages)
1066 1.3 pk pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1067 1.3 pk
1068 1.21 thorpej simple_unlock(&pp->pr_slock);
1069 1.1 pk return (0);
1070 1.1 pk }
1071 1.3 pk
1072 1.3 pk /*
1073 1.3 pk * Add a page worth of items to the pool.
1074 1.21 thorpej *
1075 1.21 thorpej * Note, we must be called with the pool descriptor LOCKED.
1076 1.3 pk */
1077 1.50.2.1 nathanw static void
1078 1.50.2.1 nathanw pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1079 1.3 pk {
1080 1.3 pk struct pool_item *pi;
1081 1.3 pk caddr_t cp = storage;
1082 1.3 pk unsigned int align = pp->pr_align;
1083 1.3 pk unsigned int ioff = pp->pr_itemoffset;
1084 1.50.2.1 nathanw int n;
1085 1.36 pk
1086 1.50.2.7 nathanw #ifdef DIAGNOSTIC
1087 1.50.2.7 nathanw if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1088 1.36 pk panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1089 1.50.2.7 nathanw #endif
1090 1.3 pk
1091 1.50.2.1 nathanw if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1092 1.3 pk LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1093 1.50.2.1 nathanw ph, ph_hashlist);
1094 1.3 pk
1095 1.3 pk /*
1096 1.3 pk * Insert page header.
1097 1.3 pk */
1098 1.3 pk TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1099 1.3 pk TAILQ_INIT(&ph->ph_itemlist);
1100 1.3 pk ph->ph_page = storage;
1101 1.3 pk ph->ph_nmissing = 0;
1102 1.21 thorpej memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1103 1.3 pk
1104 1.6 thorpej pp->pr_nidle++;
1105 1.6 thorpej
1106 1.3 pk /*
1107 1.3 pk * Color this page.
1108 1.3 pk */
1109 1.3 pk cp = (caddr_t)(cp + pp->pr_curcolor);
1110 1.3 pk if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1111 1.3 pk pp->pr_curcolor = 0;
1112 1.3 pk
1113 1.3 pk /*
1114 1.3 pk 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1115 1.3 pk */
1116 1.3 pk if (ioff != 0)
1117 1.3 pk cp = (caddr_t)(cp + (align - ioff));
1118 1.3 pk
1119 1.3 pk /*
1120 1.3 pk * Insert remaining chunks on the bucket list.
1121 1.3 pk */
1122 1.3 pk n = pp->pr_itemsperpage;
1123 1.20 thorpej pp->pr_nitems += n;
1124 1.3 pk
1125 1.3 pk while (n--) {
1126 1.3 pk pi = (struct pool_item *)cp;
1127 1.50.2.9 nathanw
1128 1.50.2.9 nathanw KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1129 1.3 pk
1130 1.3 pk /* Insert on page list */
1131 1.3 pk TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1132 1.3 pk #ifdef DIAGNOSTIC
1133 1.3 pk pi->pi_magic = PI_MAGIC;
1134 1.3 pk #endif
1135 1.3 pk cp = (caddr_t)(cp + pp->pr_size);
1136 1.3 pk }
1137 1.3 pk
1138 1.3 pk /*
1139 1.3 pk * If the pool was depleted, point at the new page.
1140 1.3 pk */
1141 1.3 pk if (pp->pr_curpage == NULL)
1142 1.3 pk pp->pr_curpage = ph;
1143 1.3 pk
1144 1.3 pk if (++pp->pr_npages > pp->pr_hiwat)
1145 1.3 pk pp->pr_hiwat = pp->pr_npages;
1146 1.3 pk }
1147 1.3 pk
1148 1.20 thorpej /*
1149 1.50.2.1 nathanw * Used by pool_get() when nitems drops below the low water mark. This
1150 1.50.2.1 nathanw  * is used to catch up nitems with the low water mark.
1151 1.20 thorpej *
1152 1.21 thorpej * Note 1, we never wait for memory here, we let the caller decide what to do.
1153 1.20 thorpej *
1154 1.50.2.7 nathanw * Note 2, we must be called with the pool already locked, and we return
1155 1.20 thorpej * with it locked.
1156 1.20 thorpej */
1157 1.20 thorpej static int
1158 1.42 thorpej pool_catchup(struct pool *pp)
1159 1.20 thorpej {
1160 1.50.2.1 nathanw struct pool_item_header *ph;
1161 1.20 thorpej caddr_t cp;
1162 1.20 thorpej int error = 0;
1163 1.20 thorpej
1164 1.50.2.1 nathanw while (POOL_NEEDS_CATCHUP(pp)) {
1165 1.20 thorpej /*
1166 1.21 thorpej * Call the page back-end allocator for more memory.
1167 1.21 thorpej *
1168 1.21 thorpej * XXX: We never wait, so should we bother unlocking
1169 1.21 thorpej * the pool descriptor?
1170 1.20 thorpej */
1171 1.21 thorpej simple_unlock(&pp->pr_slock);
1172 1.50.2.7 nathanw cp = pool_allocator_alloc(pp, PR_NOWAIT);
1173 1.50.2.1 nathanw if (__predict_true(cp != NULL))
1174 1.50.2.1 nathanw ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1175 1.21 thorpej simple_lock(&pp->pr_slock);
1176 1.50.2.1 nathanw if (__predict_false(cp == NULL || ph == NULL)) {
1177 1.50.2.1 nathanw if (cp != NULL)
1178 1.50.2.7 nathanw pool_allocator_free(pp, cp);
1179 1.20 thorpej error = ENOMEM;
1180 1.20 thorpej break;
1181 1.20 thorpej }
1182 1.50.2.1 nathanw pool_prime_page(pp, cp, ph);
1183 1.26 thorpej pp->pr_npagealloc++;
1184 1.20 thorpej }
1185 1.20 thorpej
1186 1.20 thorpej return (error);
1187 1.20 thorpej }
1188 1.20 thorpej
1189 1.3 pk void
1190 1.42 thorpej pool_setlowat(struct pool *pp, int n)
1191 1.3 pk {
1192 1.15 pk
1193 1.21 thorpej simple_lock(&pp->pr_slock);
1194 1.21 thorpej
1195 1.3 pk pp->pr_minitems = n;
1196 1.15 pk pp->pr_minpages = (n == 0)
1197 1.15 pk ? 0
1198 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1199 1.20 thorpej
1200 1.20 thorpej /* Make sure we're caught up with the newly-set low water mark. */
1201 1.50.2.7 nathanw if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1202 1.20 thorpej /*
1203 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1204 1.20 thorpej * to try again in a second or so? The latter could break
1205 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1206 1.20 thorpej */
1207 1.20 thorpej }
1208 1.21 thorpej
1209 1.21 thorpej simple_unlock(&pp->pr_slock);
1210 1.3 pk }
1211 1.3 pk
1212 1.3 pk void
1213 1.42 thorpej pool_sethiwat(struct pool *pp, int n)
1214 1.3 pk {
1215 1.15 pk
1216 1.21 thorpej simple_lock(&pp->pr_slock);
1217 1.21 thorpej
1218 1.15 pk pp->pr_maxpages = (n == 0)
1219 1.15 pk ? 0
1220 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1221 1.21 thorpej
1222 1.21 thorpej simple_unlock(&pp->pr_slock);
1223 1.3 pk }
1224 1.3 pk
1225 1.20 thorpej void
1226 1.42 thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1227 1.20 thorpej {
1228 1.20 thorpej
1229 1.21 thorpej simple_lock(&pp->pr_slock);
1230 1.20 thorpej
1231 1.20 thorpej pp->pr_hardlimit = n;
1232 1.20 thorpej pp->pr_hardlimit_warning = warnmess;
1233 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1234 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
1235 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
1236 1.20 thorpej
1237 1.20 thorpej /*
1238 1.21 thorpej * In-line version of pool_sethiwat(), because we don't want to
1239 1.21 thorpej * release the lock.
1240 1.20 thorpej */
1241 1.20 thorpej pp->pr_maxpages = (n == 0)
1242 1.20 thorpej ? 0
1243 1.20 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1244 1.21 thorpej
1245 1.21 thorpej simple_unlock(&pp->pr_slock);
1246 1.20 thorpej }
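/*
 * A hedged example of how a client might cap a pool ("foo_pool" and
 * "maxfoo" are hypothetical): allow at most maxfoo outstanding items
 * and, via the ratecap argument, warn at most once per minute when
 * the hard limit is hit.
 *
 *	pool_sethardlimit(&foo_pool, maxfoo,
 *	    "WARNING: foo_pool limit reached", 60);
 */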
1247 1.3 pk
1248 1.3 pk /*
1249 1.3 pk * Release all complete pages that have not been used recently.
1250 1.3 pk */
1251 1.50.2.7 nathanw int
1252 1.50.2.1 nathanw #ifdef POOL_DIAGNOSTIC
1253 1.42 thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
1254 1.50.2.1 nathanw #else
1255 1.50.2.1 nathanw pool_reclaim(struct pool *pp)
1256 1.50.2.1 nathanw #endif
1257 1.3 pk {
1258 1.3 pk struct pool_item_header *ph, *phnext;
1259 1.43 thorpej struct pool_cache *pc;
1260 1.21 thorpej struct timeval curtime;
1261 1.50.2.3 nathanw struct pool_pagelist pq;
1262 1.21 thorpej int s;
1263 1.3 pk
1264 1.50.2.7 nathanw if (pp->pr_drain_hook != NULL) {
1265 1.50.2.7 nathanw /*
1266 1.50.2.7 nathanw * The drain hook must be called with the pool unlocked.
1267 1.50.2.7 nathanw */
1268 1.50.2.7 nathanw (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1269 1.50.2.7 nathanw }
1270 1.3 pk
1271 1.21 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1272 1.50.2.7 nathanw return (0);
1273 1.25 thorpej pr_enter(pp, file, line);
1274 1.50.2.7 nathanw
1275 1.50.2.3 nathanw TAILQ_INIT(&pq);
1276 1.3 pk
1277 1.43 thorpej /*
1278 1.43 thorpej * Reclaim items from the pool's caches.
1279 1.43 thorpej */
1280 1.50.2.3 nathanw TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1281 1.43 thorpej pool_cache_reclaim(pc);
1282 1.43 thorpej
1283 1.21 thorpej s = splclock();
1284 1.21 thorpej curtime = mono_time;
1285 1.21 thorpej splx(s);
1286 1.21 thorpej
1287 1.3 pk for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1288 1.3 pk phnext = TAILQ_NEXT(ph, ph_pagelist);
1289 1.3 pk
1290 1.3 pk /* Check our minimum page claim */
1291 1.3 pk if (pp->pr_npages <= pp->pr_minpages)
1292 1.3 pk break;
1293 1.3 pk
1294 1.3 pk if (ph->ph_nmissing == 0) {
1295 1.3 pk struct timeval diff;
1296 1.3 pk timersub(&curtime, &ph->ph_time, &diff);
1297 1.3 pk if (diff.tv_sec < pool_inactive_time)
1298 1.3 pk continue;
1299 1.21 thorpej
1300 1.21 thorpej /*
1301 1.21 thorpej * If freeing this page would put us below
1302 1.21 thorpej * the low water mark, stop now.
1303 1.21 thorpej */
1304 1.21 thorpej if ((pp->pr_nitems - pp->pr_itemsperpage) <
1305 1.21 thorpej pp->pr_minitems)
1306 1.21 thorpej break;
1307 1.21 thorpej
1308 1.50.2.3 nathanw pr_rmpage(pp, ph, &pq);
1309 1.3 pk }
1310 1.3 pk }
1311 1.3 pk
1312 1.25 thorpej pr_leave(pp);
1313 1.21 thorpej simple_unlock(&pp->pr_slock);
1314 1.50.2.7 nathanw if (TAILQ_EMPTY(&pq))
1315 1.50.2.7 nathanw return (0);
1316 1.50.2.7 nathanw
1317 1.50.2.3 nathanw while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1318 1.50.2.3 nathanw TAILQ_REMOVE(&pq, ph, ph_pagelist);
1319 1.50.2.7 nathanw pool_allocator_free(pp, ph->ph_page);
1320 1.50.2.3 nathanw if (pp->pr_roflags & PR_PHINPAGE) {
1321 1.50.2.3 nathanw continue;
1322 1.50.2.3 nathanw }
1323 1.50.2.3 nathanw LIST_REMOVE(ph, ph_hashlist);
1324 1.50.2.3 nathanw s = splhigh();
1325 1.50.2.3 nathanw pool_put(&phpool, ph);
1326 1.50.2.3 nathanw splx(s);
1327 1.50.2.3 nathanw }
1328 1.3 pk
1329 1.50.2.7 nathanw return (1);
1330 1.50.2.7 nathanw }
1331 1.3 pk
1332 1.3 pk /*
1333 1.3 pk * Drain pools, one at a time.
1334 1.21 thorpej *
1335 1.21 thorpej * Note, we must never be called from an interrupt context.
1336 1.3 pk */
1337 1.3 pk void
1338 1.42 thorpej pool_drain(void *arg)
1339 1.3 pk {
1340 1.3 pk struct pool *pp;
1341 1.23 thorpej int s;
1342 1.3 pk
1343 1.50.2.3 nathanw pp = NULL;
1344 1.49 thorpej s = splvm();
1345 1.23 thorpej simple_lock(&pool_head_slock);
1346 1.50.2.3 nathanw if (drainpp == NULL) {
1347 1.50.2.3 nathanw drainpp = TAILQ_FIRST(&pool_head);
1348 1.50.2.3 nathanw }
1349 1.50.2.3 nathanw if (drainpp) {
1350 1.50.2.3 nathanw pp = drainpp;
1351 1.50.2.3 nathanw drainpp = TAILQ_NEXT(pp, pr_poollist);
1352 1.50.2.3 nathanw }
1353 1.23 thorpej simple_unlock(&pool_head_slock);
1354 1.50.2.3 nathanw pool_reclaim(pp);
1355 1.50.2.4 nathanw splx(s);
1356 1.3 pk }
1357 1.3 pk
1358 1.3 pk /*
1359 1.3 pk * Diagnostic helpers.
1360 1.3 pk */
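/*
 * The "modif" argument to the printing routines below is a string of
 * single-character flags parsed in pool_print1(): 'l' prints the
 * pool's diagnostic log, 'p' the page list, 'c' any pool caches.
 * From DDB these are typically reached via a "show pool" style
 * command; the exact syntax depends on the DDB configuration.
 */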
1361 1.3 pk void
1362 1.42 thorpej pool_print(struct pool *pp, const char *modif)
1363 1.21 thorpej {
1364 1.21 thorpej int s;
1365 1.21 thorpej
1366 1.49 thorpej s = splvm();
1367 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0) {
1368 1.25 thorpej printf("pool %s is locked; try again later\n",
1369 1.25 thorpej pp->pr_wchan);
1370 1.25 thorpej splx(s);
1371 1.25 thorpej return;
1372 1.25 thorpej }
1373 1.25 thorpej pool_print1(pp, modif, printf);
1374 1.21 thorpej simple_unlock(&pp->pr_slock);
1375 1.21 thorpej splx(s);
1376 1.21 thorpej }
1377 1.21 thorpej
1378 1.25 thorpej void
1379 1.42 thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1380 1.25 thorpej {
1381 1.25 thorpej int didlock = 0;
1382 1.25 thorpej
1383 1.25 thorpej if (pp == NULL) {
1384 1.25 thorpej (*pr)("Must specify a pool to print.\n");
1385 1.25 thorpej return;
1386 1.25 thorpej }
1387 1.25 thorpej
1388 1.25 thorpej /*
1389 1.25 thorpej * Called from DDB; interrupts should be blocked, and all
1390 1.25 thorpej * other processors should be paused. We can skip locking
1391 1.25 thorpej * the pool in this case.
1392 1.25 thorpej *
1393 1.25 thorpej * We do a simple_lock_try() just to print the lock
1394 1.25 thorpej * status, however.
1395 1.25 thorpej */
1396 1.25 thorpej
1397 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1398 1.25 thorpej (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1399 1.25 thorpej else
1400 1.25 thorpej didlock = 1;
1401 1.25 thorpej
1402 1.25 thorpej pool_print1(pp, modif, pr);
1403 1.25 thorpej
1404 1.25 thorpej if (didlock)
1405 1.25 thorpej simple_unlock(&pp->pr_slock);
1406 1.25 thorpej }
1407 1.25 thorpej
1408 1.21 thorpej static void
1409 1.42 thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1410 1.3 pk {
1411 1.25 thorpej struct pool_item_header *ph;
1412 1.44 thorpej struct pool_cache *pc;
1413 1.44 thorpej struct pool_cache_group *pcg;
1414 1.25 thorpej #ifdef DIAGNOSTIC
1415 1.25 thorpej struct pool_item *pi;
1416 1.25 thorpej #endif
1417 1.44 thorpej int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1418 1.25 thorpej char c;
1419 1.25 thorpej
1420 1.25 thorpej while ((c = *modif++) != '\0') {
1421 1.25 thorpej if (c == 'l')
1422 1.25 thorpej print_log = 1;
1423 1.25 thorpej if (c == 'p')
1424 1.25 thorpej print_pagelist = 1;
1425 1.44 thorpej if (c == 'c')
1426 1.44 thorpej print_cache = 1;
1428 1.25 thorpej }
1429 1.25 thorpej
1430 1.25 thorpej (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1431 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1432 1.25 thorpej pp->pr_roflags);
1433 1.50.2.7 nathanw (*pr)("\talloc %p\n", pp->pr_alloc);
1434 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1435 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1436 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1437 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1438 1.25 thorpej
1439 1.25 thorpej (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1440 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1441 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1442 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1443 1.25 thorpej
1444 1.25 thorpej if (print_pagelist == 0)
1445 1.25 thorpej goto skip_pagelist;
1446 1.25 thorpej
1447 1.25 thorpej if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1448 1.25 thorpej (*pr)("\n\tpage list:\n");
1449 1.25 thorpej for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1450 1.25 thorpej (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1451 1.25 thorpej ph->ph_page, ph->ph_nmissing,
1452 1.25 thorpej (u_long)ph->ph_time.tv_sec,
1453 1.25 thorpej (u_long)ph->ph_time.tv_usec);
1454 1.25 thorpej #ifdef DIAGNOSTIC
1455 1.50.2.3 nathanw TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1456 1.25 thorpej if (pi->pi_magic != PI_MAGIC) {
1457 1.25 thorpej (*pr)("\t\t\titem %p, magic 0x%x\n",
1458 1.25 thorpej pi, pi->pi_magic);
1459 1.25 thorpej }
1460 1.25 thorpej }
1461 1.25 thorpej #endif
1462 1.25 thorpej }
1463 1.25 thorpej if (pp->pr_curpage == NULL)
1464 1.25 thorpej (*pr)("\tno current page\n");
1465 1.25 thorpej else
1466 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1467 1.25 thorpej
1468 1.25 thorpej skip_pagelist:
1469 1.25 thorpej
1470 1.25 thorpej if (print_log == 0)
1471 1.25 thorpej goto skip_log;
1472 1.25 thorpej
1473 1.25 thorpej (*pr)("\n");
1474 1.25 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
1475 1.25 thorpej (*pr)("\tno log\n");
1476 1.25 thorpej else
1477 1.25 thorpej pr_printlog(pp, NULL, pr);
1478 1.3 pk
1479 1.25 thorpej skip_log:
1480 1.44 thorpej
1481 1.44 thorpej if (print_cache == 0)
1482 1.44 thorpej goto skip_cache;
1483 1.44 thorpej
1484 1.50.2.3 nathanw TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1485 1.44 thorpej (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1486 1.44 thorpej pc->pc_allocfrom, pc->pc_freeto);
1487 1.48 thorpej (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1488 1.48 thorpej pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1489 1.50.2.3 nathanw TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1490 1.44 thorpej (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1491 1.44 thorpej for (i = 0; i < PCG_NOBJECTS; i++)
1492 1.44 thorpej (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1493 1.44 thorpej }
1494 1.44 thorpej }
1495 1.44 thorpej
1496 1.44 thorpej skip_cache:
1497 1.3 pk
1498 1.25 thorpej pr_enter_check(pp, pr);
1499 1.3 pk }
1500 1.3 pk
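/*
 * pool_chk:
 *
 *	Consistency-check a pool: verify that each page header maps
 *	back to its own page, and that every item on a page's free
 *	list lies within that page.  Returns nonzero if an
 *	inconsistency is found.
 */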
1501 1.3 pk int
1502 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1503 1.3 pk {
1504 1.3 pk struct pool_item_header *ph;
1505 1.3 pk int r = 0;
1506 1.3 pk
1507 1.21 thorpej simple_lock(&pp->pr_slock);
1508 1.3 pk
1509 1.50.2.3 nathanw TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1510 1.3 pk struct pool_item *pi;
1511 1.3 pk int n;
1512 1.3 pk caddr_t page;
1513 1.3 pk
1514 1.50.2.7 nathanw page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1515 1.20 thorpej if (page != ph->ph_page &&
1516 1.20 thorpej (pp->pr_roflags & PR_PHINPAGE) != 0) {
1517 1.3 pk if (label != NULL)
1518 1.3 pk printf("%s: ", label);
1519 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1520 1.16 briggs " at page head addr %p (p %p)\n", pp,
1521 1.3 pk pp->pr_wchan, ph->ph_page,
1522 1.3 pk ph, page);
1523 1.3 pk r++;
1524 1.3 pk goto out;
1525 1.3 pk }
1526 1.3 pk
1527 1.3 pk for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1528 1.3 pk pi != NULL;
1529 1.3 pk pi = TAILQ_NEXT(pi,pi_list), n++) {
1530 1.3 pk
1531 1.3 pk #ifdef DIAGNOSTIC
1532 1.3 pk if (pi->pi_magic != PI_MAGIC) {
1533 1.3 pk if (label != NULL)
1534 1.3 pk printf("%s: ", label);
1535 1.3 pk printf("pool(%s): free list modified: magic=%x;"
1536 1.3 pk " page %p; item ordinal %d;"
1537 1.3 pk " addr %p (p %p)\n",
1538 1.3 pk pp->pr_wchan, pi->pi_magic, ph->ph_page,
1539 1.3 pk n, pi, page);
1540 1.3 pk panic("pool");
1541 1.3 pk }
1542 1.3 pk #endif
1543 1.50.2.7 nathanw page =
1544 1.50.2.7 nathanw (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1545 1.3 pk if (page == ph->ph_page)
1546 1.3 pk continue;
1547 1.3 pk
1548 1.3 pk if (label != NULL)
1549 1.3 pk printf("%s: ", label);
1550 1.16 briggs printf("pool(%p:%s): page inconsistency: page %p;"
1551 1.16 briggs " item ordinal %d; addr %p (p %p)\n", pp,
1552 1.3 pk pp->pr_wchan, ph->ph_page,
1553 1.3 pk n, pi, page);
1554 1.3 pk r++;
1555 1.3 pk goto out;
1556 1.3 pk }
1557 1.3 pk }
1558 1.3 pk out:
1559 1.21 thorpej simple_unlock(&pp->pr_slock);
1560 1.3 pk return (r);
1561 1.43 thorpej }
1562 1.43 thorpej
1563 1.43 thorpej /*
1564 1.43 thorpej * pool_cache_init:
1565 1.43 thorpej *
1566 1.43 thorpej * Initialize a pool cache.
1567 1.43 thorpej *
1568 1.43 thorpej * NOTE: If the pool must be protected from interrupts, we expect
1569 1.43 thorpej * to be called at the appropriate interrupt priority level.
1570 1.43 thorpej */
1571 1.43 thorpej void
1572 1.43 thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
1573 1.43 thorpej int (*ctor)(void *, void *, int),
1574 1.43 thorpej void (*dtor)(void *, void *),
1575 1.43 thorpej void *arg)
1576 1.43 thorpej {
1577 1.43 thorpej
1578 1.43 thorpej TAILQ_INIT(&pc->pc_grouplist);
1579 1.43 thorpej simple_lock_init(&pc->pc_slock);
1580 1.43 thorpej
1581 1.43 thorpej pc->pc_allocfrom = NULL;
1582 1.43 thorpej pc->pc_freeto = NULL;
1583 1.43 thorpej pc->pc_pool = pp;
1584 1.43 thorpej
1585 1.43 thorpej pc->pc_ctor = ctor;
1586 1.43 thorpej pc->pc_dtor = dtor;
1587 1.43 thorpej pc->pc_arg = arg;
1588 1.43 thorpej
1589 1.48 thorpej pc->pc_hits = 0;
1590 1.48 thorpej pc->pc_misses = 0;
1591 1.48 thorpej
1592 1.48 thorpej pc->pc_ngroups = 0;
1593 1.48 thorpej
1594 1.48 thorpej pc->pc_nitems = 0;
1595 1.48 thorpej
1596 1.43 thorpej simple_lock(&pp->pr_slock);
1597 1.43 thorpej TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1598 1.43 thorpej simple_unlock(&pp->pr_slock);
1599 1.43 thorpej }
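
/*
 * Example (a minimal sketch; the "foo" names are hypothetical and not
 * part of this file): a subsystem would typically layer a cache over
 * an existing pool like this:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		struct foo *f = object;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, NULL, NULL);
 */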
1600 1.43 thorpej
1601 1.43 thorpej /*
1602 1.43 thorpej * pool_cache_destroy:
1603 1.43 thorpej *
1604 1.43 thorpej * Destroy a pool cache.
1605 1.43 thorpej */
1606 1.43 thorpej void
1607 1.43 thorpej pool_cache_destroy(struct pool_cache *pc)
1608 1.43 thorpej {
1609 1.43 thorpej struct pool *pp = pc->pc_pool;
1610 1.43 thorpej
1611 1.43 thorpej /* First, invalidate the entire cache. */
1612 1.43 thorpej pool_cache_invalidate(pc);
1613 1.43 thorpej
1614 1.43 thorpej /* ...and remove it from the pool's cache list. */
1615 1.43 thorpej simple_lock(&pp->pr_slock);
1616 1.43 thorpej TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1617 1.43 thorpej simple_unlock(&pp->pr_slock);
1618 1.43 thorpej }
1619 1.43 thorpej
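/*
 * pcg_get/pcg_put treat a cache group's object array as a small
 * stack: pcg_avail indexes one past the most recently freed object,
 * so cached objects are handed out in LIFO order.
 */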
1620 1.43 thorpej static __inline void *
1621 1.43 thorpej pcg_get(struct pool_cache_group *pcg)
1622 1.43 thorpej {
1623 1.43 thorpej void *object;
1624 1.43 thorpej u_int idx;
1625 1.43 thorpej
1626 1.43 thorpej KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1627 1.45 thorpej KASSERT(pcg->pcg_avail != 0);
1628 1.43 thorpej idx = --pcg->pcg_avail;
1629 1.43 thorpej
1630 1.43 thorpej KASSERT(pcg->pcg_objects[idx] != NULL);
1631 1.43 thorpej object = pcg->pcg_objects[idx];
1632 1.43 thorpej pcg->pcg_objects[idx] = NULL;
1633 1.43 thorpej
1634 1.43 thorpej return (object);
1635 1.43 thorpej }
1636 1.43 thorpej
1637 1.43 thorpej static __inline void
1638 1.43 thorpej pcg_put(struct pool_cache_group *pcg, void *object)
1639 1.43 thorpej {
1640 1.43 thorpej u_int idx;
1641 1.43 thorpej
1642 1.43 thorpej KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1643 1.43 thorpej idx = pcg->pcg_avail++;
1644 1.43 thorpej
1645 1.43 thorpej KASSERT(pcg->pcg_objects[idx] == NULL);
1646 1.43 thorpej pcg->pcg_objects[idx] = object;
1647 1.43 thorpej }
1648 1.43 thorpej
1649 1.43 thorpej /*
1650 1.43 thorpej * pool_cache_get:
1651 1.43 thorpej *
1652 1.43 thorpej * Get an object from a pool cache.
1653 1.43 thorpej */
1654 1.43 thorpej void *
1655 1.43 thorpej pool_cache_get(struct pool_cache *pc, int flags)
1656 1.43 thorpej {
1657 1.43 thorpej struct pool_cache_group *pcg;
1658 1.43 thorpej void *object;
1659 1.43 thorpej
1660 1.50.2.1 nathanw #ifdef LOCKDEBUG
1661 1.50.2.1 nathanw if (flags & PR_WAITOK)
1662 1.50.2.1 nathanw simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1663 1.50.2.1 nathanw #endif
1664 1.50.2.1 nathanw
1665 1.43 thorpej simple_lock(&pc->pc_slock);
1666 1.43 thorpej
1667 1.43 thorpej if ((pcg = pc->pc_allocfrom) == NULL) {
1668 1.50.2.3 nathanw TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1669 1.43 thorpej if (pcg->pcg_avail != 0) {
1670 1.43 thorpej pc->pc_allocfrom = pcg;
1671 1.43 thorpej goto have_group;
1672 1.43 thorpej }
1673 1.43 thorpej }
1674 1.43 thorpej
1675 1.43 thorpej /*
1676 1.43 thorpej * No groups with any available objects. Allocate
1677 1.43 thorpej * a new object, construct it, and return it to
1678 1.43 thorpej * the caller. We will allocate a group, if necessary,
1679 1.43 thorpej * when the object is freed back to the cache.
1680 1.43 thorpej */
1681 1.48 thorpej pc->pc_misses++;
1682 1.43 thorpej simple_unlock(&pc->pc_slock);
1683 1.43 thorpej object = pool_get(pc->pc_pool, flags);
1684 1.43 thorpej if (object != NULL && pc->pc_ctor != NULL) {
1685 1.43 thorpej if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1686 1.43 thorpej pool_put(pc->pc_pool, object);
1687 1.43 thorpej return (NULL);
1688 1.43 thorpej }
1689 1.43 thorpej }
1690 1.43 thorpej return (object);
1691 1.43 thorpej }
1692 1.43 thorpej
1693 1.43 thorpej have_group:
1694 1.48 thorpej pc->pc_hits++;
1695 1.48 thorpej pc->pc_nitems--;
1696 1.43 thorpej object = pcg_get(pcg);
1697 1.43 thorpej
1698 1.43 thorpej if (pcg->pcg_avail == 0)
1699 1.43 thorpej pc->pc_allocfrom = NULL;
1700 1.45 thorpej
1701 1.43 thorpej simple_unlock(&pc->pc_slock);
1702 1.43 thorpej
1703 1.43 thorpej return (object);
1704 1.43 thorpej }
1705 1.43 thorpej
1706 1.43 thorpej /*
1707 1.43 thorpej * pool_cache_put:
1708 1.43 thorpej *
1709 1.43 thorpej * Put an object back to the pool cache.
1710 1.43 thorpej */
1711 1.43 thorpej void
1712 1.43 thorpej pool_cache_put(struct pool_cache *pc, void *object)
1713 1.43 thorpej {
1714 1.43 thorpej struct pool_cache_group *pcg;
1715 1.50.2.2 nathanw int s;
1716 1.43 thorpej
1717 1.43 thorpej simple_lock(&pc->pc_slock);
1718 1.43 thorpej
1719 1.43 thorpej if ((pcg = pc->pc_freeto) == NULL) {
1720 1.50.2.3 nathanw TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1721 1.43 thorpej if (pcg->pcg_avail != PCG_NOBJECTS) {
1722 1.43 thorpej pc->pc_freeto = pcg;
1723 1.43 thorpej goto have_group;
1724 1.43 thorpej }
1725 1.43 thorpej }
1726 1.43 thorpej
1727 1.43 thorpej /*
1728 1.43 thorpej 		 * No group with room to free the object to.  Attempt
1729 1.47 thorpej 		 * to allocate a new group.
1730 1.43 thorpej */
1731 1.47 thorpej simple_unlock(&pc->pc_slock);
1732 1.50.2.2 nathanw s = splvm();
1733 1.43 thorpej pcg = pool_get(&pcgpool, PR_NOWAIT);
1734 1.50.2.2 nathanw splx(s);
1735 1.43 thorpej if (pcg != NULL) {
1736 1.43 thorpej memset(pcg, 0, sizeof(*pcg));
1737 1.47 thorpej simple_lock(&pc->pc_slock);
1738 1.48 thorpej pc->pc_ngroups++;
1739 1.43 thorpej TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1740 1.47 thorpej if (pc->pc_freeto == NULL)
1741 1.47 thorpej pc->pc_freeto = pcg;
1742 1.43 thorpej goto have_group;
1743 1.43 thorpej }
1744 1.43 thorpej
1745 1.43 thorpej /*
1746 1.43 thorpej * Unable to allocate a cache group; destruct the object
1747 1.43 thorpej * and free it back to the pool.
1748 1.43 thorpej */
1749 1.50.2.1 nathanw pool_cache_destruct_object(pc, object);
1750 1.43 thorpej return;
1751 1.43 thorpej }
1752 1.43 thorpej
1753 1.43 thorpej have_group:
1754 1.48 thorpej pc->pc_nitems++;
1755 1.43 thorpej pcg_put(pcg, object);
1756 1.43 thorpej
1757 1.43 thorpej if (pcg->pcg_avail == PCG_NOBJECTS)
1758 1.43 thorpej pc->pc_freeto = NULL;
1759 1.43 thorpej
1760 1.43 thorpej simple_unlock(&pc->pc_slock);
1761 1.43 thorpej }
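
/*
 * Example (continuing the hypothetical "foo" sketch above): callers
 * pair pool_cache_get() with pool_cache_put() just as they would
 * pool_get()/pool_put().  Note that pool_cache_get() can return NULL
 * even with PR_WAITOK, if the constructor fails:
 *
 *	struct foo *f;
 *
 *	if ((f = pool_cache_get(&foo_cache, PR_WAITOK)) == NULL)
 *		return (ENOMEM);
 *	...use the constructed object...
 *	pool_cache_put(&foo_cache, f);
 */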
1762 1.43 thorpej
1763 1.43 thorpej /*
1764 1.50.2.1 nathanw * pool_cache_destruct_object:
1765 1.50.2.1 nathanw *
1766 1.50.2.1 nathanw * Force destruction of an object and its release back into
1767 1.50.2.1 nathanw * the pool.
1768 1.50.2.1 nathanw */
1769 1.50.2.1 nathanw void
1770 1.50.2.1 nathanw pool_cache_destruct_object(struct pool_cache *pc, void *object)
1771 1.50.2.1 nathanw {
1772 1.50.2.1 nathanw
1773 1.50.2.1 nathanw if (pc->pc_dtor != NULL)
1774 1.50.2.1 nathanw (*pc->pc_dtor)(pc->pc_arg, object);
1775 1.50.2.1 nathanw pool_put(pc->pc_pool, object);
1776 1.50.2.1 nathanw }
1777 1.50.2.1 nathanw
1778 1.50.2.1 nathanw /*
1779 1.43 thorpej * pool_cache_do_invalidate:
1780 1.43 thorpej *
1781 1.43 thorpej * This internal function implements pool_cache_invalidate() and
1782 1.43 thorpej * pool_cache_reclaim().
1783 1.43 thorpej */
1784 1.43 thorpej static void
1785 1.43 thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1786 1.50.2.1 nathanw void (*putit)(struct pool *, void *))
1787 1.43 thorpej {
1788 1.43 thorpej struct pool_cache_group *pcg, *npcg;
1789 1.43 thorpej void *object;
1790 1.50.2.2 nathanw int s;
1791 1.43 thorpej
1792 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1793 1.43 thorpej pcg = npcg) {
1794 1.43 thorpej npcg = TAILQ_NEXT(pcg, pcg_list);
1795 1.43 thorpej while (pcg->pcg_avail != 0) {
1796 1.48 thorpej pc->pc_nitems--;
1797 1.43 thorpej object = pcg_get(pcg);
1798 1.45 thorpej if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1799 1.45 thorpej pc->pc_allocfrom = NULL;
1800 1.43 thorpej if (pc->pc_dtor != NULL)
1801 1.43 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
1802 1.50.2.1 nathanw (*putit)(pc->pc_pool, object);
1803 1.43 thorpej }
1804 1.43 thorpej if (free_groups) {
1805 1.48 thorpej pc->pc_ngroups--;
1806 1.43 thorpej TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1807 1.46 thorpej if (pc->pc_freeto == pcg)
1808 1.46 thorpej pc->pc_freeto = NULL;
1809 1.50.2.2 nathanw s = splvm();
1810 1.43 thorpej pool_put(&pcgpool, pcg);
1811 1.50.2.2 nathanw splx(s);
1812 1.43 thorpej }
1813 1.43 thorpej }
1814 1.43 thorpej }
1815 1.43 thorpej
1816 1.43 thorpej /*
1817 1.43 thorpej * pool_cache_invalidate:
1818 1.43 thorpej *
1819 1.43 thorpej * Invalidate a pool cache (destruct and release all of the
1820 1.43 thorpej * cached objects).
1821 1.43 thorpej */
1822 1.43 thorpej void
1823 1.43 thorpej pool_cache_invalidate(struct pool_cache *pc)
1824 1.43 thorpej {
1825 1.43 thorpej
1826 1.43 thorpej simple_lock(&pc->pc_slock);
1827 1.50.2.1 nathanw pool_cache_do_invalidate(pc, 0, pool_put);
1828 1.43 thorpej simple_unlock(&pc->pc_slock);
1829 1.43 thorpej }
1830 1.43 thorpej
1831 1.43 thorpej /*
1832 1.43 thorpej * pool_cache_reclaim:
1833 1.43 thorpej *
1834 1.43 thorpej * Reclaim a pool cache for pool_reclaim().
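 *
 *	Unlike pool_cache_invalidate(), this also frees the cache
 *	groups themselves, and objects are released with pool_do_put()
 *	because pool_reclaim() calls this with the pool's lock
 *	already held.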
1835 1.43 thorpej */
1836 1.43 thorpej static void
1837 1.43 thorpej pool_cache_reclaim(struct pool_cache *pc)
1838 1.43 thorpej {
1839 1.43 thorpej
1840 1.47 thorpej simple_lock(&pc->pc_slock);
1841 1.43 thorpej pool_cache_do_invalidate(pc, 1, pool_do_put);
1842 1.43 thorpej simple_unlock(&pc->pc_slock);
1843 1.3 pk }
1844 1.50.2.7 nathanw
1845 1.50.2.7 nathanw /*
1846 1.50.2.7 nathanw * Pool backend allocators.
1847 1.50.2.7 nathanw *
1848 1.50.2.7 nathanw * Each pool has a backend allocator that handles allocation, deallocation,
1849 1.50.2.7 nathanw * and any additional draining that might be needed.
1850 1.50.2.7 nathanw *
1851 1.50.2.7 nathanw * We provide two standard allocators:
1852 1.50.2.7 nathanw *
1853 1.50.2.7 nathanw * pool_allocator_kmem - the default when no allocator is specified
1854 1.50.2.7 nathanw *
1855 1.50.2.7 nathanw * pool_allocator_nointr - used for pools that will not be accessed
1856 1.50.2.7 nathanw * in interrupt context.
1857 1.50.2.7 nathanw */
1858 1.50.2.7 nathanw void *pool_page_alloc(struct pool *, int);
1859 1.50.2.7 nathanw void pool_page_free(struct pool *, void *);
1860 1.50.2.7 nathanw
1861 1.50.2.7 nathanw struct pool_allocator pool_allocator_kmem = {
1862 1.50.2.7 nathanw pool_page_alloc, pool_page_free, 0,
1863 1.50.2.7 nathanw };
1864 1.50.2.7 nathanw
1865 1.50.2.7 nathanw void *pool_page_alloc_nointr(struct pool *, int);
1866 1.50.2.7 nathanw void pool_page_free_nointr(struct pool *, void *);
1867 1.50.2.7 nathanw
1868 1.50.2.7 nathanw struct pool_allocator pool_allocator_nointr = {
1869 1.50.2.7 nathanw pool_page_alloc_nointr, pool_page_free_nointr, 0,
1870 1.50.2.7 nathanw };
1871 1.50.2.7 nathanw
1872 1.50.2.7 nathanw #ifdef POOL_SUBPAGE
1873 1.50.2.7 nathanw void *pool_subpage_alloc(struct pool *, int);
1874 1.50.2.7 nathanw void pool_subpage_free(struct pool *, void *);
1875 1.50.2.7 nathanw
1876 1.50.2.7 nathanw struct pool_allocator pool_allocator_kmem_subpage = {
1877 1.50.2.7 nathanw pool_subpage_alloc, pool_subpage_free, 0,
1878 1.50.2.7 nathanw };
1879 1.50.2.7 nathanw #endif /* POOL_SUBPAGE */
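
/*
 * A pool may also supply its own allocator.  A minimal sketch (the
 * "mysubsys" names are hypothetical):
 *
 *	void *mysubsys_page_alloc(struct pool *, int);
 *	void mysubsys_page_free(struct pool *, void *);
 *
 *	struct pool_allocator mysubsys_allocator = {
 *		mysubsys_page_alloc, mysubsys_page_free, 0,
 *	};
 *
 * The third initializer is the allocator's page size; 0 selects the
 * default.  The remaining fields are set up the first time the
 * allocator is passed to pool_init().
 */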
1880 1.50.2.7 nathanw
1881 1.50.2.7 nathanw /*
1882 1.50.2.7 nathanw * We have at least three different resources for the same allocation and
1883 1.50.2.7 nathanw * each resource can be depleted. First, we have the ready elements in the
1884 1.50.2.7 nathanw * pool. Then we have the resource (typically a vm_map) for this allocator.
1885 1.50.2.7 nathanw * Finally, we have physical memory. Waiting for any of these can be
1886 1.50.2.7 nathanw * unnecessary when any other is freed, but the kernel doesn't support
1887 1.50.2.7 nathanw * sleeping on multiple wait channels, so we have to employ another strategy.
1888 1.50.2.7 nathanw *
1889 1.50.2.7 nathanw * The caller sleeps on the pool (so that it can be awakened when an item
1890 1.50.2.7 nathanw * is returned to the pool), but we set PA_WANT on the allocator. When a
1891 1.50.2.7 nathanw * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1892 1.50.2.7 nathanw * will wake up all sleeping pools belonging to this allocator.
1893 1.50.2.7 nathanw *
1894 1.50.2.7 nathanw * XXX Thundering herd.
1895 1.50.2.7 nathanw */
1896 1.50.2.7 nathanw void *
1897 1.50.2.7 nathanw pool_allocator_alloc(struct pool *org, int flags)
1898 1.50.2.7 nathanw {
1899 1.50.2.7 nathanw struct pool_allocator *pa = org->pr_alloc;
1900 1.50.2.7 nathanw struct pool *pp, *start;
1901 1.50.2.7 nathanw int s, freed;
1902 1.50.2.7 nathanw void *res;
1903 1.50.2.7 nathanw
1904 1.50.2.7 nathanw do {
1905 1.50.2.7 nathanw if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1906 1.50.2.7 nathanw return (res);
1907 1.50.2.7 nathanw if ((flags & PR_WAITOK) == 0) {
1908 1.50.2.7 nathanw /*
1909 1.50.2.7 nathanw 			 * We only run the drain hook here if PR_NOWAIT.
1910 1.50.2.7 nathanw * In other cases, the hook will be run in
1911 1.50.2.7 nathanw * pool_reclaim().
1912 1.50.2.7 nathanw */
1913 1.50.2.7 nathanw if (org->pr_drain_hook != NULL) {
1914 1.50.2.7 nathanw (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1915 1.50.2.7 nathanw flags);
1916 1.50.2.7 nathanw if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1917 1.50.2.7 nathanw return (res);
1918 1.50.2.7 nathanw }
1919 1.50.2.7 nathanw break;
1920 1.50.2.7 nathanw }
1921 1.50.2.7 nathanw
1922 1.50.2.7 nathanw /*
1923 1.50.2.7 nathanw * Drain all pools, except "org", that use this
1924 1.50.2.7 nathanw * allocator. We do this to reclaim VA space.
1925 1.50.2.7 nathanw * pa_alloc is responsible for waiting for
1926 1.50.2.7 nathanw * physical memory.
1927 1.50.2.7 nathanw *
1928 1.50.2.7 nathanw 		 * XXX We risk looping forever if someone calls
1929 1.50.2.7 nathanw 		 * pool_destroy() on "start".  But there is no other
1930 1.50.2.7 nathanw 		 * way to combine a potentially sleeping pool_reclaim()
1931 1.50.2.7 nathanw 		 * with non-sleeping locks on the allocator and some
1932 1.50.2.7 nathanw 		 * stirring of the drained pools in the allocator.
1933 1.50.2.7 nathanw *
1934 1.50.2.7 nathanw * XXX Maybe we should use pool_head_slock for locking
1935 1.50.2.7 nathanw * the allocators?
1936 1.50.2.7 nathanw */
1937 1.50.2.7 nathanw freed = 0;
1938 1.50.2.7 nathanw
1939 1.50.2.7 nathanw s = splvm();
1940 1.50.2.7 nathanw simple_lock(&pa->pa_slock);
1941 1.50.2.7 nathanw pp = start = TAILQ_FIRST(&pa->pa_list);
1942 1.50.2.7 nathanw do {
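			/*
			 * Rotate the pool to the tail of the list
			 * before draining it, so that repeated calls
			 * spread the reclaim work over all pools on
			 * this allocator.
			 */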
1943 1.50.2.7 nathanw TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1944 1.50.2.7 nathanw TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1945 1.50.2.7 nathanw if (pp == org)
1946 1.50.2.7 nathanw continue;
1947 1.50.2.7 nathanw simple_unlock(&pa->pa_slock);
1948 1.50.2.7 nathanw freed = pool_reclaim(pp);
1949 1.50.2.7 nathanw simple_lock(&pa->pa_slock);
1950 1.50.2.7 nathanw } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1951 1.50.2.7 nathanw freed == 0);
1952 1.50.2.7 nathanw
1953 1.50.2.7 nathanw if (freed == 0) {
1954 1.50.2.7 nathanw /*
1955 1.50.2.7 nathanw 			 * We set PA_WANT here: the caller will most likely
1956 1.50.2.7 nathanw 			 * sleep waiting for pages (and if not, this does
1957 1.50.2.7 nathanw 			 * little harm), and there is no way to set the flag
1958 1.50.2.7 nathanw 			 * in the caller without violating the locking order.
1959 1.50.2.7 nathanw */
1960 1.50.2.7 nathanw pa->pa_flags |= PA_WANT;
1961 1.50.2.7 nathanw }
1962 1.50.2.7 nathanw simple_unlock(&pa->pa_slock);
1963 1.50.2.7 nathanw splx(s);
1964 1.50.2.7 nathanw } while (freed);
1965 1.50.2.7 nathanw return (NULL);
1966 1.50.2.7 nathanw }
1967 1.50.2.7 nathanw
1968 1.50.2.7 nathanw void
1969 1.50.2.7 nathanw pool_allocator_free(struct pool *pp, void *v)
1970 1.50.2.7 nathanw {
1971 1.50.2.7 nathanw struct pool_allocator *pa = pp->pr_alloc;
1972 1.50.2.7 nathanw int s;
1973 1.50.2.7 nathanw
1974 1.50.2.7 nathanw (*pa->pa_free)(pp, v);
1975 1.50.2.7 nathanw
1976 1.50.2.7 nathanw s = splvm();
1977 1.50.2.7 nathanw simple_lock(&pa->pa_slock);
1978 1.50.2.7 nathanw if ((pa->pa_flags & PA_WANT) == 0) {
1979 1.50.2.7 nathanw simple_unlock(&pa->pa_slock);
1980 1.50.2.7 nathanw splx(s);
1981 1.50.2.7 nathanw return;
1982 1.50.2.7 nathanw }
1983 1.50.2.7 nathanw
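	/*
	 * A page was returned while PA_WANT was set: wake every pool
	 * on this allocator that has sleepers, then clear the flag.
	 */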
1984 1.50.2.7 nathanw TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
1985 1.50.2.7 nathanw simple_lock(&pp->pr_slock);
1986 1.50.2.7 nathanw if ((pp->pr_flags & PR_WANTED) != 0) {
1987 1.50.2.7 nathanw pp->pr_flags &= ~PR_WANTED;
1988 1.50.2.7 nathanw wakeup(pp);
1989 1.50.2.7 nathanw }
1990 1.50.2.7 nathanw simple_unlock(&pp->pr_slock);
1991 1.50.2.7 nathanw }
1992 1.50.2.7 nathanw pa->pa_flags &= ~PA_WANT;
1993 1.50.2.7 nathanw simple_unlock(&pa->pa_slock);
1994 1.50.2.7 nathanw splx(s);
1995 1.50.2.7 nathanw }
1996 1.50.2.7 nathanw
1997 1.50.2.7 nathanw void *
1998 1.50.2.7 nathanw pool_page_alloc(struct pool *pp, int flags)
1999 1.50.2.7 nathanw {
2000 1.50.2.7 nathanw boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2001 1.50.2.7 nathanw
2002 1.50.2.7 nathanw return ((void *) uvm_km_alloc_poolpage(waitok));
2003 1.50.2.7 nathanw }
2004 1.50.2.7 nathanw
2005 1.50.2.7 nathanw void
2006 1.50.2.7 nathanw pool_page_free(struct pool *pp, void *v)
2007 1.50.2.7 nathanw {
2008 1.50.2.7 nathanw
2009 1.50.2.7 nathanw uvm_km_free_poolpage((vaddr_t) v);
2010 1.50.2.7 nathanw }
2011 1.50.2.7 nathanw
2012 1.50.2.7 nathanw #ifdef POOL_SUBPAGE
2013 1.50.2.7 nathanw /* Sub-page allocator, for machines with large hardware pages. */
2014 1.50.2.7 nathanw void *
2015 1.50.2.7 nathanw pool_subpage_alloc(struct pool *pp, int flags)
2016 1.50.2.7 nathanw {
2017 1.50.2.7 nathanw
2018 1.50.2.7 nathanw return (pool_get(&psppool, flags));
2019 1.50.2.7 nathanw }
2020 1.50.2.7 nathanw
2021 1.50.2.7 nathanw void
2022 1.50.2.7 nathanw pool_subpage_free(struct pool *pp, void *v)
2023 1.50.2.7 nathanw {
2024 1.50.2.7 nathanw
2025 1.50.2.7 nathanw pool_put(&psppool, v);
2026 1.50.2.7 nathanw }
2027 1.50.2.7 nathanw
2028 1.50.2.7 nathanw /* We don't provide a real nointr allocator. Maybe later. */
2029 1.50.2.7 nathanw void *
2030 1.50.2.7 nathanw pool_page_alloc_nointr(struct pool *pp, int flags)
2031 1.50.2.7 nathanw {
2032 1.50.2.7 nathanw
2033 1.50.2.7 nathanw return (pool_subpage_alloc(pp, flags));
2034 1.50.2.7 nathanw }
2035 1.50.2.7 nathanw
2036 1.50.2.7 nathanw void
2037 1.50.2.7 nathanw pool_page_free_nointr(struct pool *pp, void *v)
2038 1.50.2.7 nathanw {
2039 1.50.2.7 nathanw
2040 1.50.2.7 nathanw pool_subpage_free(pp, v);
2041 1.50.2.7 nathanw }
2042 1.50.2.7 nathanw #else
2043 1.50.2.7 nathanw void *
2044 1.50.2.7 nathanw pool_page_alloc_nointr(struct pool *pp, int flags)
2045 1.50.2.7 nathanw {
2046 1.50.2.7 nathanw boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2047 1.50.2.7 nathanw
2048 1.50.2.7 nathanw return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2049 1.50.2.7 nathanw uvm.kernel_object, waitok));
2050 1.50.2.7 nathanw }
2051 1.50.2.7 nathanw
2052 1.50.2.7 nathanw void
2053 1.50.2.7 nathanw pool_page_free_nointr(struct pool *pp, void *v)
2054 1.50.2.7 nathanw {
2055 1.50.2.7 nathanw
2056 1.50.2.7 nathanw uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2057 1.50.2.7 nathanw }
2058 1.50.2.7 nathanw #endif /* POOL_SUBPAGE */
2059