/*	$NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $");

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according to
 * the pool item size. Each page is kept on one of three lists in the
 * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
 * for empty, full and partially-full pages respectively. The individual
 * pool items are on a linked list headed by `ph_itemlist' in each page
 * header. The memory for building the page list is either taken from
 * the allocated pages themselves (for small pool items) or taken from
 * an internal pool of page headers (`phpool').
 */

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
#define PHPOOL_MAX	8
static struct pool phpool[PHPOOL_MAX];
#define PHPOOL_FREELIST_NELEM(idx)	(((idx) == 0) ? 0 : (1 << (idx)))
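
/*
 * phpool[0] serves ordinary (!PR_NOTOUCH) pools, whose page headers
 * carry no freelist.  phpool[idx] for idx > 0 serves PR_NOTOUCH pools
 * whose pages hold up to 2^idx items: its element size leaves room for
 * a freelist of 2^idx uint16_t indices after the header (see the
 * PR_FREELIST() macros below and the sizing loop in pool_init()).
 */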

#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
static struct pool psppool;
#endif

static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);

/* allocator for pool metadata */
static struct pool_allocator pool_allocator_meta = {
        pool_page_alloc_meta, pool_page_free_meta
};

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
        /* Page headers */
        LIST_ENTRY(pool_item_header)
                                ph_pagelist;	/* pool page list */
        SPLAY_ENTRY(pool_item_header)
                                ph_node;	/* Off-page page headers */
        caddr_t                 ph_page;	/* this page's address */
        struct timeval          ph_time;	/* last referenced */
        union {
                /* !PR_NOTOUCH */
                struct {
                        TAILQ_HEAD(, pool_item)
                                phu_itemlist;	/* chunk list for this page */
                } phu_normal;
                /* PR_NOTOUCH */
                struct {
                        uint16_t
                                phu_off;	/* start offset in page */
                        uint16_t
                                phu_firstfree;	/* first free item */
                } phu_notouch;
        } ph_u;
        uint16_t                ph_nmissing;	/* # of chunks in use */
};
#define ph_itemlist	ph_u.phu_normal.phu_itemlist
#define ph_off		ph_u.phu_notouch.phu_off
#define ph_firstfree	ph_u.phu_notouch.phu_firstfree

struct pool_item {
#ifdef DIAGNOSTIC
        u_int pi_magic;
#endif
#define PI_MAGIC 0xdeadbeefU
        /* Other entries use only this list entry */
        TAILQ_ENTRY(pool_item)	pi_list;
};

#define POOL_NEEDS_CATCHUP(pp)						\
        ((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; it is deferred until absolutely
 * necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are laid on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
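
/*
 * A minimal usage sketch (foo_pool, foo_cache and the foo_ctor/foo_dtor
 * callbacks are hypothetical, shown for illustration only): a client
 * embeds a pool_cache next to its pool and allocates through the cache
 * rather than the pool proper.
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	...
 *	fp = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, fp);
 */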

/* The cache group pool. */
static struct pool pcgpool;

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static void	pool_prime_page(struct pool *, caddr_t,
		    struct pool_item_header *);
static void	pool_update_curpage(struct pool *);

void		*pool_allocator_alloc(struct pool *, int);
void		pool_allocator_free(struct pool *, void *);

static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
	void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
	void (*)(const char *, ...));

static int pool_chk_page(struct pool *, const char *,
			 struct pool_item_header *);

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
        const char	*pl_file;
        long		pl_line;
        int		pl_action;
#define PRLOG_GET	1
#define PRLOG_PUT	2
        void		*pl_addr;
};

#ifdef POOL_DIAGNOSTIC
/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
        int n = pp->pr_curlogentry;
        struct pool_log *pl;

        if ((pp->pr_roflags & PR_LOGGING) == 0)
                return;

        /*
         * Fill in the current entry. Wrap around and overwrite
         * the oldest entry if necessary.
         */
        pl = &pp->pr_log[n];
        pl->pl_file = file;
        pl->pl_line = line;
        pl->pl_action = action;
        pl->pl_addr = v;
        if (++n >= pp->pr_logsize)
                n = 0;
        pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
        int i = pp->pr_logsize;
        int n = pp->pr_curlogentry;

        if ((pp->pr_roflags & PR_LOGGING) == 0)
                return;

        /*
         * Print all entries in this pool's log.
         */
        while (i-- > 0) {
                struct pool_log *pl = &pp->pr_log[n];
                if (pl->pl_action != 0) {
                        if (pi == NULL || pi == pl->pl_addr) {
                                (*pr)("\tlog entry %d:\n", i);
                                (*pr)("\t\taction = %s, addr = %p\n",
                                    pl->pl_action == PRLOG_GET ? "get" : "put",
                                    pl->pl_addr);
                                (*pr)("\t\tfile: %s at line %lu\n",
                                    pl->pl_file, pl->pl_line);
                        }
                }
                if (++n >= pp->pr_logsize)
                        n = 0;
        }
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

        if (__predict_false(pp->pr_entered_file != NULL)) {
                printf("pool %s: reentrancy at file %s line %ld\n",
                    pp->pr_wchan, file, line);
                printf(" previous entry at file %s line %ld\n",
                    pp->pr_entered_file, pp->pr_entered_line);
                panic("pr_enter");
        }

        pp->pr_entered_file = file;
        pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

        if (__predict_false(pp->pr_entered_file == NULL)) {
                printf("pool %s not entered?\n", pp->pr_wchan);
                panic("pr_leave");
        }

        pp->pr_entered_file = NULL;
        pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

        if (pp->pr_entered_file != NULL)
                (*pr)("\n\tcurrently entered from file %s line %ld\n",
                    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define pr_log(pp, v, action, file, line)
#define pr_printlog(pp, pi, pr)
#define pr_enter(pp, file, line)
#define pr_leave(pp)
#define pr_enter_check(pp, pr)
#endif /* POOL_DIAGNOSTIC */

static __inline int
pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
    const void *v)
{
        const char *cp = v;
        int idx;

        KASSERT(pp->pr_roflags & PR_NOTOUCH);
        idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
        KASSERT(idx < pp->pr_itemsperpage);
        return idx;
}

#define PR_FREELIST_ALIGN(p)	roundup((uintptr_t)(p), sizeof(uint16_t))
#define PR_FREELIST(ph)	((uint16_t *)PR_FREELIST_ALIGN((ph) + 1))
#define PR_INDEX_USED	((uint16_t)-1)
#define PR_INDEX_EOL	((uint16_t)-2)
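
/*
 * The PR_NOTOUCH freelist lives in the uint16_t array immediately
 * following the page header (PR_FREELIST()).  freelist[i] holds the
 * index of the next free item after item i, PR_INDEX_EOL terminates
 * the chain, and PR_INDEX_USED marks an allocated slot; item i itself
 * lives at ph_page + ph_off + i * pr_size.  No allocator state is kept
 * inside the items themselves, which is the point of PR_NOTOUCH.
 */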

static __inline void
pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
    void *obj)
{
        int idx = pr_item_notouch_index(pp, ph, obj);
        uint16_t *freelist = PR_FREELIST(ph);

        KASSERT(freelist[idx] == PR_INDEX_USED);
        freelist[idx] = ph->ph_firstfree;
        ph->ph_firstfree = idx;
}

static __inline void *
pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
{
        int idx = ph->ph_firstfree;
        uint16_t *freelist = PR_FREELIST(ph);

        KASSERT(freelist[idx] != PR_INDEX_USED);
        ph->ph_firstfree = freelist[idx];
        freelist[idx] = PR_INDEX_USED;

        return ph->ph_page + ph->ph_off + idx * pp->pr_size;
}

static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
        if (a->ph_page < b->ph_page)
                return (-1);
        else if (a->ph_page > b->ph_page)
                return (1);
        else
                return (0);
}

SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
        struct pool_item_header *ph, tmp;

        if ((pp->pr_roflags & PR_PHINPAGE) != 0)
                return ((struct pool_item_header *)(page + pp->pr_phoffset));

        tmp.ph_page = page;
        ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
        return ph;
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph,
     struct pool_pagelist *pq)
{
        int s;

        LOCK_ASSERT(!simple_lock_held(&pp->pr_slock) || pq != NULL);

        /*
         * If the page was idle, decrement the idle page count.
         */
        if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
                if (pp->pr_nidle == 0)
                        panic("pr_rmpage: nidle inconsistent");
                if (pp->pr_nitems < pp->pr_itemsperpage)
                        panic("pr_rmpage: nitems inconsistent");
#endif
                pp->pr_nidle--;
        }

        pp->pr_nitems -= pp->pr_itemsperpage;

        /*
         * Unlink a page from the pool and release it (or queue it for
         * release).
         */
        LIST_REMOVE(ph, ph_pagelist);
        if ((pp->pr_roflags & PR_PHINPAGE) == 0)
                SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
        if (pq) {
                LIST_INSERT_HEAD(pq, ph, ph_pagelist);
        } else {
                pool_allocator_free(pp, ph->ph_page);
                if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
                        s = splvm();
                        pool_put(pp->pr_phpool, ph);
                        splx(s);
                }
        }
        pp->pr_npages--;
        pp->pr_npagefree++;

        pool_update_curpage(pp);
}

/*
 * Initialize all the pools listed in the "pools" link set.
 */
void
link_pool_init(void)
{
        __link_set_decl(pools, struct link_pool_init);
        struct link_pool_init * const *pi;

        __link_set_foreach(pi, pools)
                pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
                    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
                    (*pi)->palloc);
}
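
/*
 * Pools normally land in the "pools" link set via a registration macro
 * in <sys/pool.h> (POOL_INIT(), if memory serves) that emits a static
 * struct link_pool_init describing the pool; link_pool_init() then
 * walks the set at boot.  A hypothetical user would write roughly:
 *
 *	POOL_INIT(foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 */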

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, struct pool_allocator *palloc)
{
        int off, slack;
        size_t trysize, phsize;
        int s;

#ifdef POOL_DIAGNOSTIC
        /*
         * Always log if POOL_DIAGNOSTIC is defined.
         */
        if (pool_logsize != 0)
                flags |= PR_LOGGING;
#endif

#ifdef POOL_SUBPAGE
        /*
         * XXX We don't provide a real `nointr' back-end
         * yet; all sub-pages come from a kmem back-end.
         * maybe some day...
         */
        if (palloc == NULL) {
                extern struct pool_allocator pool_allocator_kmem_subpage;
                palloc = &pool_allocator_kmem_subpage;
        }
        /*
         * We'll assume any user-specified back-end allocator
         * will deal with sub-pages, or simply don't care.
         */
#else
        if (palloc == NULL)
                palloc = &pool_allocator_kmem;
#endif /* POOL_SUBPAGE */
        if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
                if (palloc->pa_pagesz == 0) {
#ifdef POOL_SUBPAGE
                        if (palloc == &pool_allocator_kmem)
                                palloc->pa_pagesz = PAGE_SIZE;
                        else
                                palloc->pa_pagesz = POOL_SUBPAGE;
#else
                        palloc->pa_pagesz = PAGE_SIZE;
#endif /* POOL_SUBPAGE */
                }

                TAILQ_INIT(&palloc->pa_list);

                simple_lock_init(&palloc->pa_slock);
                palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
                palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
                palloc->pa_flags |= PA_INITIALIZED;
        }

        if (align == 0)
                align = ALIGN(1);

        if (size < sizeof(struct pool_item))
                size = sizeof(struct pool_item);

        size = roundup(size, align);
#ifdef DIAGNOSTIC
        if (size > palloc->pa_pagesz)
                panic("pool_init: pool item size (%lu) too large",
                    (u_long)size);
#endif

        /*
         * Initialize the pool structure.
         */
        LIST_INIT(&pp->pr_emptypages);
        LIST_INIT(&pp->pr_fullpages);
        LIST_INIT(&pp->pr_partpages);
        TAILQ_INIT(&pp->pr_cachelist);
        pp->pr_curpage = NULL;
        pp->pr_npages = 0;
        pp->pr_minitems = 0;
        pp->pr_minpages = 0;
        pp->pr_maxpages = UINT_MAX;
        pp->pr_roflags = flags;
        pp->pr_flags = 0;
        pp->pr_size = size;
        pp->pr_align = align;
        pp->pr_wchan = wchan;
        pp->pr_alloc = palloc;
        pp->pr_nitems = 0;
        pp->pr_nout = 0;
        pp->pr_hardlimit = UINT_MAX;
        pp->pr_hardlimit_warning = NULL;
        pp->pr_hardlimit_ratecap.tv_sec = 0;
        pp->pr_hardlimit_ratecap.tv_usec = 0;
        pp->pr_hardlimit_warning_last.tv_sec = 0;
        pp->pr_hardlimit_warning_last.tv_usec = 0;
        pp->pr_drain_hook = NULL;
        pp->pr_drain_hook_arg = NULL;

        /*
         * Decide whether to put the page header off-page, to avoid
         * wasting too large a part of the page on the header or to
         * accommodate items that are too big.  Off-page page headers
         * go on a splay tree, so we can match a returned item with
         * its header based on the page address.  We use 1/16 of the
         * page size and about 8 times the item size as the threshold
         * (XXX: tune).
         *
         * However, we'll put the header into the page if we can put
         * it without wasting any items.
         *
         * Silently enforce `0 <= ioff < align'.
         */
        pp->pr_itemoffset = ioff %= align;
        /* See the comment below about reserved bytes. */
        trysize = palloc->pa_pagesz - ((align - ioff) % align);
        phsize = ALIGN(sizeof(struct pool_item_header));
        if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
            (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
            trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
                /* Use the end of the page for the page header */
                pp->pr_roflags |= PR_PHINPAGE;
                pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
        } else {
                /* The page header will be taken from our page header pool */
                pp->pr_phoffset = 0;
                off = palloc->pa_pagesz;
                SPLAY_INIT(&pp->pr_phtree);
        }

        /*
         * Alignment is to take place at `ioff' within the item. This means
         * we must reserve up to `align - 1' bytes on the page to allow
         * appropriate positioning of each item.
         */
        pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
        KASSERT(pp->pr_itemsperpage != 0);
        if ((pp->pr_roflags & PR_NOTOUCH)) {
                int idx;

                for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
                    idx++) {
                        /* nothing */
                }
                if (idx >= PHPOOL_MAX) {
                        /*
                         * if you see this panic, consider tweaking
                         * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
                         */
                        panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
                            pp->pr_wchan, pp->pr_itemsperpage);
                }
                pp->pr_phpool = &phpool[idx];
        } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
                pp->pr_phpool = &phpool[0];
        }
#if defined(DIAGNOSTIC)
        else {
                pp->pr_phpool = NULL;
        }
#endif

        /*
         * Use the slack between the chunks and the page header
         * for "cache coloring".
         */
        slack = off - pp->pr_itemsperpage * pp->pr_size;
        pp->pr_maxcolor = (slack / align) * align;
        pp->pr_curcolor = 0;
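
        /*
         * For instance (numbers purely illustrative): with a 4096-byte
         * page, 256-byte items and a 64-byte in-page header, off is
         * 4032, so 15 items fit and slack is 192 bytes.
         * pool_prime_page() starts each new page's first item
         * pr_curcolor bytes in, advancing the color by `align' per page
         * and wrapping at pr_maxcolor, so the same-index item in
         * different pages maps to different cache lines.
         */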

        pp->pr_nget = 0;
        pp->pr_nfail = 0;
        pp->pr_nput = 0;
        pp->pr_npagealloc = 0;
        pp->pr_npagefree = 0;
        pp->pr_hiwat = 0;
        pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
        if (flags & PR_LOGGING) {
                if (kmem_map == NULL ||
                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
                     M_TEMP, M_NOWAIT)) == NULL)
                        pp->pr_roflags &= ~PR_LOGGING;
                pp->pr_curlogentry = 0;
                pp->pr_logsize = pool_logsize;
        }
#endif

        pp->pr_entered_file = NULL;
        pp->pr_entered_line = 0;

        simple_lock_init(&pp->pr_slock);

        /*
         * Initialize private page header pool and cache magazine pool if we
         * haven't done so yet.
         * XXX LOCKING.
         */
        if (phpool[0].pr_size == 0) {
                int idx;
                for (idx = 0; idx < PHPOOL_MAX; idx++) {
                        static char phpool_names[PHPOOL_MAX][6+1+6+1];
                        int nelem;
                        size_t sz;

                        nelem = PHPOOL_FREELIST_NELEM(idx);
                        snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
                            "phpool-%d", nelem);
                        sz = sizeof(struct pool_item_header);
                        if (nelem) {
                                sz = PR_FREELIST_ALIGN(sz)
                                    + nelem * sizeof(uint16_t);
                        }
                        pool_init(&phpool[idx], sz, 0, 0, 0,
                            phpool_names[idx], &pool_allocator_meta);
                }
#ifdef POOL_SUBPAGE
                pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
                    PR_RECURSIVE, "psppool", &pool_allocator_meta);
#endif
                pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
                    0, "pcgpool", &pool_allocator_meta);
        }

        /* Insert into the list of all pools. */
        simple_lock(&pool_head_slock);
        TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
        simple_unlock(&pool_head_slock);

        /* Insert this into the list of pools using this allocator. */
        s = splvm();
        simple_lock(&palloc->pa_slock);
        TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
        simple_unlock(&palloc->pa_slock);
        splx(s);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
        struct pool_item_header *ph;
        struct pool_cache *pc;
        int s;

        /* Locking order: pool_allocator -> pool */
        s = splvm();
        simple_lock(&pp->pr_alloc->pa_slock);
        TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
        simple_unlock(&pp->pr_alloc->pa_slock);
        splx(s);

        /* Destroy all caches for this pool. */
        while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
                pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
        if (pp->pr_nout != 0) {
                pr_printlog(pp, NULL, printf);
                panic("pool_destroy: pool busy: still out: %u",
                    pp->pr_nout);
        }
#endif

        /* Remove all pages */
        while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
                pr_rmpage(pp, ph, NULL);
        KASSERT(LIST_EMPTY(&pp->pr_fullpages));
        KASSERT(LIST_EMPTY(&pp->pr_partpages));

        /* Remove from global pool list */
        simple_lock(&pool_head_slock);
        TAILQ_REMOVE(&pool_head, pp, pr_poollist);
        if (drainpp == pp) {
                drainpp = NULL;
        }
        simple_unlock(&pool_head_slock);

#ifdef POOL_DIAGNOSTIC
        if ((pp->pr_roflags & PR_LOGGING) != 0)
                free(pp->pr_log, M_TEMP);
#endif
}

void
pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
{

        /* XXX no locking -- must be used just after pool_init() */
#ifdef DIAGNOSTIC
        if (pp->pr_drain_hook != NULL)
                panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
#endif
        pp->pr_drain_hook = fn;
        pp->pr_drain_hook_arg = arg;
}

static struct pool_item_header *
pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
{
        struct pool_item_header *ph;
        int s;

        LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);

        if ((pp->pr_roflags & PR_PHINPAGE) != 0)
                ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
        else {
                s = splvm();
                ph = pool_get(pp->pr_phpool, flags);
                splx(s);
        }

        return (ph);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
#ifdef POOL_DIAGNOSTIC
_pool_get(struct pool *pp, int flags, const char *file, long line)
#else
pool_get(struct pool *pp, int flags)
#endif
{
        struct pool_item *pi;
        struct pool_item_header *ph;
        void *v;

#ifdef DIAGNOSTIC
        if (__predict_false(pp->pr_itemsperpage == 0))
                panic("pool_get: pool %p: pr_itemsperpage is zero, "
                    "pool not initialized?", pp);
        if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
                            (flags & PR_WAITOK) != 0))
                panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);

#ifdef LOCKDEBUG
        if (flags & PR_WAITOK)
                simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
#endif
#endif /* DIAGNOSTIC */

        simple_lock(&pp->pr_slock);
        pr_enter(pp, file, line);

 startover:
        /*
         * Check to see if we've reached the hard limit.  If we have,
         * and we can wait, then wait until an item has been returned to
         * the pool.
         */
#ifdef DIAGNOSTIC
        if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
                pr_leave(pp);
                simple_unlock(&pp->pr_slock);
                panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
        }
#endif
        if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
                if (pp->pr_drain_hook != NULL) {
                        /*
                         * Since the drain hook is going to free things
                         * back to the pool, unlock, call the hook, re-lock,
                         * and check the hardlimit condition again.
                         */
                        pr_leave(pp);
                        simple_unlock(&pp->pr_slock);
                        (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
                        simple_lock(&pp->pr_slock);
                        pr_enter(pp, file, line);
                        if (pp->pr_nout < pp->pr_hardlimit)
                                goto startover;
                }

                if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
                        /*
                         * XXX: A warning isn't logged in this case.  Should
                         * it be?
                         */
                        pp->pr_flags |= PR_WANTED;
                        pr_leave(pp);
                        ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
                        pr_enter(pp, file, line);
                        goto startover;
                }

                /*
                 * Log a message that the hard limit has been hit.
                 */
                if (pp->pr_hardlimit_warning != NULL &&
                    ratecheck(&pp->pr_hardlimit_warning_last,
                              &pp->pr_hardlimit_ratecap))
                        log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

                pp->pr_nfail++;

                pr_leave(pp);
                simple_unlock(&pp->pr_slock);
                return (NULL);
        }

        /*
         * The convention we use is that if `curpage' is not NULL, then
         * it points at a non-empty bucket. In particular, `curpage'
         * never points at a page header which has PR_PHINPAGE set and
         * has no items in its bucket.
         */
        if ((ph = pp->pr_curpage) == NULL) {
#ifdef DIAGNOSTIC
                if (pp->pr_nitems != 0) {
                        simple_unlock(&pp->pr_slock);
                        printf("pool_get: %s: curpage NULL, nitems %u\n",
                            pp->pr_wchan, pp->pr_nitems);
                        panic("pool_get: nitems inconsistent");
                }
#endif

                /*
                 * Call the back-end page allocator for more memory.
                 * Release the pool lock, as the back-end page allocator
                 * may block.
                 */
                pr_leave(pp);
                simple_unlock(&pp->pr_slock);
                v = pool_allocator_alloc(pp, flags);
                if (__predict_true(v != NULL))
                        ph = pool_alloc_item_header(pp, v, flags);

                if (__predict_false(v == NULL || ph == NULL)) {
                        if (v != NULL)
                                pool_allocator_free(pp, v);

                        simple_lock(&pp->pr_slock);
                        pr_enter(pp, file, line);

                        /*
                         * We were unable to allocate a page or item
                         * header, but we released the lock during
                         * allocation, so perhaps items were freed
                         * back to the pool.  Check for this case.
                         */
                        if (pp->pr_curpage != NULL)
                                goto startover;

                        if ((flags & PR_WAITOK) == 0) {
                                pp->pr_nfail++;
                                pr_leave(pp);
                                simple_unlock(&pp->pr_slock);
                                return (NULL);
                        }

                        /*
                         * Wait for items to be returned to this pool.
                         *
                         * XXX: maybe we should wake up once a second and
                         * try again?
                         */
                        pp->pr_flags |= PR_WANTED;
                        /* PA_WANTED is already set on the allocator. */
                        pr_leave(pp);
                        ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
                        pr_enter(pp, file, line);
                        goto startover;
                }

                /* We have more memory; add it to the pool */
                simple_lock(&pp->pr_slock);
                pr_enter(pp, file, line);
                pool_prime_page(pp, v, ph);
                pp->pr_npagealloc++;

                /* Start the allocation process over. */
                goto startover;
        }
        if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
                if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
                        pr_leave(pp);
                        simple_unlock(&pp->pr_slock);
                        panic("pool_get: %s: page empty", pp->pr_wchan);
                }
#endif
                v = pr_item_notouch_get(pp, ph);
#ifdef POOL_DIAGNOSTIC
                pr_log(pp, v, PRLOG_GET, file, line);
#endif
        } else {
                v = pi = TAILQ_FIRST(&ph->ph_itemlist);
                if (__predict_false(v == NULL)) {
                        pr_leave(pp);
                        simple_unlock(&pp->pr_slock);
                        panic("pool_get: %s: page empty", pp->pr_wchan);
                }
#ifdef DIAGNOSTIC
                if (__predict_false(pp->pr_nitems == 0)) {
                        pr_leave(pp);
                        simple_unlock(&pp->pr_slock);
                        printf("pool_get: %s: items on itemlist, nitems %u\n",
                            pp->pr_wchan, pp->pr_nitems);
                        panic("pool_get: nitems inconsistent");
                }
#endif

#ifdef POOL_DIAGNOSTIC
                pr_log(pp, v, PRLOG_GET, file, line);
#endif

#ifdef DIAGNOSTIC
                if (__predict_false(pi->pi_magic != PI_MAGIC)) {
                        pr_printlog(pp, pi, printf);
                        panic("pool_get(%s): free list modified: "
                            "magic=%x; page %p; item addr %p\n",
                            pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
                }
#endif

                /*
                 * Remove from item list.
                 */
                TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
        }
        pp->pr_nitems--;
        pp->pr_nout++;
        if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
                if (__predict_false(pp->pr_nidle == 0))
                        panic("pool_get: nidle inconsistent");
#endif
                pp->pr_nidle--;

                /*
                 * This page was previously empty.  Move it to the list of
                 * partially-full pages.  This page is already curpage.
                 */
                LIST_REMOVE(ph, ph_pagelist);
                LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
        }
        ph->ph_nmissing++;
        if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
                if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
                    !TAILQ_EMPTY(&ph->ph_itemlist))) {
                        pr_leave(pp);
                        simple_unlock(&pp->pr_slock);
                        panic("pool_get: %s: nmissing inconsistent",
                            pp->pr_wchan);
                }
#endif
                /*
                 * This page is now full.  Move it to the full list
                 * and select a new current page.
                 */
                LIST_REMOVE(ph, ph_pagelist);
                LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
                pool_update_curpage(pp);
        }

        pp->pr_nget++;

        /*
         * If we have a low water mark and we are now below that low
         * water mark, add more items to the pool.
         */
        if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
                /*
                 * XXX: Should we log a warning?  Should we set up a timeout
                 * to try again in a second or so?  The latter could break
                 * a caller's assumptions about interrupt protection, etc.
                 */
        }

        pr_leave(pp);
        simple_unlock(&pp->pr_slock);
        return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v)
{
        struct pool_item *pi = v;
        struct pool_item_header *ph;
        caddr_t page;
        int s;

        LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

        page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);

#ifdef DIAGNOSTIC
        if (__predict_false(pp->pr_nout == 0)) {
                printf("pool %s: putting with none out\n",
                    pp->pr_wchan);
                panic("pool_put");
        }
#endif

        if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
                pr_printlog(pp, NULL, printf);
                panic("pool_put: %s: page header missing", pp->pr_wchan);
        }

#ifdef LOCKDEBUG
        /*
         * Check if we're freeing a locked simple lock.
         */
        simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

        /*
         * Return to item list.
         */
        if (pp->pr_roflags & PR_NOTOUCH) {
                pr_item_notouch_put(pp, ph, v);
        } else {
#ifdef DIAGNOSTIC
                pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
                {
                        int i, *ip = v;

                        for (i = 0; i < pp->pr_size / sizeof(int); i++) {
                                *ip++ = PI_MAGIC;
                        }
                }
#endif

                TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
        }
        KDASSERT(ph->ph_nmissing != 0);
        ph->ph_nmissing--;
        pp->pr_nput++;
        pp->pr_nitems++;
        pp->pr_nout--;

        /* Cancel "pool empty" condition if it exists */
        if (pp->pr_curpage == NULL)
                pp->pr_curpage = ph;

        if (pp->pr_flags & PR_WANTED) {
                pp->pr_flags &= ~PR_WANTED;
                if (ph->ph_nmissing == 0)
                        pp->pr_nidle++;
                wakeup((caddr_t)pp);
                return;
        }

        /*
         * If this page is now empty, do one of two things:
         *
         *	(1) If we have more pages than the page high water mark,
         *	    free the page back to the system.  ONLY CONSIDER
         *	    FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
         *	    CLAIM.
         *
         *	(2) Otherwise, move the page to the empty page list.
         *
         * Either way, select a new current page (so we use a partially-full
         * page if one is available).
         */
        if (ph->ph_nmissing == 0) {
                pp->pr_nidle++;
                if (pp->pr_npages > pp->pr_minpages &&
                    (pp->pr_npages > pp->pr_maxpages ||
                     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
                        simple_unlock(&pp->pr_slock);
                        pr_rmpage(pp, ph, NULL);
                        simple_lock(&pp->pr_slock);
                } else {
                        LIST_REMOVE(ph, ph_pagelist);
                        LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);

                        /*
                         * Update the timestamp on the page.  A page must
                         * be idle for some period of time before it can
                         * be reclaimed by the pagedaemon.  This minimizes
                         * ping-pong'ing for memory.
                         */
                        s = splclock();
                        ph->ph_time = mono_time;
                        splx(s);
                }
                pool_update_curpage(pp);
        }

        /*
         * If the page was previously completely full, move it to the
         * partially-full list and make it the current page.  The next
         * allocation will get the item from this page, instead of
         * further fragmenting the pool.
         */
        else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
                LIST_REMOVE(ph, ph_pagelist);
                LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
                pp->pr_curpage = ph;
        }
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

        simple_lock(&pp->pr_slock);
        pr_enter(pp, file, line);

        pr_log(pp, v, PRLOG_PUT, file, line);

        pool_do_put(pp, v);

        pr_leave(pp);
        simple_unlock(&pp->pr_slock);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */

void
pool_put(struct pool *pp, void *v)
{

        simple_lock(&pp->pr_slock);

        pool_do_put(pp, v);

        simple_unlock(&pp->pr_slock);
}

#ifdef POOL_DIAGNOSTIC
#define pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#endif
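
/*
 * Note the macro dance above: under POOL_DIAGNOSTIC, <sys/pool.h> maps
 * pool_put() to _pool_put(), so the macro is undefined while the plain
 * pool_put() function body is compiled, then redefined so that the
 * internal pool_put() calls later in this file also record
 * __FILE__/__LINE__ in the pool log.
 */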

/*
 * Add N items to the pool.
 */
int
pool_prime(struct pool *pp, int n)
{
        struct pool_item_header *ph = NULL;
        caddr_t cp;
        int newpages;

        simple_lock(&pp->pr_slock);

        newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

        while (newpages-- > 0) {
                simple_unlock(&pp->pr_slock);
                cp = pool_allocator_alloc(pp, PR_NOWAIT);
                if (__predict_true(cp != NULL))
                        ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);

                if (__predict_false(cp == NULL || ph == NULL)) {
                        if (cp != NULL)
                                pool_allocator_free(pp, cp);
                        simple_lock(&pp->pr_slock);
                        break;
                }

                simple_lock(&pp->pr_slock);
                pool_prime_page(pp, cp, ph);
                pp->pr_npagealloc++;
                pp->pr_minpages++;
        }

        if (pp->pr_minpages >= pp->pr_maxpages)
                pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

        simple_unlock(&pp->pr_slock);
        return (0);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static void
pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
{
        struct pool_item *pi;
        caddr_t cp = storage;
        unsigned int align = pp->pr_align;
        unsigned int ioff = pp->pr_itemoffset;
        int n;
        int s;

        LOCK_ASSERT(simple_lock_held(&pp->pr_slock));

#ifdef DIAGNOSTIC
        if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
                panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
#endif

        /*
         * Insert page header.
         */
        LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
        TAILQ_INIT(&ph->ph_itemlist);
        ph->ph_page = storage;
        ph->ph_nmissing = 0;
        s = splclock();
        ph->ph_time = mono_time;
        splx(s);
        if ((pp->pr_roflags & PR_PHINPAGE) == 0)
                SPLAY_INSERT(phtree, &pp->pr_phtree, ph);

        pp->pr_nidle++;

        /*
         * Color this page.
         */
        cp = (caddr_t)(cp + pp->pr_curcolor);
        if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
                pp->pr_curcolor = 0;

        /*
         * Adjust storage to apply alignment to `pr_itemoffset' in each item.
         */
        if (ioff != 0)
                cp = (caddr_t)(cp + (align - ioff));

        /*
         * Insert remaining chunks on the bucket list.
         */
        n = pp->pr_itemsperpage;
        pp->pr_nitems += n;

        ph->ph_off = cp - storage;

        if (pp->pr_roflags & PR_NOTOUCH) {
                uint16_t *freelist = PR_FREELIST(ph);
                int i;

                ph->ph_firstfree = 0;
                for (i = 0; i < n - 1; i++)
                        freelist[i] = i + 1;
                freelist[n - 1] = PR_INDEX_EOL;
        } else {
                while (n--) {
                        pi = (struct pool_item *)cp;

                        KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);

                        /* Insert on page list */
                        TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
                        pi->pi_magic = PI_MAGIC;
#endif
                        cp = (caddr_t)(cp + pp->pr_size);
                }
        }

        /*
         * If the pool was depleted, point at the new page.
         */
        if (pp->pr_curpage == NULL)
                pp->pr_curpage = ph;

        if (++pp->pr_npages > pp->pr_hiwat)
                pp->pr_hiwat = pp->pr_npages;
}
1317 1.3 pk
1318 1.20 thorpej /*
1319 1.52 thorpej * Used by pool_get() when nitems drops below the low water mark. This
1320 1.88 chs * is used to catch up pr_nitems with the low water mark.
1321 1.20 thorpej *
1322 1.21 thorpej * Note 1, we never wait for memory here, we let the caller decide what to do.
1323 1.20 thorpej *
1324 1.73 thorpej * Note 2, we must be called with the pool already locked, and we return
1325 1.20 thorpej * with it locked.
1326 1.20 thorpej */
1327 1.20 thorpej static int
1328 1.42 thorpej pool_catchup(struct pool *pp)
1329 1.20 thorpej {
1330 1.83 scw struct pool_item_header *ph = NULL;
1331 1.20 thorpej caddr_t cp;
1332 1.20 thorpej int error = 0;
1333 1.20 thorpej
1334 1.54 thorpej while (POOL_NEEDS_CATCHUP(pp)) {
1335 1.20 thorpej /*
1336 1.21 thorpej * Call the page back-end allocator for more memory.
1337 1.21 thorpej *
1338 1.21 thorpej * XXX: We never wait, so should we bother unlocking
1339 1.21 thorpej * the pool descriptor?
1340 1.20 thorpej */
1341 1.21 thorpej simple_unlock(&pp->pr_slock);
1342 1.66 thorpej cp = pool_allocator_alloc(pp, PR_NOWAIT);
1343 1.55 thorpej if (__predict_true(cp != NULL))
1344 1.55 thorpej ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1345 1.55 thorpej if (__predict_false(cp == NULL || ph == NULL)) {
1346 1.55 thorpej if (cp != NULL)
1347 1.66 thorpej pool_allocator_free(pp, cp);
1348 1.20 thorpej error = ENOMEM;
1349 1.91 yamt simple_lock(&pp->pr_slock);
1350 1.20 thorpej break;
1351 1.20 thorpej }
1352 1.91 yamt simple_lock(&pp->pr_slock);
1353 1.55 thorpej pool_prime_page(pp, cp, ph);
1354 1.26 thorpej pp->pr_npagealloc++;
1355 1.20 thorpej }
1356 1.20 thorpej
1357 1.20 thorpej return (error);
1358 1.20 thorpej }
1359 1.20 thorpej
1360 1.88 chs static void
1361 1.88 chs pool_update_curpage(struct pool *pp)
1362 1.88 chs {
1363 1.88 chs
1364 1.88 chs pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1365 1.88 chs if (pp->pr_curpage == NULL) {
1366 1.88 chs pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1367 1.88 chs }
1368 1.88 chs }
1369 1.88 chs
1370 1.3 pk void
1371 1.42 thorpej pool_setlowat(struct pool *pp, int n)
1372 1.3 pk {
1373 1.15 pk
1374 1.21 thorpej simple_lock(&pp->pr_slock);
1375 1.21 thorpej
1376 1.3 pk pp->pr_minitems = n;
1377 1.15 pk pp->pr_minpages = (n == 0)
1378 1.15 pk ? 0
1379 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1380 1.20 thorpej
1381 1.20 thorpej /* Make sure we're caught up with the newly-set low water mark. */
1382 1.75 simonb if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1383 1.20 thorpej /*
1384 1.20 thorpej * XXX: Should we log a warning? Should we set up a timeout
1385 1.20 thorpej * to try again in a second or so? The latter could break
1386 1.20 thorpej * a caller's assumptions about interrupt protection, etc.
1387 1.20 thorpej */
1388 1.20 thorpej }
1389 1.21 thorpej
1390 1.21 thorpej simple_unlock(&pp->pr_slock);
1391 1.3 pk }
1392 1.3 pk
1393 1.3 pk void
1394 1.42 thorpej pool_sethiwat(struct pool *pp, int n)
1395 1.3 pk {
1396 1.15 pk
1397 1.21 thorpej simple_lock(&pp->pr_slock);
1398 1.21 thorpej
1399 1.15 pk pp->pr_maxpages = (n == 0)
1400 1.15 pk ? 0
1401 1.18 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1402 1.21 thorpej
1403 1.21 thorpej simple_unlock(&pp->pr_slock);
1404 1.3 pk }
1405 1.3 pk
1406 1.20 thorpej void
1407 1.42 thorpej pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1408 1.20 thorpej {
1409 1.20 thorpej
1410 1.21 thorpej simple_lock(&pp->pr_slock);
1411 1.20 thorpej
1412 1.20 thorpej pp->pr_hardlimit = n;
1413 1.20 thorpej pp->pr_hardlimit_warning = warnmess;
1414 1.31 thorpej pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1415 1.31 thorpej pp->pr_hardlimit_warning_last.tv_sec = 0;
1416 1.31 thorpej pp->pr_hardlimit_warning_last.tv_usec = 0;
1417 1.20 thorpej
1418 1.20 thorpej /*
1419 1.21 thorpej * In-line version of pool_sethiwat(), because we don't want to
1420 1.21 thorpej * release the lock.
1421 1.20 thorpej */
1422 1.20 thorpej pp->pr_maxpages = (n == 0)
1423 1.20 thorpej ? 0
1424 1.20 thorpej : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1425 1.21 thorpej
1426 1.21 thorpej simple_unlock(&pp->pr_slock);
1427 1.20 thorpej }
1428 1.3 pk
1429 1.3 pk /*
1430 1.3 pk * Release all complete pages that have not been used recently.
1431 1.3 pk */
1432 1.66 thorpej int
1433 1.59 thorpej #ifdef POOL_DIAGNOSTIC
1434 1.42 thorpej _pool_reclaim(struct pool *pp, const char *file, long line)
1435 1.56 sommerfe #else
1436 1.56 sommerfe pool_reclaim(struct pool *pp)
1437 1.56 sommerfe #endif
1438 1.3 pk {
1439 1.3 pk struct pool_item_header *ph, *phnext;
1440 1.43 thorpej struct pool_cache *pc;
1441 1.21 thorpej struct timeval curtime;
1442 1.61 chs struct pool_pagelist pq;
1443 1.88 chs struct timeval diff;
1444 1.21 thorpej int s;
1445 1.3 pk
1446 1.68 thorpej if (pp->pr_drain_hook != NULL) {
1447 1.68 thorpej /*
1448 1.68 thorpej * The drain hook must be called with the pool unlocked.
1449 1.68 thorpej */
1450 1.68 thorpej (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1451 1.68 thorpej }
1452 1.68 thorpej
1453 1.21 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1454 1.66 thorpej return (0);
1455 1.25 thorpej pr_enter(pp, file, line);
1456 1.68 thorpej
1457 1.88 chs LIST_INIT(&pq);
1458 1.3 pk
1459 1.43 thorpej /*
1460 1.43 thorpej * Reclaim items from the pool's caches.
1461 1.43 thorpej */
1462 1.61 chs TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1463 1.43 thorpej pool_cache_reclaim(pc);
1464 1.43 thorpej
1465 1.21 thorpej s = splclock();
1466 1.21 thorpej curtime = mono_time;
1467 1.21 thorpej splx(s);
1468 1.21 thorpej
1469 1.88 chs for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1470 1.88 chs phnext = LIST_NEXT(ph, ph_pagelist);
1471 1.3 pk
1472 1.3 pk /* Check our minimum page claim */
1473 1.3 pk if (pp->pr_npages <= pp->pr_minpages)
1474 1.3 pk break;
1475 1.3 pk
1476 1.88 chs KASSERT(ph->ph_nmissing == 0);
1477 1.88 chs timersub(&curtime, &ph->ph_time, &diff);
1478 1.88 chs if (diff.tv_sec < pool_inactive_time)
1479 1.88 chs continue;
1480 1.21 thorpej
1481 1.88 chs /*
1482 1.88 chs * If freeing this page would put us below
1483 1.88 chs * the low water mark, stop now.
1484 1.88 chs */
1485 1.88 chs if ((pp->pr_nitems - pp->pr_itemsperpage) <
1486 1.88 chs pp->pr_minitems)
1487 1.88 chs break;
1488 1.21 thorpej
1489 1.88 chs pr_rmpage(pp, ph, &pq);
1490 1.3 pk }
1491 1.3 pk
1492 1.25 thorpej pr_leave(pp);
1493 1.21 thorpej simple_unlock(&pp->pr_slock);
1494 1.88 chs if (LIST_EMPTY(&pq))
1495 1.66 thorpej return (0);
1496 1.66 thorpej
1497 1.88 chs while ((ph = LIST_FIRST(&pq)) != NULL) {
1498 1.88 chs LIST_REMOVE(ph, ph_pagelist);
1499 1.66 thorpej pool_allocator_free(pp, ph->ph_page);
1500 1.61 chs if (pp->pr_roflags & PR_PHINPAGE) {
1501 1.61 chs continue;
1502 1.61 chs }
1503 1.85 pk s = splvm();
1504 1.97 yamt pool_put(pp->pr_phpool, ph);
1505 1.61 chs splx(s);
1506 1.61 chs }
1507 1.66 thorpej
1508 1.66 thorpej return (1);
1509 1.3 pk }
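/*
 * pool_reclaim() illustrates a convention used throughout this file:
 * victim pages are unlinked onto a private list (`pq') while pr_slock
 * is held, and only handed back to the allocator once the lock has
 * been dropped, because pool_allocator_free() must not be called with
 * the pool locked.  A minimal sketch of the idiom:
 *
 *	struct pool_pagelist pq;
 *
 *	LIST_INIT(&pq);
 *	simple_lock(&pp->pr_slock);
 *	(... pr_rmpage(pp, ph, &pq) for each page to be freed ...)
 *	simple_unlock(&pp->pr_slock);
 *	while ((ph = LIST_FIRST(&pq)) != NULL) {
 *		LIST_REMOVE(ph, ph_pagelist);
 *		pool_allocator_free(pp, ph->ph_page);
 *	}
 */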
1510 1.3 pk
1511 1.3 pk /*
1512 1.3 pk * Drain pools, one at a time.
1513 1.21 thorpej *
1514 1.21 thorpej * Note, we must never be called from an interrupt context.
1515 1.3 pk */
1516 1.3 pk void
1517 1.42 thorpej pool_drain(void *arg)
1518 1.3 pk {
1519 1.3 pk struct pool *pp;
1520 1.23 thorpej int s;
1521 1.3 pk
1522 1.61 chs pp = NULL;
1523 1.49 thorpej s = splvm();
1524 1.23 thorpej simple_lock(&pool_head_slock);
1525 1.61 chs if (drainpp == NULL) {
1526 1.61 chs drainpp = TAILQ_FIRST(&pool_head);
1527 1.61 chs }
1528 1.61 chs if (drainpp) {
1529 1.61 chs pp = drainpp;
1530 1.61 chs drainpp = TAILQ_NEXT(pp, pr_poollist);
1531 1.61 chs }
1532 1.61 chs simple_unlock(&pool_head_slock);
1533 1.63 chs 	if (pp != NULL)
1533 1.63 chs 		pool_reclaim(pp);
1534 1.61 chs splx(s);
1535 1.3 pk }
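/*
 * Each call reclaims from at most one pool, cycling through the
 * global pool list via `drainpp', so the cost of draining is spread
 * across successive calls.  The expected caller is the page daemon
 * when free memory runs short; a sketch of such a call site:
 *
 *	if (uvmexp.free < uvmexp.freetarg)
 *		pool_drain(0);
 */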
1536 1.3 pk
1537 1.3 pk /*
1538 1.3 pk * Diagnostic helpers.
1539 1.3 pk */
1540 1.3 pk void
1541 1.42 thorpej pool_print(struct pool *pp, const char *modif)
1542 1.21 thorpej {
1543 1.21 thorpej int s;
1544 1.21 thorpej
1545 1.49 thorpej s = splvm();
1546 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0) {
1547 1.25 thorpej printf("pool %s is locked; try again later\n",
1548 1.25 thorpej pp->pr_wchan);
1549 1.25 thorpej splx(s);
1550 1.25 thorpej return;
1551 1.25 thorpej }
1552 1.25 thorpej pool_print1(pp, modif, printf);
1553 1.21 thorpej simple_unlock(&pp->pr_slock);
1554 1.21 thorpej splx(s);
1555 1.21 thorpej }
1556 1.21 thorpej
1557 1.25 thorpej void
1558 1.42 thorpej pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1559 1.25 thorpej {
1560 1.25 thorpej int didlock = 0;
1561 1.25 thorpej
1562 1.25 thorpej if (pp == NULL) {
1563 1.25 thorpej (*pr)("Must specify a pool to print.\n");
1564 1.25 thorpej return;
1565 1.25 thorpej }
1566 1.25 thorpej
1567 1.25 thorpej /*
1568 1.25 thorpej * Called from DDB; interrupts should be blocked, and all
1569 1.25 thorpej * other processors should be paused. We can skip locking
1570 1.25 thorpej * the pool in this case.
1571 1.25 thorpej *
1572 1.25 thorpej * We do a simple_lock_try() just to print the lock
1573 1.25 thorpej * status, however.
1574 1.25 thorpej */
1575 1.25 thorpej
1576 1.25 thorpej if (simple_lock_try(&pp->pr_slock) == 0)
1577 1.25 thorpej (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1578 1.25 thorpej else
1579 1.25 thorpej didlock = 1;
1580 1.25 thorpej
1581 1.25 thorpej pool_print1(pp, modif, pr);
1582 1.25 thorpej
1583 1.25 thorpej if (didlock)
1584 1.25 thorpej simple_unlock(&pp->pr_slock);
1585 1.25 thorpej }
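/*
 * pool_printit() is the DDB back-end (reached via the `show pool'
 * command); the `modif' characters enable optional detail:
 *
 *	l	print the pool's operation log (PR_LOGGING pools only)
 *	p	print the empty/full/partial page lists
 *	c	print the pool's caches and their groups
 *
 * The exact command syntax for passing modifiers depends on the DDB
 * build, so treat this as a sketch of the interface, not a reference.
 */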
1586 1.25 thorpej
1587 1.21 thorpej static void
1588 1.97 yamt pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1589 1.97 yamt void (*pr)(const char *, ...))
1590 1.88 chs {
1591 1.88 chs struct pool_item_header *ph;
1592 1.88 chs #ifdef DIAGNOSTIC
1593 1.88 chs struct pool_item *pi;
1594 1.88 chs #endif
1595 1.88 chs
1596 1.88 chs LIST_FOREACH(ph, pl, ph_pagelist) {
1597 1.88 chs (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1598 1.88 chs ph->ph_page, ph->ph_nmissing,
1599 1.88 chs (u_long)ph->ph_time.tv_sec,
1600 1.88 chs (u_long)ph->ph_time.tv_usec);
1601 1.88 chs #ifdef DIAGNOSTIC
1602 1.97 yamt if (!(pp->pr_roflags & PR_NOTOUCH)) {
1603 1.97 yamt TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1604 1.97 yamt if (pi->pi_magic != PI_MAGIC) {
1605 1.97 yamt (*pr)("\t\t\titem %p, magic 0x%x\n",
1606 1.97 yamt pi, pi->pi_magic);
1607 1.97 yamt }
1608 1.88 chs }
1609 1.88 chs }
1610 1.88 chs #endif
1611 1.88 chs }
1612 1.88 chs }
1613 1.88 chs
1614 1.88 chs static void
1615 1.42 thorpej pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1616 1.3 pk {
1617 1.25 thorpej struct pool_item_header *ph;
1618 1.44 thorpej struct pool_cache *pc;
1619 1.44 thorpej struct pool_cache_group *pcg;
1620 1.44 thorpej int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1621 1.25 thorpej char c;
1622 1.25 thorpej
1623 1.25 thorpej while ((c = *modif++) != '\0') {
1624 1.25 thorpej if (c == 'l')
1625 1.25 thorpej print_log = 1;
1626 1.25 thorpej if (c == 'p')
1627 1.25 thorpej print_pagelist = 1;
1628 1.44 thorpej if (c == 'c')
1629 1.44 thorpej print_cache = 1;
1630 1.25 thorpej }
1631 1.25 thorpej
1632 1.25 thorpej (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1633 1.25 thorpej pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1634 1.25 thorpej pp->pr_roflags);
1635 1.66 thorpej (*pr)("\talloc %p\n", pp->pr_alloc);
1636 1.25 thorpej (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1637 1.25 thorpej pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1638 1.25 thorpej (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1639 1.25 thorpej pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1640 1.25 thorpej
1641 1.25 thorpej (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1642 1.25 thorpej pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1643 1.25 thorpej (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1644 1.25 thorpej pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1645 1.25 thorpej
1646 1.25 thorpej if (print_pagelist == 0)
1647 1.25 thorpej goto skip_pagelist;
1648 1.25 thorpej
1649 1.88 chs if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1650 1.88 chs (*pr)("\n\tempty page list:\n");
1651 1.97 yamt pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1652 1.88 chs if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1653 1.88 chs (*pr)("\n\tfull page list:\n");
1654 1.97 yamt pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1655 1.88 chs if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1656 1.88 chs (*pr)("\n\tpartial-page list:\n");
1657 1.97 yamt pool_print_pagelist(pp, &pp->pr_partpages, pr);
1658 1.88 chs
1659 1.25 thorpej if (pp->pr_curpage == NULL)
1660 1.25 thorpej (*pr)("\tno current page\n");
1661 1.25 thorpej else
1662 1.25 thorpej (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1663 1.25 thorpej
1664 1.25 thorpej skip_pagelist:
1665 1.25 thorpej if (print_log == 0)
1666 1.25 thorpej goto skip_log;
1667 1.25 thorpej
1668 1.25 thorpej (*pr)("\n");
1669 1.25 thorpej if ((pp->pr_roflags & PR_LOGGING) == 0)
1670 1.25 thorpej (*pr)("\tno log\n");
1671 1.25 thorpej else
1672 1.25 thorpej pr_printlog(pp, NULL, pr);
1673 1.3 pk
1674 1.25 thorpej skip_log:
1675 1.44 thorpej if (print_cache == 0)
1676 1.44 thorpej goto skip_cache;
1677 1.44 thorpej
1678 1.61 chs TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1679 1.44 thorpej (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1680 1.44 thorpej pc->pc_allocfrom, pc->pc_freeto);
1681 1.48 thorpej (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1682 1.48 thorpej pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1683 1.61 chs TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1684 1.44 thorpej (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1685 1.87 thorpej for (i = 0; i < PCG_NOBJECTS; i++) {
1686 1.87 thorpej if (pcg->pcg_objects[i].pcgo_pa !=
1687 1.87 thorpej POOL_PADDR_INVALID) {
1688 1.87 thorpej (*pr)("\t\t\t%p, 0x%llx\n",
1689 1.87 thorpej pcg->pcg_objects[i].pcgo_va,
1690 1.87 thorpej (unsigned long long)
1691 1.87 thorpej pcg->pcg_objects[i].pcgo_pa);
1692 1.87 thorpej } else {
1693 1.87 thorpej (*pr)("\t\t\t%p\n",
1694 1.87 thorpej pcg->pcg_objects[i].pcgo_va);
1695 1.87 thorpej }
1696 1.87 thorpej }
1697 1.44 thorpej }
1698 1.44 thorpej }
1699 1.44 thorpej
1700 1.44 thorpej skip_cache:
1701 1.88 chs pr_enter_check(pp, pr);
1702 1.88 chs }
1703 1.88 chs
1704 1.88 chs static int
1705 1.88 chs pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1706 1.88 chs {
1707 1.88 chs struct pool_item *pi;
1708 1.88 chs caddr_t page;
1709 1.88 chs int n;
1710 1.88 chs
1711 1.88 chs page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1712 1.88 chs if (page != ph->ph_page &&
1713 1.88 chs (pp->pr_roflags & PR_PHINPAGE) != 0) {
1714 1.88 chs if (label != NULL)
1715 1.88 chs printf("%s: ", label);
1716 1.88 chs printf("pool(%p:%s): page inconsistency: page %p;"
1717 1.88 chs " at page head addr %p (p %p)\n", pp,
1718 1.88 chs pp->pr_wchan, ph->ph_page,
1719 1.88 chs ph, page);
1720 1.88 chs return 1;
1721 1.88 chs }
1722 1.3 pk
1723 1.97 yamt if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1724 1.97 yamt return 0;
1725 1.97 yamt
1726 1.88 chs for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1727 1.88 chs pi != NULL;
1728 1.88 chs pi = TAILQ_NEXT(pi,pi_list), n++) {
1729 1.88 chs
1730 1.88 chs #ifdef DIAGNOSTIC
1731 1.88 chs if (pi->pi_magic != PI_MAGIC) {
1732 1.88 chs if (label != NULL)
1733 1.88 chs printf("%s: ", label);
1734 1.88 chs printf("pool(%s): free list modified: magic=%x;"
1735 1.88 chs " page %p; item ordinal %d;"
1736 1.88 chs " addr %p (p %p)\n",
1737 1.88 chs pp->pr_wchan, pi->pi_magic, ph->ph_page,
1738 1.88 chs n, pi, page);
1739 1.88 chs panic("pool");
1740 1.88 chs }
1741 1.88 chs #endif
1742 1.88 chs page = (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1744 1.88 chs if (page == ph->ph_page)
1745 1.88 chs continue;
1746 1.88 chs
1747 1.88 chs if (label != NULL)
1748 1.88 chs printf("%s: ", label);
1749 1.88 chs printf("pool(%p:%s): page inconsistency: page %p;"
1750 1.88 chs " item ordinal %d; addr %p (p %p)\n", pp,
1751 1.88 chs pp->pr_wchan, ph->ph_page,
1752 1.88 chs n, pi, page);
1753 1.88 chs return 1;
1754 1.88 chs }
1755 1.88 chs return 0;
1756 1.3 pk }
1757 1.3 pk
1758 1.88 chs
1759 1.3 pk int
1760 1.42 thorpej pool_chk(struct pool *pp, const char *label)
1761 1.3 pk {
1762 1.3 pk struct pool_item_header *ph;
1763 1.3 pk int r = 0;
1764 1.3 pk
1765 1.21 thorpej simple_lock(&pp->pr_slock);
1766 1.88 chs LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1767 1.88 chs r = pool_chk_page(pp, label, ph);
1768 1.88 chs if (r) {
1769 1.88 chs goto out;
1770 1.88 chs }
1771 1.88 chs }
1772 1.88 chs LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1773 1.88 chs r = pool_chk_page(pp, label, ph);
1774 1.88 chs if (r) {
1775 1.3 pk goto out;
1776 1.3 pk }
1777 1.88 chs }
1778 1.88 chs LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1779 1.88 chs r = pool_chk_page(pp, label, ph);
1780 1.88 chs if (r) {
1781 1.3 pk goto out;
1782 1.3 pk }
1783 1.3 pk }
1784 1.88 chs
1785 1.3 pk out:
1786 1.21 thorpej simple_unlock(&pp->pr_slock);
1787 1.3 pk return (r);
1788 1.43 thorpej }
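/*
 * pool_chk() walks every page header on the empty, full and partial
 * lists and cross-checks each one with pool_chk_page() above.  A
 * hypothetical caller chasing corruption might use it like this
 * (sketch):
 *
 *	if (pool_chk(&mypool, __func__) != 0)
 *		panic("mypool: inconsistent page lists");
 *
 * The `label' argument only prefixes the diagnostic output.
 */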
1789 1.43 thorpej
1790 1.43 thorpej /*
1791 1.43 thorpej * pool_cache_init:
1792 1.43 thorpej *
1793 1.43 thorpej * Initialize a pool cache.
1794 1.43 thorpej *
1795 1.43 thorpej * NOTE: If the pool must be protected from interrupts, we expect
1796 1.43 thorpej * to be called at the appropriate interrupt priority level.
1797 1.43 thorpej */
1798 1.43 thorpej void
1799 1.43 thorpej pool_cache_init(struct pool_cache *pc, struct pool *pp,
1800 1.43 thorpej int (*ctor)(void *, void *, int),
1801 1.43 thorpej void (*dtor)(void *, void *),
1802 1.43 thorpej void *arg)
1803 1.43 thorpej {
1804 1.43 thorpej
1805 1.43 thorpej TAILQ_INIT(&pc->pc_grouplist);
1806 1.43 thorpej simple_lock_init(&pc->pc_slock);
1807 1.43 thorpej
1808 1.43 thorpej pc->pc_allocfrom = NULL;
1809 1.43 thorpej pc->pc_freeto = NULL;
1810 1.43 thorpej pc->pc_pool = pp;
1811 1.43 thorpej
1812 1.43 thorpej pc->pc_ctor = ctor;
1813 1.43 thorpej pc->pc_dtor = dtor;
1814 1.43 thorpej pc->pc_arg = arg;
1815 1.43 thorpej
1816 1.48 thorpej pc->pc_hits = 0;
1817 1.48 thorpej pc->pc_misses = 0;
1818 1.48 thorpej
1819 1.48 thorpej pc->pc_ngroups = 0;
1820 1.48 thorpej
1821 1.48 thorpej pc->pc_nitems = 0;
1822 1.48 thorpej
1823 1.43 thorpej simple_lock(&pp->pr_slock);
1824 1.43 thorpej TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1825 1.43 thorpej simple_unlock(&pp->pr_slock);
1826 1.43 thorpej }
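/*
 * Example (editorial sketch): a cache of constructed `struct foo'
 * objects.  The constructor runs only when pool_cache_get() has to
 * fall back to the backing pool; objects recycled through the cache
 * are handed out still constructed.  All names are hypothetical.
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct foo *f = obj;
 *
 *		memset(f, 0, sizeof(*f));
 *		simple_lock_init(&f->f_slock);
 *		return (0);	(nonzero would fail the allocation)
 *	}
 *
 *	static void
 *	foo_dtor(void *arg, void *obj)
 *	{
 *		(tear down whatever foo_ctor set up)
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 */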
1827 1.43 thorpej
1828 1.43 thorpej /*
1829 1.43 thorpej * pool_cache_destroy:
1830 1.43 thorpej *
1831 1.43 thorpej * Destroy a pool cache.
1832 1.43 thorpej */
1833 1.43 thorpej void
1834 1.43 thorpej pool_cache_destroy(struct pool_cache *pc)
1835 1.43 thorpej {
1836 1.43 thorpej struct pool *pp = pc->pc_pool;
1837 1.43 thorpej
1838 1.43 thorpej /* First, invalidate the entire cache. */
1839 1.43 thorpej pool_cache_invalidate(pc);
1840 1.43 thorpej
1841 1.43 thorpej /* ...and remove it from the pool's cache list. */
1842 1.43 thorpej simple_lock(&pp->pr_slock);
1843 1.43 thorpej TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1844 1.43 thorpej simple_unlock(&pp->pr_slock);
1845 1.43 thorpej }
1846 1.43 thorpej
1847 1.43 thorpej static __inline void *
1848 1.87 thorpej pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1849 1.43 thorpej {
1850 1.43 thorpej void *object;
1851 1.43 thorpej u_int idx;
1852 1.43 thorpej
1853 1.43 thorpej KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1854 1.45 thorpej KASSERT(pcg->pcg_avail != 0);
1855 1.43 thorpej idx = --pcg->pcg_avail;
1856 1.43 thorpej
1857 1.87 thorpej KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1858 1.87 thorpej object = pcg->pcg_objects[idx].pcgo_va;
1859 1.87 thorpej if (pap != NULL)
1860 1.87 thorpej *pap = pcg->pcg_objects[idx].pcgo_pa;
1861 1.87 thorpej pcg->pcg_objects[idx].pcgo_va = NULL;
1862 1.43 thorpej
1863 1.43 thorpej return (object);
1864 1.43 thorpej }
1865 1.43 thorpej
1866 1.43 thorpej static __inline void
1867 1.87 thorpej pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1868 1.43 thorpej {
1869 1.43 thorpej u_int idx;
1870 1.43 thorpej
1871 1.43 thorpej KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1872 1.43 thorpej idx = pcg->pcg_avail++;
1873 1.43 thorpej
1874 1.87 thorpej KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1875 1.87 thorpej pcg->pcg_objects[idx].pcgo_va = object;
1876 1.87 thorpej pcg->pcg_objects[idx].pcgo_pa = pa;
1877 1.43 thorpej }
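/*
 * pcg_get() and pcg_put() treat a group's object array as a small
 * stack: pcg_avail indexes the first free slot, so the most recently
 * freed object is the first one handed out again.  This LIFO order
 * favours returning cache-warm objects.
 */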
1878 1.43 thorpej
1879 1.43 thorpej /*
1880 1.87 thorpej * pool_cache_get{,_paddr}:
1881 1.43 thorpej *
1882 1.87 thorpej * Get an object from a pool cache (optionally returning
1883 1.87 thorpej * the physical address of the object).
1884 1.43 thorpej */
1885 1.43 thorpej void *
1886 1.87 thorpej pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1887 1.43 thorpej {
1888 1.43 thorpej struct pool_cache_group *pcg;
1889 1.43 thorpej void *object;
1890 1.58 thorpej
1891 1.58 thorpej #ifdef LOCKDEBUG
1892 1.58 thorpej if (flags & PR_WAITOK)
1893 1.58 thorpej simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1894 1.58 thorpej #endif
1895 1.43 thorpej
1896 1.43 thorpej simple_lock(&pc->pc_slock);
1897 1.43 thorpej
1898 1.43 thorpej if ((pcg = pc->pc_allocfrom) == NULL) {
1899 1.61 chs TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1900 1.43 thorpej if (pcg->pcg_avail != 0) {
1901 1.43 thorpej pc->pc_allocfrom = pcg;
1902 1.43 thorpej goto have_group;
1903 1.43 thorpej }
1904 1.43 thorpej }
1905 1.43 thorpej
1906 1.43 thorpej /*
1907 1.43 thorpej * No groups with any available objects. Allocate
1908 1.43 thorpej * a new object, construct it, and return it to
1909 1.43 thorpej * the caller. We will allocate a group, if necessary,
1910 1.43 thorpej * when the object is freed back to the cache.
1911 1.43 thorpej */
1912 1.48 thorpej pc->pc_misses++;
1913 1.43 thorpej simple_unlock(&pc->pc_slock);
1914 1.43 thorpej object = pool_get(pc->pc_pool, flags);
1915 1.43 thorpej if (object != NULL && pc->pc_ctor != NULL) {
1916 1.43 thorpej if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1917 1.43 thorpej pool_put(pc->pc_pool, object);
1918 1.43 thorpej return (NULL);
1919 1.43 thorpej }
1920 1.43 thorpej }
1921 1.87 thorpej if (object != NULL && pap != NULL) {
1922 1.87 thorpej #ifdef POOL_VTOPHYS
1923 1.87 thorpej *pap = POOL_VTOPHYS(object);
1924 1.87 thorpej #else
1925 1.87 thorpej *pap = POOL_PADDR_INVALID;
1926 1.87 thorpej #endif
1927 1.87 thorpej }
1928 1.43 thorpej return (object);
1929 1.43 thorpej }
1930 1.43 thorpej
1931 1.43 thorpej have_group:
1932 1.48 thorpej pc->pc_hits++;
1933 1.48 thorpej pc->pc_nitems--;
1934 1.87 thorpej object = pcg_get(pcg, pap);
1935 1.43 thorpej
1936 1.43 thorpej if (pcg->pcg_avail == 0)
1937 1.43 thorpej pc->pc_allocfrom = NULL;
1938 1.45 thorpej
1939 1.43 thorpej simple_unlock(&pc->pc_slock);
1940 1.43 thorpej
1941 1.43 thorpej return (object);
1942 1.43 thorpej }
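/*
 * Example (editorial sketch): allocating an object together with its
 * physical address, e.g. for a DMA descriptor.  The plain
 * pool_cache_get() wrapper simply passes a NULL `pap'.  Names are
 * hypothetical.
 *
 *	struct foo *f;
 *	paddr_t pa;
 *
 *	f = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	if (f == NULL)
 *		return (ENOBUFS);
 *	(... hand `pa' to the hardware ...)
 *
 * When the object comes from the backing pool rather than from a
 * cache group, *pap is POOL_PADDR_INVALID unless the platform
 * provides POOL_VTOPHYS.
 */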
1943 1.43 thorpej
1944 1.43 thorpej /*
1945 1.87 thorpej * pool_cache_put{,_paddr}:
1946 1.43 thorpej *
1947 1.87 thorpej * Put an object back to the pool cache (optionally caching the
1948 1.87 thorpej * physical address of the object).
1949 1.43 thorpej */
1950 1.43 thorpej void
1951 1.87 thorpej pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1952 1.43 thorpej {
1953 1.43 thorpej struct pool_cache_group *pcg;
1954 1.60 thorpej int s;
1955 1.43 thorpej
1956 1.43 thorpej simple_lock(&pc->pc_slock);
1957 1.43 thorpej
1958 1.43 thorpej if ((pcg = pc->pc_freeto) == NULL) {
1959 1.61 chs TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1960 1.43 thorpej if (pcg->pcg_avail != PCG_NOBJECTS) {
1961 1.43 thorpej pc->pc_freeto = pcg;
1962 1.43 thorpej goto have_group;
1963 1.43 thorpej }
1964 1.43 thorpej }
1965 1.43 thorpej
1966 1.43 thorpej /*
1967 1.43 thorpej * No group with room to receive the object. Attempt to
1968 1.47 thorpej * allocate a new group.
1969 1.43 thorpej */
1970 1.47 thorpej simple_unlock(&pc->pc_slock);
1971 1.60 thorpej s = splvm();
1972 1.43 thorpej pcg = pool_get(&pcgpool, PR_NOWAIT);
1973 1.60 thorpej splx(s);
1974 1.43 thorpej if (pcg != NULL) {
1975 1.43 thorpej memset(pcg, 0, sizeof(*pcg));
1976 1.47 thorpej simple_lock(&pc->pc_slock);
1977 1.48 thorpej pc->pc_ngroups++;
1978 1.43 thorpej TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1979 1.47 thorpej if (pc->pc_freeto == NULL)
1980 1.47 thorpej pc->pc_freeto = pcg;
1981 1.43 thorpej goto have_group;
1982 1.43 thorpej }
1983 1.43 thorpej
1984 1.43 thorpej /*
1985 1.43 thorpej * Unable to allocate a cache group; destruct the object
1986 1.43 thorpej * and free it back to the pool.
1987 1.43 thorpej */
1988 1.51 thorpej pool_cache_destruct_object(pc, object);
1989 1.43 thorpej return;
1990 1.43 thorpej }
1991 1.43 thorpej
1992 1.43 thorpej have_group:
1993 1.48 thorpej pc->pc_nitems++;
1994 1.87 thorpej pcg_put(pcg, object, pa);
1995 1.43 thorpej
1996 1.43 thorpej if (pcg->pcg_avail == PCG_NOBJECTS)
1997 1.43 thorpej pc->pc_freeto = NULL;
1998 1.43 thorpej
1999 1.43 thorpej simple_unlock(&pc->pc_slock);
2000 1.51 thorpej }
2001 1.51 thorpej
2002 1.51 thorpej /*
2003 1.51 thorpej * pool_cache_destruct_object:
2004 1.51 thorpej *
2005 1.51 thorpej * Force destruction of an object and its release back into
2006 1.51 thorpej * the pool.
2007 1.51 thorpej */
2008 1.51 thorpej void
2009 1.51 thorpej pool_cache_destruct_object(struct pool_cache *pc, void *object)
2010 1.51 thorpej {
2011 1.51 thorpej
2012 1.51 thorpej if (pc->pc_dtor != NULL)
2013 1.51 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
2014 1.51 thorpej pool_put(pc->pc_pool, object);
2015 1.43 thorpej }
2016 1.43 thorpej
2017 1.43 thorpej /*
2018 1.43 thorpej * pool_cache_do_invalidate:
2019 1.43 thorpej *
2020 1.43 thorpej * This internal function implements pool_cache_invalidate() and
2021 1.43 thorpej * pool_cache_reclaim().
2022 1.43 thorpej */
2023 1.43 thorpej static void
2024 1.43 thorpej pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
2025 1.56 sommerfe void (*putit)(struct pool *, void *))
2026 1.43 thorpej {
2027 1.43 thorpej struct pool_cache_group *pcg, *npcg;
2028 1.43 thorpej void *object;
2029 1.60 thorpej int s;
2030 1.43 thorpej
2031 1.43 thorpej for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
2032 1.43 thorpej pcg = npcg) {
2033 1.43 thorpej npcg = TAILQ_NEXT(pcg, pcg_list);
2034 1.43 thorpej while (pcg->pcg_avail != 0) {
2035 1.48 thorpej pc->pc_nitems--;
2036 1.87 thorpej object = pcg_get(pcg, NULL);
2037 1.45 thorpej if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
2038 1.45 thorpej pc->pc_allocfrom = NULL;
2039 1.43 thorpej if (pc->pc_dtor != NULL)
2040 1.43 thorpej (*pc->pc_dtor)(pc->pc_arg, object);
2041 1.56 sommerfe (*putit)(pc->pc_pool, object);
2042 1.43 thorpej }
2043 1.43 thorpej if (free_groups) {
2044 1.48 thorpej pc->pc_ngroups--;
2045 1.43 thorpej TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
2046 1.46 thorpej if (pc->pc_freeto == pcg)
2047 1.46 thorpej pc->pc_freeto = NULL;
2048 1.60 thorpej s = splvm();
2049 1.43 thorpej pool_put(&pcgpool, pcg);
2050 1.60 thorpej splx(s);
2051 1.43 thorpej }
2052 1.43 thorpej }
2053 1.43 thorpej }
2054 1.43 thorpej
2055 1.43 thorpej /*
2056 1.43 thorpej * pool_cache_invalidate:
2057 1.43 thorpej *
2058 1.43 thorpej * Invalidate a pool cache (destruct and release all of the
2059 1.43 thorpej * cached objects).
2060 1.43 thorpej */
2061 1.43 thorpej void
2062 1.43 thorpej pool_cache_invalidate(struct pool_cache *pc)
2063 1.43 thorpej {
2064 1.43 thorpej
2065 1.43 thorpej simple_lock(&pc->pc_slock);
2066 1.56 sommerfe pool_cache_do_invalidate(pc, 0, pool_put);
2067 1.43 thorpej simple_unlock(&pc->pc_slock);
2068 1.43 thorpej }
2069 1.43 thorpej
2070 1.43 thorpej /*
2071 1.43 thorpej * pool_cache_reclaim:
2072 1.43 thorpej *
2073 1.43 thorpej * Reclaim a pool cache for pool_reclaim().
2074 1.43 thorpej */
2075 1.43 thorpej static void
2076 1.43 thorpej pool_cache_reclaim(struct pool_cache *pc)
2077 1.43 thorpej {
2078 1.43 thorpej
2079 1.47 thorpej simple_lock(&pc->pc_slock);
2080 1.43 thorpej pool_cache_do_invalidate(pc, 1, pool_do_put);
2081 1.43 thorpej simple_unlock(&pc->pc_slock);
2082 1.3 pk }
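/*
 * The two entry points above differ in their `putit' routine and in
 * whether the emptied groups themselves are freed:
 * pool_cache_invalidate() keeps the groups and releases objects with
 * pool_put(), which takes pr_slock itself, while pool_cache_reclaim()
 * also frees the groups and must use the lock-already-held internal
 * pool_do_put(), since pool_reclaim() holds pr_slock across the call.
 */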
2083 1.66 thorpej
2084 1.66 thorpej /*
2085 1.66 thorpej * Pool backend allocators.
2086 1.66 thorpej *
2087 1.66 thorpej * Each pool has a backend allocator that handles allocation, deallocation,
2088 1.66 thorpej * and any additional draining that might be needed.
2089 1.66 thorpej *
2090 1.66 thorpej * We provide two standard allocators:
2091 1.66 thorpej *
2092 1.66 thorpej * pool_allocator_kmem - the default when no allocator is specified
2093 1.66 thorpej *
2094 1.66 thorpej * pool_allocator_nointr - used for pools that will not be accessed
2095 1.66 thorpej * in interrupt context.
2096 1.66 thorpej */
2097 1.66 thorpej void *pool_page_alloc(struct pool *, int);
2098 1.66 thorpej void pool_page_free(struct pool *, void *);
2099 1.66 thorpej
2100 1.66 thorpej struct pool_allocator pool_allocator_kmem = {
2101 1.66 thorpej pool_page_alloc, pool_page_free, 0,
2102 1.66 thorpej };
2103 1.66 thorpej
2104 1.66 thorpej void *pool_page_alloc_nointr(struct pool *, int);
2105 1.66 thorpej void pool_page_free_nointr(struct pool *, void *);
2106 1.66 thorpej
2107 1.66 thorpej struct pool_allocator pool_allocator_nointr = {
2108 1.66 thorpej pool_page_alloc_nointr, pool_page_free_nointr, 0,
2109 1.66 thorpej };
2110 1.66 thorpej
2111 1.66 thorpej #ifdef POOL_SUBPAGE
2112 1.66 thorpej void *pool_subpage_alloc(struct pool *, int);
2113 1.66 thorpej void pool_subpage_free(struct pool *, void *);
2114 1.66 thorpej
2115 1.66 thorpej struct pool_allocator pool_allocator_kmem_subpage = {
2116 1.66 thorpej pool_subpage_alloc, pool_subpage_free, 0,
2117 1.66 thorpej };
2118 1.66 thorpej #endif /* POOL_SUBPAGE */
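/*
 * Example (editorial sketch): a driver that must take its pool pages
 * from a private submap could define an allocator like the following
 * and pass it to pool_init().  All names here are hypothetical; a
 * third field (pa_pagesz) of zero selects the default page size.
 *
 *	static void *
 *	my_page_alloc(struct pool *pp, int flags)
 *	{
 *		boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
 *
 *		return ((void *)uvm_km_alloc_poolpage1(my_map, NULL, waitok));
 *	}
 *
 *	static void
 *	my_page_free(struct pool *pp, void *v)
 *	{
 *
 *		uvm_km_free_poolpage1(my_map, (vaddr_t)v);
 *	}
 *
 *	struct pool_allocator my_allocator = {
 *		my_page_alloc, my_page_free, 0,
 *	};
 *
 *	pool_init(&mypool, size, 0, 0, 0, "mypl", &my_allocator);
 */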
2119 1.66 thorpej
2120 1.66 thorpej /*
2121 1.66 thorpej * We have at least three different resources for the same allocation and
2122 1.66 thorpej * each resource can be depleted. First, we have the ready elements in the
2123 1.66 thorpej * pool. Then we have the resource (typically a vm_map) for this allocator.
2124 1.66 thorpej * Finally, we have physical memory. A wait on any one of these can become
2125 1.66 thorpej * unnecessary the moment any other is freed, but the kernel doesn't support
2126 1.66 thorpej * sleeping on multiple wait channels, so we have to employ another strategy.
2127 1.66 thorpej *
2128 1.66 thorpej * The caller sleeps on the pool (so that it can be awakened when an item
2129 1.66 thorpej * is returned to the pool), but we set PA_WANT on the allocator. When a
2130 1.66 thorpej * page is returned to the allocator and PA_WANT is set, pool_allocator_free
2131 1.66 thorpej * will wake up all sleeping pools belonging to this allocator.
2132 1.66 thorpej *
2133 1.66 thorpej * XXX Thundering herd.
2134 1.66 thorpej */
2135 1.66 thorpej void *
2136 1.66 thorpej pool_allocator_alloc(struct pool *org, int flags)
2137 1.66 thorpej {
2138 1.66 thorpej struct pool_allocator *pa = org->pr_alloc;
2139 1.66 thorpej struct pool *pp, *start;
2140 1.66 thorpej int s, freed;
2141 1.66 thorpej void *res;
2142 1.66 thorpej
2143 1.91 yamt LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
2144 1.91 yamt
2145 1.66 thorpej do {
2146 1.66 thorpej if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2147 1.66 thorpej return (res);
2148 1.68 thorpej if ((flags & PR_WAITOK) == 0) {
2149 1.68 thorpej /*
2150 1.68 thorpej * We only run the drain hook here if PR_NOWAIT.
2151 1.68 thorpej * In other cases, the hook will be run in
2152 1.68 thorpej * pool_reclaim().
2153 1.68 thorpej */
2154 1.68 thorpej if (org->pr_drain_hook != NULL) {
2155 1.68 thorpej (*org->pr_drain_hook)(org->pr_drain_hook_arg,
2156 1.68 thorpej flags);
2157 1.68 thorpej if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2158 1.68 thorpej return (res);
2159 1.68 thorpej }
2160 1.66 thorpej break;
2161 1.68 thorpej }
2162 1.66 thorpej
2163 1.66 thorpej /*
2164 1.66 thorpej * Drain all pools, except "org", that use this
2165 1.66 thorpej * allocator. We do this to reclaim VA space.
2166 1.66 thorpej * pa_alloc is responsible for waiting for
2167 1.66 thorpej * physical memory.
2168 1.66 thorpej *
2169 1.66 thorpej * XXX We risk looping forever if someone calls
2170 1.66 thorpej * pool_destroy on "start". But there is no
2171 1.66 thorpej * other way to have potentially sleeping pool_reclaim,
2172 1.66 thorpej * non-sleeping locks on pool_allocator, and some
2173 1.66 thorpej * stirring of drained pools in the allocator.
2174 1.68 thorpej *
2175 1.68 thorpej * XXX Maybe we should use pool_head_slock for locking
2176 1.68 thorpej * the allocators?
2177 1.66 thorpej */
2178 1.66 thorpej freed = 0;
2179 1.66 thorpej
2180 1.66 thorpej s = splvm();
2181 1.66 thorpej simple_lock(&pa->pa_slock);
2182 1.66 thorpej pp = start = TAILQ_FIRST(&pa->pa_list);
2183 1.66 thorpej do {
2184 1.66 thorpej TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2185 1.66 thorpej TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
2186 1.66 thorpej if (pp == org)
2187 1.66 thorpej continue;
2188 1.73 thorpej simple_unlock(&pa->pa_slock);
2189 1.66 thorpej freed = pool_reclaim(pp);
2190 1.73 thorpej simple_lock(&pa->pa_slock);
2191 1.66 thorpej } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2192 1.66 thorpej freed == 0);
2193 1.66 thorpej
2194 1.66 thorpej if (freed == 0) {
2195 1.66 thorpej /*
2196 1.66 thorpej * We set PA_WANT here, the caller will most likely
2197 1.66 thorpej * sleep waiting for pages (if not, this won't hurt
2198 1.66 thorpej * that much), and there is no way to set this in
2199 1.66 thorpej * the caller without violating locking order.
2200 1.66 thorpej */
2201 1.66 thorpej pa->pa_flags |= PA_WANT;
2202 1.66 thorpej }
2203 1.66 thorpej simple_unlock(&pa->pa_slock);
2204 1.66 thorpej splx(s);
2205 1.66 thorpej } while (freed);
2206 1.66 thorpej return (NULL);
2207 1.66 thorpej }
2208 1.66 thorpej
2209 1.66 thorpej void
2210 1.66 thorpej pool_allocator_free(struct pool *pp, void *v)
2211 1.66 thorpej {
2212 1.66 thorpej struct pool_allocator *pa = pp->pr_alloc;
2213 1.66 thorpej int s;
2214 1.66 thorpej
2215 1.91 yamt LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2216 1.91 yamt
2217 1.66 thorpej (*pa->pa_free)(pp, v);
2218 1.66 thorpej
2219 1.66 thorpej s = splvm();
2220 1.66 thorpej simple_lock(&pa->pa_slock);
2221 1.66 thorpej if ((pa->pa_flags & PA_WANT) == 0) {
2222 1.66 thorpej simple_unlock(&pa->pa_slock);
2223 1.66 thorpej splx(s);
2224 1.66 thorpej return;
2225 1.66 thorpej }
2226 1.66 thorpej
2227 1.66 thorpej TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2228 1.66 thorpej simple_lock(&pp->pr_slock);
2229 1.66 thorpej if ((pp->pr_flags & PR_WANTED) != 0) {
2230 1.66 thorpej pp->pr_flags &= ~PR_WANTED;
2231 1.66 thorpej wakeup(pp);
2232 1.66 thorpej }
2233 1.69 thorpej simple_unlock(&pp->pr_slock);
2234 1.66 thorpej }
2235 1.66 thorpej pa->pa_flags &= ~PA_WANT;
2236 1.66 thorpej simple_unlock(&pa->pa_slock);
2237 1.66 thorpej splx(s);
2238 1.66 thorpej }
2239 1.66 thorpej
2240 1.66 thorpej void *
2241 1.66 thorpej pool_page_alloc(struct pool *pp, int flags)
2242 1.66 thorpej {
2243 1.66 thorpej boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2244 1.66 thorpej
2245 1.98 yamt return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, NULL, waitok));
2246 1.66 thorpej }
2247 1.66 thorpej
2248 1.66 thorpej void
2249 1.66 thorpej pool_page_free(struct pool *pp, void *v)
2250 1.66 thorpej {
2251 1.66 thorpej
2252 1.98 yamt uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2253 1.98 yamt }
2254 1.98 yamt
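/*
 * Allocator variants for pool metadata (the page header pools).
 * These use the plain, non-caching uvm_km_alloc_poolpage1() path;
 * presumably this keeps the metadata pools from recursing into the
 * kernel-memory page cache that the regular pool_page_alloc() /
 * pool_page_free() pair sits on top of.
 */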
2255 1.98 yamt static void *
2256 1.98 yamt pool_page_alloc_meta(struct pool *pp, int flags)
2257 1.98 yamt {
2258 1.98 yamt boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2259 1.98 yamt
2260 1.98 yamt return ((void *) uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
2261 1.98 yamt }
2262 1.98 yamt
2263 1.98 yamt static void
2264 1.98 yamt pool_page_free_meta(struct pool *pp, void *v)
2265 1.98 yamt {
2266 1.98 yamt
2267 1.98 yamt uvm_km_free_poolpage1(kmem_map, (vaddr_t) v);
2268 1.66 thorpej }
2269 1.66 thorpej
2270 1.66 thorpej #ifdef POOL_SUBPAGE
2271 1.66 thorpej /* Sub-page allocator, for machines with large hardware pages. */
2272 1.66 thorpej void *
2273 1.66 thorpej pool_subpage_alloc(struct pool *pp, int flags)
2274 1.66 thorpej {
2275 1.93 dbj void *v;
2276 1.93 dbj int s;
2277 1.93 dbj s = splvm();
2278 1.93 dbj v = pool_get(&psppool, flags);
2279 1.93 dbj splx(s);
2280 1.93 dbj return v;
2281 1.66 thorpej }
2282 1.66 thorpej
2283 1.66 thorpej void
2284 1.66 thorpej pool_subpage_free(struct pool *pp, void *v)
2285 1.66 thorpej {
2286 1.93 dbj int s;
2287 1.93 dbj s = splvm();
2288 1.66 thorpej pool_put(&psppool, v);
2289 1.93 dbj splx(s);
2290 1.66 thorpej }
2291 1.66 thorpej
2292 1.66 thorpej /* We don't provide a real nointr allocator. Maybe later. */
2293 1.66 thorpej void *
2294 1.66 thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
2295 1.66 thorpej {
2296 1.66 thorpej
2297 1.66 thorpej return (pool_subpage_alloc(pp, flags));
2298 1.66 thorpej }
2299 1.66 thorpej
2300 1.66 thorpej void
2301 1.66 thorpej pool_page_free_nointr(struct pool *pp, void *v)
2302 1.66 thorpej {
2303 1.66 thorpej
2304 1.66 thorpej pool_subpage_free(pp, v);
2305 1.66 thorpej }
2306 1.66 thorpej #else
2307 1.66 thorpej void *
2308 1.66 thorpej pool_page_alloc_nointr(struct pool *pp, int flags)
2309 1.66 thorpej {
2310 1.66 thorpej boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2311 1.66 thorpej
2312 1.98 yamt return ((void *) uvm_km_alloc_poolpage_cache(kernel_map,
2313 1.66 thorpej uvm.kernel_object, waitok));
2314 1.66 thorpej }
2315 1.66 thorpej
2316 1.66 thorpej void
2317 1.66 thorpej pool_page_free_nointr(struct pool *pp, void *v)
2318 1.66 thorpej {
2319 1.66 thorpej
2320 1.98 yamt uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
2321 1.66 thorpej }
2322 1.66 thorpej #endif /* POOL_SUBPAGE */
2323