1 /* $NetBSD: subr_pool.c,v 1.69 2002/03/08 21:43:54 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.69 2002/03/08 21:43:54 thorpej Exp $");
42
43 #include "opt_pool.h"
44 #include "opt_poollog.h"
45 #include "opt_lockdebug.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/proc.h>
50 #include <sys/errno.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/lock.h>
54 #include <sys/pool.h>
55 #include <sys/syslog.h>
56
57 #include <uvm/uvm.h>
58
59 /*
60 * Pool resource management utility.
61 *
62 * Memory is allocated in pages which are split into pieces according
63 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
64 * in the pool structure and the individual pool items are on a linked list
65 * headed by `ph_itemlist' in each page header. The memory for building
66 * the page list is either taken from the allocated pages themselves (for
67 * small pool items) or taken from an internal pool of page headers (`phpool').
68 */
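/*
 * Typical use (an illustrative sketch only; the "foo" names below are
 * hypothetical and not part of this file):
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	...
 *	fp = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, fp);
 *
 * pool_get() and pool_put() must be called at the spl level appropriate
 * for the pool, as noted at their definitions below.
 */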
69
70 /* List of all pools */
71 TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
72
73 /* Private pool for page header structures */
74 static struct pool phpool;
75
76 #ifdef POOL_SUBPAGE
77 /* Pool of subpages for use by normal pools. */
78 static struct pool psppool;
79 #endif
80
81 /* # of seconds to retain page after last use */
82 int pool_inactive_time = 10;
83
84 /* Next candidate for drainage (see pool_drain()) */
85 static struct pool *drainpp;
86
87 /* This spin lock protects both pool_head and drainpp. */
88 struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
89
90 struct pool_item_header {
91 /* Page headers */
92 TAILQ_ENTRY(pool_item_header)
93 ph_pagelist; /* pool page list */
94 TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
95 LIST_ENTRY(pool_item_header)
96 ph_hashlist; /* Off-page page headers */
97 int ph_nmissing; /* # of chunks in use */
98 caddr_t ph_page; /* this page's address */
99 struct timeval ph_time; /* last referenced */
100 };
101 TAILQ_HEAD(pool_pagelist,pool_item_header);
102
103 struct pool_item {
104 #ifdef DIAGNOSTIC
105 int pi_magic;
106 #endif
107 #define PI_MAGIC 0xdeadbeef
108 /* Other entries use only this list entry */
109 TAILQ_ENTRY(pool_item) pi_list;
110 };
111
112 #define PR_HASH_INDEX(pp,addr) \
113 (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \
114 (PR_HASHTABSIZE - 1))
115
116 #define POOL_NEEDS_CATCHUP(pp) \
117 ((pp)->pr_nitems < (pp)->pr_minitems)
118
119 /*
120 * Pool cache management.
121 *
122 * Pool caches provide a way for constructed objects to be cached by the
123 * pool subsystem. This can lead to performance improvements by avoiding
124 * needless object construction/destruction, which is deferred until
125 * absolutely necessary.
126 *
127 * Caches are grouped into cache groups. Each cache group references
128 * up to 16 constructed objects. When a cache allocates an object
129 * from the pool, it calls the object's constructor and places it into
130 * a cache group. When a cache group frees an object back to the pool,
131 * it first calls the object's destructor. This allows the object to
132 * persist in constructed form while freed to the cache.
133 *
134 * Multiple caches may exist for each pool. This allows a single
135 * object type to have multiple constructed forms. The pool references
136 * each cache, so that when a pool is drained by the pagedaemon, it can
137 * drain each individual cache as well. Each time a cache is drained,
138 * the most idle cache group is freed to the pool in its entirety.
139 *
140 * Pool caches are laid on top of pools. By layering them, we can avoid
141 * the complexity of cache management for pools which would not benefit
142 * from it.
143 */
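/*
 * Typical cache use (an illustrative sketch only; the "foo" names are
 * hypothetical):
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	int foo_ctor(void *arg, void *object, int flags);
 *	void foo_dtor(void *arg, void *object);
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *	...
 *	fp = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, fp);
 */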
144
145 /* The cache group pool. */
146 static struct pool pcgpool;
147
148 /* The pool cache group. */
149 #define PCG_NOBJECTS 16
150 struct pool_cache_group {
151 TAILQ_ENTRY(pool_cache_group)
152 pcg_list; /* link in the pool cache's group list */
153 u_int pcg_avail; /* # available objects */
154 /* pointers to the objects */
155 void *pcg_objects[PCG_NOBJECTS];
156 };
157
158 static void pool_cache_reclaim(struct pool_cache *);
159
160 static int pool_catchup(struct pool *);
161 static void pool_prime_page(struct pool *, caddr_t,
162 struct pool_item_header *);
163
164 void *pool_allocator_alloc(struct pool *, int);
165 void pool_allocator_free(struct pool *, void *);
166
167 static void pool_print1(struct pool *, const char *,
168 void (*)(const char *, ...));
169
170 /*
171 * Pool log entry. An array of these is allocated in pool_init().
172 */
173 struct pool_log {
174 const char *pl_file;
175 long pl_line;
176 int pl_action;
177 #define PRLOG_GET 1
178 #define PRLOG_PUT 2
179 void *pl_addr;
180 };
181
182 /* Number of entries in pool log buffers */
183 #ifndef POOL_LOGSIZE
184 #define POOL_LOGSIZE 10
185 #endif
186
187 int pool_logsize = POOL_LOGSIZE;
188
189 #ifdef POOL_DIAGNOSTIC
190 static __inline void
191 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
192 {
193 int n = pp->pr_curlogentry;
194 struct pool_log *pl;
195
196 if ((pp->pr_roflags & PR_LOGGING) == 0)
197 return;
198
199 /*
200 * Fill in the current entry. Wrap around and overwrite
201 * the oldest entry if necessary.
202 */
203 pl = &pp->pr_log[n];
204 pl->pl_file = file;
205 pl->pl_line = line;
206 pl->pl_action = action;
207 pl->pl_addr = v;
208 if (++n >= pp->pr_logsize)
209 n = 0;
210 pp->pr_curlogentry = n;
211 }
212
213 static void
214 pr_printlog(struct pool *pp, struct pool_item *pi,
215 void (*pr)(const char *, ...))
216 {
217 int i = pp->pr_logsize;
218 int n = pp->pr_curlogentry;
219
220 if ((pp->pr_roflags & PR_LOGGING) == 0)
221 return;
222
223 /*
224 * Print all entries in this pool's log.
225 */
226 while (i-- > 0) {
227 struct pool_log *pl = &pp->pr_log[n];
228 if (pl->pl_action != 0) {
229 if (pi == NULL || pi == pl->pl_addr) {
230 (*pr)("\tlog entry %d:\n", i);
231 (*pr)("\t\taction = %s, addr = %p\n",
232 pl->pl_action == PRLOG_GET ? "get" : "put",
233 pl->pl_addr);
234 (*pr)("\t\tfile: %s at line %lu\n",
235 pl->pl_file, pl->pl_line);
236 }
237 }
238 if (++n >= pp->pr_logsize)
239 n = 0;
240 }
241 }
242
243 static __inline void
244 pr_enter(struct pool *pp, const char *file, long line)
245 {
246
247 if (__predict_false(pp->pr_entered_file != NULL)) {
248 printf("pool %s: reentrancy at file %s line %ld\n",
249 pp->pr_wchan, file, line);
250 printf(" previous entry at file %s line %ld\n",
251 pp->pr_entered_file, pp->pr_entered_line);
252 panic("pr_enter");
253 }
254
255 pp->pr_entered_file = file;
256 pp->pr_entered_line = line;
257 }
258
259 static __inline void
260 pr_leave(struct pool *pp)
261 {
262
263 if (__predict_false(pp->pr_entered_file == NULL)) {
264 printf("pool %s not entered?\n", pp->pr_wchan);
265 panic("pr_leave");
266 }
267
268 pp->pr_entered_file = NULL;
269 pp->pr_entered_line = 0;
270 }
271
272 static __inline void
273 pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
274 {
275
276 if (pp->pr_entered_file != NULL)
277 (*pr)("\n\tcurrently entered from file %s line %ld\n",
278 pp->pr_entered_file, pp->pr_entered_line);
279 }
280 #else
281 #define pr_log(pp, v, action, file, line)
282 #define pr_printlog(pp, pi, pr)
283 #define pr_enter(pp, file, line)
284 #define pr_leave(pp)
285 #define pr_enter_check(pp, pr)
286 #endif /* POOL_DIAGNOSTIC */
287
288 /*
289 * Return the pool page header based on page address.
290 */
291 static __inline struct pool_item_header *
292 pr_find_pagehead(struct pool *pp, caddr_t page)
293 {
294 struct pool_item_header *ph;
295
296 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
297 return ((struct pool_item_header *)(page + pp->pr_phoffset));
298
299 for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
300 ph != NULL;
301 ph = LIST_NEXT(ph, ph_hashlist)) {
302 if (ph->ph_page == page)
303 return (ph);
304 }
305 return (NULL);
306 }
307
308 /*
309 * Remove a page from the pool.
310 */
311 static __inline void
312 pr_rmpage(struct pool *pp, struct pool_item_header *ph,
313 struct pool_pagelist *pq)
314 {
315 int s;
316
317 /*
318 * If the page was idle, decrement the idle page count.
319 */
320 if (ph->ph_nmissing == 0) {
321 #ifdef DIAGNOSTIC
322 if (pp->pr_nidle == 0)
323 panic("pr_rmpage: nidle inconsistent");
324 if (pp->pr_nitems < pp->pr_itemsperpage)
325 panic("pr_rmpage: nitems inconsistent");
326 #endif
327 pp->pr_nidle--;
328 }
329
330 pp->pr_nitems -= pp->pr_itemsperpage;
331
332 /*
333 * Unlink a page from the pool and release it (or queue it for release).
334 */
335 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
336 if (pq) {
337 TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
338 } else {
339 pool_allocator_free(pp, ph->ph_page);
340 if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
341 LIST_REMOVE(ph, ph_hashlist);
342 s = splhigh();
343 pool_put(&phpool, ph);
344 splx(s);
345 }
346 }
347 pp->pr_npages--;
348 pp->pr_npagefree++;
349
350 if (pp->pr_curpage == ph) {
351 /*
352 * Find a new non-empty page header, if any.
353 * Start search from the page head, to increase the
354 * chance for "high water" pages to be freed.
355 */
356 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
357 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
358 break;
359
360 pp->pr_curpage = ph;
361 }
362 }
363
364 /*
365 * Initialize the given pool resource structure.
366 *
367 * We export this routine to allow other kernel parts to declare
368 * static pools that must be initialized before malloc() is available.
369 */
370 void
371 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
372 const char *wchan, struct pool_allocator *palloc)
373 {
374 int off, slack, i;
375
376 #ifdef POOL_DIAGNOSTIC
377 /*
378 * Always log if POOL_DIAGNOSTIC is defined.
379 */
380 if (pool_logsize != 0)
381 flags |= PR_LOGGING;
382 #endif
383
384 #ifdef POOL_SUBPAGE
385 /*
386 * XXX We don't provide a real `nointr' back-end
387 * yet; all sub-pages come from a kmem back-end.
388 * maybe some day...
389 */
390 if (palloc == NULL) {
391 extern struct pool_allocator pool_allocator_kmem_subpage;
392 palloc = &pool_allocator_kmem_subpage;
393 }
394 /*
395 * We'll assume any user-specified back-end allocator
396 * will deal with sub-pages, or simply won't care.
397 */
398 #else
399 if (palloc == NULL)
400 palloc = &pool_allocator_kmem;
401 #endif /* POOL_SUBPAGE */
402 if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
403 if (palloc->pa_pagesz == 0) {
404 #ifdef POOL_SUBPAGE
405 if (palloc == &pool_allocator_kmem)
406 palloc->pa_pagesz = PAGE_SIZE;
407 else
408 palloc->pa_pagesz = POOL_SUBPAGE;
409 #else
410 palloc->pa_pagesz = PAGE_SIZE;
411 #endif /* POOL_SUBPAGE */
412 }
413
414 TAILQ_INIT(&palloc->pa_list);
415
416 simple_lock_init(&palloc->pa_slock);
417 palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
418 palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
419 palloc->pa_flags |= PA_INITIALIZED;
420 }
421
422 if (align == 0)
423 align = ALIGN(1);
424
425 if (size < sizeof(struct pool_item))
426 size = sizeof(struct pool_item);
427
428 size = ALIGN(size);
429 #ifdef DIAGNOSTIC
430 if (size > palloc->pa_pagesz)
431 panic("pool_init: pool item size (%lu) too large",
432 (u_long)size);
433 #endif
434
435 /*
436 * Initialize the pool structure.
437 */
438 TAILQ_INIT(&pp->pr_pagelist);
439 TAILQ_INIT(&pp->pr_cachelist);
440 pp->pr_curpage = NULL;
441 pp->pr_npages = 0;
442 pp->pr_minitems = 0;
443 pp->pr_minpages = 0;
444 pp->pr_maxpages = UINT_MAX;
445 pp->pr_roflags = flags;
446 pp->pr_flags = 0;
447 pp->pr_size = size;
448 pp->pr_align = align;
449 pp->pr_wchan = wchan;
450 pp->pr_alloc = palloc;
451 pp->pr_nitems = 0;
452 pp->pr_nout = 0;
453 pp->pr_hardlimit = UINT_MAX;
454 pp->pr_hardlimit_warning = NULL;
455 pp->pr_hardlimit_ratecap.tv_sec = 0;
456 pp->pr_hardlimit_ratecap.tv_usec = 0;
457 pp->pr_hardlimit_warning_last.tv_sec = 0;
458 pp->pr_hardlimit_warning_last.tv_usec = 0;
459 pp->pr_drain_hook = NULL;
460 pp->pr_drain_hook_arg = NULL;
461
462 /*
463 * Decide whether to put the page header off page to avoid
464 * wasting too large a part of the page. Off-page page headers
465 * go on a hash table, so we can match a returned item
466 * with its header based on the page address.
467 * We use 1/16 of the page size as the threshold (XXX: tune)
468 */
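/*
 * For example, with a 4096-byte page the header stays in-page for
 * items smaller than 256 bytes; larger items get an off-page header.
 */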
469 if (pp->pr_size < palloc->pa_pagesz/16) {
470 /* Use the end of the page for the page header */
471 pp->pr_roflags |= PR_PHINPAGE;
472 pp->pr_phoffset = off = palloc->pa_pagesz -
473 ALIGN(sizeof(struct pool_item_header));
474 } else {
475 /* The page header will be taken from our page header pool */
476 pp->pr_phoffset = 0;
477 off = palloc->pa_pagesz;
478 for (i = 0; i < PR_HASHTABSIZE; i++) {
479 LIST_INIT(&pp->pr_hashtab[i]);
480 }
481 }
482
483 /*
484 * Alignment is to take place at `ioff' within the item. This means
485 * we must reserve up to `align - 1' bytes on the page to allow
486 * appropriate positioning of each item.
487 *
488 * Silently enforce `0 <= ioff < align'.
489 */
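/*
 * For example, with align = 64 and ioff = 16, the first item on each
 * page is placed so that (item address + 16) falls on a 64-byte
 * boundary; subsequent items keep that property as long as the item
 * size is a multiple of the alignment.
 */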
490 pp->pr_itemoffset = ioff = ioff % align;
491 pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
492 KASSERT(pp->pr_itemsperpage != 0);
493
494 /*
495 * Use the slack between the chunks and the page header
496 * for "cache coloring".
497 */
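/*
 * For example, with align = 32 and 96 bytes of slack, successive pages
 * start their item area at offsets 0, 32, 64 and 96 before wrapping
 * back to 0, so equally-indexed items on different pages land at
 * different cache offsets.
 */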
498 slack = off - pp->pr_itemsperpage * pp->pr_size;
499 pp->pr_maxcolor = (slack / align) * align;
500 pp->pr_curcolor = 0;
501
502 pp->pr_nget = 0;
503 pp->pr_nfail = 0;
504 pp->pr_nput = 0;
505 pp->pr_npagealloc = 0;
506 pp->pr_npagefree = 0;
507 pp->pr_hiwat = 0;
508 pp->pr_nidle = 0;
509
510 #ifdef POOL_DIAGNOSTIC
511 if (flags & PR_LOGGING) {
512 if (kmem_map == NULL ||
513 (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
514 M_TEMP, M_NOWAIT)) == NULL)
515 pp->pr_roflags &= ~PR_LOGGING;
516 pp->pr_curlogentry = 0;
517 pp->pr_logsize = pool_logsize;
518 }
519 #endif
520
521 pp->pr_entered_file = NULL;
522 pp->pr_entered_line = 0;
523
524 simple_lock_init(&pp->pr_slock);
525
526 /*
527 * Initialize private page header pool and cache magazine pool if we
528 * haven't done so yet.
529 * XXX LOCKING.
530 */
531 if (phpool.pr_size == 0) {
532 #ifdef POOL_SUBPAGE
533 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
534 "phpool", &pool_allocator_kmem);
535 pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
536 PR_RECURSIVE, "psppool", &pool_allocator_kmem);
537 #else
538 pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
539 0, "phpool", NULL);
540 #endif
541 pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
542 0, "pcgpool", NULL);
543 }
544
545 /* Insert into the list of all pools. */
546 simple_lock(&pool_head_slock);
547 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
548 simple_unlock(&pool_head_slock);
549
550 /* Insert this into the list of pools using this allocator. */
551 simple_lock(&palloc->pa_slock);
552 TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
553 simple_unlock(&palloc->pa_slock);
554 }
555
556 /*
557 * Decommission a pool resource.
558 */
559 void
560 pool_destroy(struct pool *pp)
561 {
562 struct pool_item_header *ph;
563 struct pool_cache *pc;
564
565 /* Locking order: pool_allocator -> pool */
566 simple_lock(&pp->pr_alloc->pa_slock);
567 TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
568 simple_unlock(&pp->pr_alloc->pa_slock);
569
570 /* Destroy all caches for this pool. */
571 while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
572 pool_cache_destroy(pc);
573
574 #ifdef DIAGNOSTIC
575 if (pp->pr_nout != 0) {
576 pr_printlog(pp, NULL, printf);
577 panic("pool_destroy: pool busy: still out: %u\n",
578 pp->pr_nout);
579 }
580 #endif
581
582 /* Remove all pages */
583 if ((pp->pr_roflags & PR_STATIC) == 0)
584 while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
585 pr_rmpage(pp, ph, NULL);
586
587 /* Remove from global pool list */
588 simple_lock(&pool_head_slock);
589 TAILQ_REMOVE(&pool_head, pp, pr_poollist);
590 if (drainpp == pp) {
591 drainpp = NULL;
592 }
593 simple_unlock(&pool_head_slock);
594
595 #ifdef POOL_DIAGNOSTIC
596 if ((pp->pr_roflags & PR_LOGGING) != 0)
597 free(pp->pr_log, M_TEMP);
598 #endif
599 }
600
601 void
602 pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
603 {
604
605 /* XXX no locking -- must be used just after pool_init() */
606 #ifdef DIAGNOSTIC
607 if (pp->pr_drain_hook != NULL)
608 panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
609 #endif
610 pp->pr_drain_hook = fn;
611 pp->pr_drain_hook_arg = arg;
612 }
613
614 static __inline struct pool_item_header *
615 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
616 {
617 struct pool_item_header *ph;
618 int s;
619
620 LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
621
622 if ((pp->pr_roflags & PR_PHINPAGE) != 0)
623 ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
624 else {
625 s = splhigh();
626 ph = pool_get(&phpool, flags);
627 splx(s);
628 }
629
630 return (ph);
631 }
632
633 /*
634 * Grab an item from the pool; must be called at appropriate spl level
635 */
636 void *
637 #ifdef POOL_DIAGNOSTIC
638 _pool_get(struct pool *pp, int flags, const char *file, long line)
639 #else
640 pool_get(struct pool *pp, int flags)
641 #endif
642 {
643 struct pool_item *pi;
644 struct pool_item_header *ph;
645 void *v;
646
647 #ifdef DIAGNOSTIC
648 if (__predict_false((pp->pr_roflags & PR_STATIC) &&
649 (flags & PR_MALLOCOK))) {
650 pr_printlog(pp, NULL, printf);
651 panic("pool_get: static");
652 }
653
654 if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
655 (flags & PR_WAITOK) != 0))
656 panic("pool_get: must have NOWAIT");
657
658 #ifdef LOCKDEBUG
659 if (flags & PR_WAITOK)
660 simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
661 #endif
662 #endif /* DIAGNOSTIC */
663
664 simple_lock(&pp->pr_slock);
665 pr_enter(pp, file, line);
666
667 startover:
668 /*
669 * Check to see if we've reached the hard limit. If we have,
670 * and we can wait, then wait until an item has been returned to
671 * the pool.
672 */
673 #ifdef DIAGNOSTIC
674 if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
675 pr_leave(pp);
676 simple_unlock(&pp->pr_slock);
677 panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
678 }
679 #endif
680 if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
681 if (pp->pr_drain_hook != NULL) {
682 /*
683 * Since the drain hook is going to free things
684 * back to the pool, unlock, call the hook, re-lock,
685 * and check the hardlimit condition again.
686 */
687 pr_leave(pp);
688 simple_unlock(&pp->pr_slock);
689 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
690 simple_lock(&pp->pr_slock);
691 pr_enter(pp, file, line);
692 if (pp->pr_nout < pp->pr_hardlimit)
693 goto startover;
694 }
695
696 if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
697 /*
698 * XXX: A warning isn't logged in this case. Should
699 * it be?
700 */
701 pp->pr_flags |= PR_WANTED;
702 pr_leave(pp);
703 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
704 pr_enter(pp, file, line);
705 goto startover;
706 }
707
708 /*
709 * Log a message that the hard limit has been hit.
710 */
711 if (pp->pr_hardlimit_warning != NULL &&
712 ratecheck(&pp->pr_hardlimit_warning_last,
713 &pp->pr_hardlimit_ratecap))
714 log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
715
716 pp->pr_nfail++;
717
718 pr_leave(pp);
719 simple_unlock(&pp->pr_slock);
720 return (NULL);
721 }
722
723 /*
724 * The convention we use is that if `curpage' is not NULL, then
725 * it points at a non-empty bucket. In particular, `curpage'
726 * never points at a page header which has PR_PHINPAGE set and
727 * has no items in its bucket.
728 */
729 if ((ph = pp->pr_curpage) == NULL) {
730 #ifdef DIAGNOSTIC
731 if (pp->pr_nitems != 0) {
732 simple_unlock(&pp->pr_slock);
733 printf("pool_get: %s: curpage NULL, nitems %u\n",
734 pp->pr_wchan, pp->pr_nitems);
735 panic("pool_get: nitems inconsistent\n");
736 }
737 #endif
738
739 /*
740 * Call the back-end page allocator for more memory.
741 * Release the pool lock, as the back-end page allocator
742 * may block.
743 */
744 pr_leave(pp);
745 simple_unlock(&pp->pr_slock);
746 v = pool_allocator_alloc(pp, flags);
747 if (__predict_true(v != NULL))
748 ph = pool_alloc_item_header(pp, v, flags);
749 simple_lock(&pp->pr_slock);
750 pr_enter(pp, file, line);
751
752 if (__predict_false(v == NULL || ph == NULL)) {
753 if (v != NULL)
754 pool_allocator_free(pp, v);
755
756 /*
757 * We were unable to allocate a page or item
758 * header, but we released the lock during
759 * allocation, so perhaps items were freed
760 * back to the pool. Check for this case.
761 */
762 if (pp->pr_curpage != NULL)
763 goto startover;
764
765 if ((flags & PR_WAITOK) == 0) {
766 pp->pr_nfail++;
767 pr_leave(pp);
768 simple_unlock(&pp->pr_slock);
769 return (NULL);
770 }
771
772 /*
773 * Wait for items to be returned to this pool.
774 *
775 * XXX: maybe we should wake up once a second and
776 * try again?
777 */
778 pp->pr_flags |= PR_WANTED;
779 /* PA_WANTED is already set on the allocator. */
780 pr_leave(pp);
781 ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
782 pr_enter(pp, file, line);
783 goto startover;
784 }
785
786 /* We have more memory; add it to the pool */
787 pool_prime_page(pp, v, ph);
788 pp->pr_npagealloc++;
789
790 /* Start the allocation process over. */
791 goto startover;
792 }
793
794 if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
795 pr_leave(pp);
796 simple_unlock(&pp->pr_slock);
797 panic("pool_get: %s: page empty", pp->pr_wchan);
798 }
799 #ifdef DIAGNOSTIC
800 if (__predict_false(pp->pr_nitems == 0)) {
801 pr_leave(pp);
802 simple_unlock(&pp->pr_slock);
803 printf("pool_get: %s: items on itemlist, nitems %u\n",
804 pp->pr_wchan, pp->pr_nitems);
805 panic("pool_get: nitems inconsistent\n");
806 }
807 #endif
808
809 #ifdef POOL_DIAGNOSTIC
810 pr_log(pp, v, PRLOG_GET, file, line);
811 #endif
812
813 #ifdef DIAGNOSTIC
814 if (__predict_false(pi->pi_magic != PI_MAGIC)) {
815 pr_printlog(pp, pi, printf);
816 panic("pool_get(%s): free list modified: magic=%x; page %p;"
817 " item addr %p\n",
818 pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
819 }
820 #endif
821
822 /*
823 * Remove from item list.
824 */
825 TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
826 pp->pr_nitems--;
827 pp->pr_nout++;
828 if (ph->ph_nmissing == 0) {
829 #ifdef DIAGNOSTIC
830 if (__predict_false(pp->pr_nidle == 0))
831 panic("pool_get: nidle inconsistent");
832 #endif
833 pp->pr_nidle--;
834 }
835 ph->ph_nmissing++;
836 if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
837 #ifdef DIAGNOSTIC
838 if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
839 pr_leave(pp);
840 simple_unlock(&pp->pr_slock);
841 panic("pool_get: %s: nmissing inconsistent",
842 pp->pr_wchan);
843 }
844 #endif
845 /*
846 * Find a new non-empty page header, if any.
847 * Start search from the page head, to increase
848 * the chance for "high water" pages to be freed.
849 *
850 * Migrate empty pages to the end of the list. This
851 * will speed the update of curpage as pages become
852 * idle. Empty pages intermingled with idle pages
853 * is no big deal. As soon as a page becomes un-empty,
854 * it will move back to the head of the list.
855 */
856 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
857 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
858 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
859 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
860 break;
861
862 pp->pr_curpage = ph;
863 }
864
865 pp->pr_nget++;
866
867 /*
868 * If we have a low water mark and we are now below that low
869 * water mark, add more items to the pool.
870 */
871 if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
872 /*
873 * XXX: Should we log a warning? Should we set up a timeout
874 * to try again in a second or so? The latter could break
875 * a caller's assumptions about interrupt protection, etc.
876 */
877 }
878
879 pr_leave(pp);
880 simple_unlock(&pp->pr_slock);
881 return (v);
882 }
883
884 /*
885 * Internal version of pool_put(). Pool is already locked/entered.
886 */
887 static void
888 pool_do_put(struct pool *pp, void *v)
889 {
890 struct pool_item *pi = v;
891 struct pool_item_header *ph;
892 caddr_t page;
893 int s;
894
895 LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
896
897 page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
898
899 #ifdef DIAGNOSTIC
900 if (__predict_false(pp->pr_nout == 0)) {
901 printf("pool %s: putting with none out\n",
902 pp->pr_wchan);
903 panic("pool_put");
904 }
905 #endif
906
907 if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
908 pr_printlog(pp, NULL, printf);
909 panic("pool_put: %s: page header missing", pp->pr_wchan);
910 }
911
912 #ifdef LOCKDEBUG
913 /*
914 * Check if we're freeing a locked simple lock.
915 */
916 simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
917 #endif
918
919 /*
920 * Return to item list.
921 */
922 #ifdef DIAGNOSTIC
923 pi->pi_magic = PI_MAGIC;
924 #endif
925 #ifdef DEBUG
926 {
927 int i, *ip = v;
928
929 for (i = 0; i < pp->pr_size / sizeof(int); i++) {
930 *ip++ = PI_MAGIC;
931 }
932 }
933 #endif
934
935 TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
936 ph->ph_nmissing--;
937 pp->pr_nput++;
938 pp->pr_nitems++;
939 pp->pr_nout--;
940
941 /* Cancel "pool empty" condition if it exists */
942 if (pp->pr_curpage == NULL)
943 pp->pr_curpage = ph;
944
945 if (pp->pr_flags & PR_WANTED) {
946 pp->pr_flags &= ~PR_WANTED;
947 if (ph->ph_nmissing == 0)
948 pp->pr_nidle++;
949 wakeup((caddr_t)pp);
950 return;
951 }
952
953 /*
954 * If this page is now complete, do one of two things:
955 *
956 * (1) If we have more pages than the page high water
957 * mark, free the page back to the system.
958 *
959 * (2) Move it to the end of the page list, so that
960 * we minimize our chances of fragmenting the
961 * pool. Idle pages migrate to the end (along with
962 * completely empty pages, so that we find un-empty
963 * pages more quickly when we update curpage) of the
964 * list so they can be more easily swept up by
965 * the pagedaemon when pages are scarce.
966 */
967 if (ph->ph_nmissing == 0) {
968 pp->pr_nidle++;
969 if (pp->pr_npages > pp->pr_maxpages) {
970 pr_rmpage(pp, ph, NULL);
971 } else {
972 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
973 TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
974
975 /*
976 * Update the timestamp on the page. A page must
977 * be idle for some period of time before it can
978 * be reclaimed by the pagedaemon. This minimizes
979 * ping-pong'ing for memory.
980 */
981 s = splclock();
982 ph->ph_time = mono_time;
983 splx(s);
984
985 /*
986 * Update the current page pointer. Just look for
987 * the first page with any free items.
988 *
989 * XXX: Maybe we want an option to look for the
990 * page with the fewest available items, to minimize
991 * fragmentation?
992 */
993 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
994 if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
995 break;
996
997 pp->pr_curpage = ph;
998 }
999 }
1000 /*
1001 * If the page has just become un-empty, move it to the head of
1002 * the list, and make it the current page. The next allocation
1003 * will get the item from this page, instead of further fragmenting
1004 * the pool.
1005 */
1006 else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1007 TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
1008 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1009 pp->pr_curpage = ph;
1010 }
1011 }
1012
1013 /*
1014 * Return resource to the pool; must be called at appropriate spl level
1015 */
1016 #ifdef POOL_DIAGNOSTIC
1017 void
1018 _pool_put(struct pool *pp, void *v, const char *file, long line)
1019 {
1020
1021 simple_lock(&pp->pr_slock);
1022 pr_enter(pp, file, line);
1023
1024 pr_log(pp, v, PRLOG_PUT, file, line);
1025
1026 pool_do_put(pp, v);
1027
1028 pr_leave(pp);
1029 simple_unlock(&pp->pr_slock);
1030 }
1031 #undef pool_put
1032 #endif /* POOL_DIAGNOSTIC */
1033
1034 void
1035 pool_put(struct pool *pp, void *v)
1036 {
1037
1038 simple_lock(&pp->pr_slock);
1039
1040 pool_do_put(pp, v);
1041
1042 simple_unlock(&pp->pr_slock);
1043 }
1044
1045 #ifdef POOL_DIAGNOSTIC
1046 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1047 #endif
1048
1049 /*
1050 * Add N items to the pool.
1051 */
1052 int
1053 pool_prime(struct pool *pp, int n)
1054 {
1055 struct pool_item_header *ph;
1056 caddr_t cp;
1057 int newpages, error = 0;
1058
1059 simple_lock(&pp->pr_slock);
1060
1061 newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1062
1063 while (newpages-- > 0) {
1064 simple_unlock(&pp->pr_slock);
1065 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1066 if (__predict_true(cp != NULL))
1067 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1068 simple_lock(&pp->pr_slock);
1069
1070 if (__predict_false(cp == NULL || ph == NULL)) {
1071 error = ENOMEM;
1072 if (cp != NULL)
1073 pool_allocator_free(pp, cp);
1074 break;
1075 }
1076
1077 pool_prime_page(pp, cp, ph);
1078 pp->pr_npagealloc++;
1079 pp->pr_minpages++;
1080 }
1081
1082 if (pp->pr_minpages >= pp->pr_maxpages)
1083 pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1084
1085 simple_unlock(&pp->pr_slock);
1086 return (error);
1087 }
1088
1089 /*
1090 * Add a page worth of items to the pool.
1091 *
1092 * Note, we must be called with the pool descriptor LOCKED.
1093 */
1094 static void
1095 pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1096 {
1097 struct pool_item *pi;
1098 caddr_t cp = storage;
1099 unsigned int align = pp->pr_align;
1100 unsigned int ioff = pp->pr_itemoffset;
1101 int n;
1102
1103 #ifdef DIAGNOSTIC
1104 if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1105 panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1106 #endif
1107
1108 if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1109 LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
1110 ph, ph_hashlist);
1111
1112 /*
1113 * Insert page header.
1114 */
1115 TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
1116 TAILQ_INIT(&ph->ph_itemlist);
1117 ph->ph_page = storage;
1118 ph->ph_nmissing = 0;
1119 memset(&ph->ph_time, 0, sizeof(ph->ph_time));
1120
1121 pp->pr_nidle++;
1122
1123 /*
1124 * Color this page.
1125 */
1126 cp = (caddr_t)(cp + pp->pr_curcolor);
1127 if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1128 pp->pr_curcolor = 0;
1129
1130 /*
1131 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1132 */
1133 if (ioff != 0)
1134 cp = (caddr_t)(cp + (align - ioff));
1135
1136 /*
1137 * Insert remaining chunks on the bucket list.
1138 */
1139 n = pp->pr_itemsperpage;
1140 pp->pr_nitems += n;
1141
1142 while (n--) {
1143 pi = (struct pool_item *)cp;
1144
1145 /* Insert on page list */
1146 TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
1147 #ifdef DIAGNOSTIC
1148 pi->pi_magic = PI_MAGIC;
1149 #endif
1150 cp = (caddr_t)(cp + pp->pr_size);
1151 }
1152
1153 /*
1154 * If the pool was depleted, point at the new page.
1155 */
1156 if (pp->pr_curpage == NULL)
1157 pp->pr_curpage = ph;
1158
1159 if (++pp->pr_npages > pp->pr_hiwat)
1160 pp->pr_hiwat = pp->pr_npages;
1161 }
1162
1163 /*
1164 * Used by pool_get() when nitems drops below the low water mark. This
1165 * is used to catch up nitems with the low water mark.
1166 *
1167 * Note 1, we never wait for memory here, we let the caller decide what to do.
1168 *
1169 * Note 2, this doesn't work with static pools.
1170 *
1171 * Note 3, we must be called with the pool already locked, and we return
1172 * with it locked.
1173 */
1174 static int
1175 pool_catchup(struct pool *pp)
1176 {
1177 struct pool_item_header *ph;
1178 caddr_t cp;
1179 int error = 0;
1180
1181 if (pp->pr_roflags & PR_STATIC) {
1182 /*
1183 * We dropped below the low water mark, and this is not a
1184 * good thing. Log a warning.
1185 *
1186 * XXX: rate-limit this?
1187 */
1188 printf("WARNING: static pool `%s' dropped below low water "
1189 "mark\n", pp->pr_wchan);
1190 return (0);
1191 }
1192
1193 while (POOL_NEEDS_CATCHUP(pp)) {
1194 /*
1195 * Call the page back-end allocator for more memory.
1196 *
1197 * XXX: We never wait, so should we bother unlocking
1198 * the pool descriptor?
1199 */
1200 simple_unlock(&pp->pr_slock);
1201 cp = pool_allocator_alloc(pp, PR_NOWAIT);
1202 if (__predict_true(cp != NULL))
1203 ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1204 simple_lock(&pp->pr_slock);
1205 if (__predict_false(cp == NULL || ph == NULL)) {
1206 if (cp != NULL)
1207 pool_allocator_free(pp, cp);
1208 error = ENOMEM;
1209 break;
1210 }
1211 pool_prime_page(pp, cp, ph);
1212 pp->pr_npagealloc++;
1213 }
1214
1215 return (error);
1216 }
1217
1218 void
1219 pool_setlowat(struct pool *pp, int n)
1220 {
1221 int error;
1222
1223 simple_lock(&pp->pr_slock);
1224
1225 pp->pr_minitems = n;
1226 pp->pr_minpages = (n == 0)
1227 ? 0
1228 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1229
1230 /* Make sure we're caught up with the newly-set low water mark. */
1231 if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
1232 /*
1233 * XXX: Should we log a warning? Should we set up a timeout
1234 * to try again in a second or so? The latter could break
1235 * a caller's assumptions about interrupt protection, etc.
1236 */
1237 }
1238
1239 simple_unlock(&pp->pr_slock);
1240 }
1241
1242 void
1243 pool_sethiwat(struct pool *pp, int n)
1244 {
1245
1246 simple_lock(&pp->pr_slock);
1247
1248 pp->pr_maxpages = (n == 0)
1249 ? 0
1250 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1251
1252 simple_unlock(&pp->pr_slock);
1253 }
1254
1255 void
1256 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1257 {
1258
1259 simple_lock(&pp->pr_slock);
1260
1261 pp->pr_hardlimit = n;
1262 pp->pr_hardlimit_warning = warnmess;
1263 pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1264 pp->pr_hardlimit_warning_last.tv_sec = 0;
1265 pp->pr_hardlimit_warning_last.tv_usec = 0;
1266
1267 /*
1268 * In-line version of pool_sethiwat(), because we don't want to
1269 * release the lock.
1270 */
1271 pp->pr_maxpages = (n == 0)
1272 ? 0
1273 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1274
1275 simple_unlock(&pp->pr_slock);
1276 }
1277
1278 /*
1279 * Release all complete pages that have not been used recently.
1280 */
1281 int
1282 #ifdef POOL_DIAGNOSTIC
1283 _pool_reclaim(struct pool *pp, const char *file, long line)
1284 #else
1285 pool_reclaim(struct pool *pp)
1286 #endif
1287 {
1288 struct pool_item_header *ph, *phnext;
1289 struct pool_cache *pc;
1290 struct timeval curtime;
1291 struct pool_pagelist pq;
1292 int s;
1293
1294 if (pp->pr_roflags & PR_STATIC)
1295 return (0);
1296
1297 if (pp->pr_drain_hook != NULL) {
1298 /*
1299 * The drain hook must be called with the pool unlocked.
1300 */
1301 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1302 }
1303
1304 if (simple_lock_try(&pp->pr_slock) == 0)
1305 return (0);
1306 pr_enter(pp, file, line);
1307
1308 TAILQ_INIT(&pq);
1309
1310 /*
1311 * Reclaim items from the pool's caches.
1312 */
1313 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1314 pool_cache_reclaim(pc);
1315
1316 s = splclock();
1317 curtime = mono_time;
1318 splx(s);
1319
1320 for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
1321 phnext = TAILQ_NEXT(ph, ph_pagelist);
1322
1323 /* Check our minimum page claim */
1324 if (pp->pr_npages <= pp->pr_minpages)
1325 break;
1326
1327 if (ph->ph_nmissing == 0) {
1328 struct timeval diff;
1329 timersub(&curtime, &ph->ph_time, &diff);
1330 if (diff.tv_sec < pool_inactive_time)
1331 continue;
1332
1333 /*
1334 * If freeing this page would put us below
1335 * the low water mark, stop now.
1336 */
1337 if ((pp->pr_nitems - pp->pr_itemsperpage) <
1338 pp->pr_minitems)
1339 break;
1340
1341 pr_rmpage(pp, ph, &pq);
1342 }
1343 }
1344
1345 pr_leave(pp);
1346 simple_unlock(&pp->pr_slock);
1347 if (TAILQ_EMPTY(&pq))
1348 return (0);
1349
1350 while ((ph = TAILQ_FIRST(&pq)) != NULL) {
1351 TAILQ_REMOVE(&pq, ph, ph_pagelist);
1352 pool_allocator_free(pp, ph->ph_page);
1353 if (pp->pr_roflags & PR_PHINPAGE) {
1354 continue;
1355 }
1356 LIST_REMOVE(ph, ph_hashlist);
1357 s = splhigh();
1358 pool_put(&phpool, ph);
1359 splx(s);
1360 }
1361
1362 return (1);
1363 }
1364
1365 /*
1366 * Drain pools, one at a time.
1367 *
1368 * Note, we must never be called from an interrupt context.
1369 */
1370 void
1371 pool_drain(void *arg)
1372 {
1373 struct pool *pp;
1374 int s;
1375
1376 pp = NULL;
1377 s = splvm();
1378 simple_lock(&pool_head_slock);
1379 if (drainpp == NULL) {
1380 drainpp = TAILQ_FIRST(&pool_head);
1381 }
1382 if (drainpp) {
1383 pp = drainpp;
1384 drainpp = TAILQ_NEXT(pp, pr_poollist);
1385 }
1386 simple_unlock(&pool_head_slock);
1387 if (pp != NULL) pool_reclaim(pp);
1388 splx(s);
1389 }
1390
1391 /*
1392 * Diagnostic helpers.
1393 */
1394 void
1395 pool_print(struct pool *pp, const char *modif)
1396 {
1397 int s;
1398
1399 s = splvm();
1400 if (simple_lock_try(&pp->pr_slock) == 0) {
1401 printf("pool %s is locked; try again later\n",
1402 pp->pr_wchan);
1403 splx(s);
1404 return;
1405 }
1406 pool_print1(pp, modif, printf);
1407 simple_unlock(&pp->pr_slock);
1408 splx(s);
1409 }
1410
1411 void
1412 pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1413 {
1414 int didlock = 0;
1415
1416 if (pp == NULL) {
1417 (*pr)("Must specify a pool to print.\n");
1418 return;
1419 }
1420
1421 /*
1422 * Called from DDB; interrupts should be blocked, and all
1423 * other processors should be paused. We can skip locking
1424 * the pool in this case.
1425 *
1426 * We do a simple_lock_try() just to print the lock
1427 * status, however.
1428 */
1429
1430 if (simple_lock_try(&pp->pr_slock) == 0)
1431 (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1432 else
1433 didlock = 1;
1434
1435 pool_print1(pp, modif, pr);
1436
1437 if (didlock)
1438 simple_unlock(&pp->pr_slock);
1439 }
1440
1441 static void
1442 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1443 {
1444 struct pool_item_header *ph;
1445 struct pool_cache *pc;
1446 struct pool_cache_group *pcg;
1447 #ifdef DIAGNOSTIC
1448 struct pool_item *pi;
1449 #endif
1450 int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1451 char c;
1452
1453 while ((c = *modif++) != '\0') {
1454 if (c == 'l')
1455 print_log = 1;
1456 if (c == 'p')
1457 print_pagelist = 1;
1458 if (c == 'c')
1459 print_cache = 1;
1461 }
1462
1463 (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1464 pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1465 pp->pr_roflags);
1466 (*pr)("\talloc %p\n", pp->pr_alloc);
1467 (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1468 pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1469 (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1470 pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1471
1472 (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1473 pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1474 (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1475 pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1476
1477 if (print_pagelist == 0)
1478 goto skip_pagelist;
1479
1480 if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
1481 (*pr)("\n\tpage list:\n");
1482 for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
1483 (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1484 ph->ph_page, ph->ph_nmissing,
1485 (u_long)ph->ph_time.tv_sec,
1486 (u_long)ph->ph_time.tv_usec);
1487 #ifdef DIAGNOSTIC
1488 TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1489 if (pi->pi_magic != PI_MAGIC) {
1490 (*pr)("\t\t\titem %p, magic 0x%x\n",
1491 pi, pi->pi_magic);
1492 }
1493 }
1494 #endif
1495 }
1496 if (pp->pr_curpage == NULL)
1497 (*pr)("\tno current page\n");
1498 else
1499 (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1500
1501 skip_pagelist:
1502
1503 if (print_log == 0)
1504 goto skip_log;
1505
1506 (*pr)("\n");
1507 if ((pp->pr_roflags & PR_LOGGING) == 0)
1508 (*pr)("\tno log\n");
1509 else
1510 pr_printlog(pp, NULL, pr);
1511
1512 skip_log:
1513
1514 if (print_cache == 0)
1515 goto skip_cache;
1516
1517 TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1518 (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
1519 pc->pc_allocfrom, pc->pc_freeto);
1520 (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1521 pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1522 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1523 (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
1524 for (i = 0; i < PCG_NOBJECTS; i++)
1525 (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
1526 }
1527 }
1528
1529 skip_cache:
1530
1531 pr_enter_check(pp, pr);
1532 }
1533
1534 int
1535 pool_chk(struct pool *pp, const char *label)
1536 {
1537 struct pool_item_header *ph;
1538 int r = 0;
1539
1540 simple_lock(&pp->pr_slock);
1541
1542 TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
1543 struct pool_item *pi;
1544 int n;
1545 caddr_t page;
1546
1547 page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1548 if (page != ph->ph_page &&
1549 (pp->pr_roflags & PR_PHINPAGE) != 0) {
1550 if (label != NULL)
1551 printf("%s: ", label);
1552 printf("pool(%p:%s): page inconsistency: page %p;"
1553 " at page head addr %p (p %p)\n", pp,
1554 pp->pr_wchan, ph->ph_page,
1555 ph, page);
1556 r++;
1557 goto out;
1558 }
1559
1560 for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
1561 pi != NULL;
1562 pi = TAILQ_NEXT(pi,pi_list), n++) {
1563
1564 #ifdef DIAGNOSTIC
1565 if (pi->pi_magic != PI_MAGIC) {
1566 if (label != NULL)
1567 printf("%s: ", label);
1568 printf("pool(%s): free list modified: magic=%x;"
1569 " page %p; item ordinal %d;"
1570 " addr %p (p %p)\n",
1571 pp->pr_wchan, pi->pi_magic, ph->ph_page,
1572 n, pi, page);
1573 panic("pool");
1574 }
1575 #endif
1576 page =
1577 (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1578 if (page == ph->ph_page)
1579 continue;
1580
1581 if (label != NULL)
1582 printf("%s: ", label);
1583 printf("pool(%p:%s): page inconsistency: page %p;"
1584 " item ordinal %d; addr %p (p %p)\n", pp,
1585 pp->pr_wchan, ph->ph_page,
1586 n, pi, page);
1587 r++;
1588 goto out;
1589 }
1590 }
1591 out:
1592 simple_unlock(&pp->pr_slock);
1593 return (r);
1594 }
1595
1596 /*
1597 * pool_cache_init:
1598 *
1599 * Initialize a pool cache.
1600 *
1601 * NOTE: If the pool must be protected from interrupts, we expect
1602 * to be called at the appropriate interrupt priority level.
1603 */
1604 void
1605 pool_cache_init(struct pool_cache *pc, struct pool *pp,
1606 int (*ctor)(void *, void *, int),
1607 void (*dtor)(void *, void *),
1608 void *arg)
1609 {
1610
1611 TAILQ_INIT(&pc->pc_grouplist);
1612 simple_lock_init(&pc->pc_slock);
1613
1614 pc->pc_allocfrom = NULL;
1615 pc->pc_freeto = NULL;
1616 pc->pc_pool = pp;
1617
1618 pc->pc_ctor = ctor;
1619 pc->pc_dtor = dtor;
1620 pc->pc_arg = arg;
1621
1622 pc->pc_hits = 0;
1623 pc->pc_misses = 0;
1624
1625 pc->pc_ngroups = 0;
1626
1627 pc->pc_nitems = 0;
1628
1629 simple_lock(&pp->pr_slock);
1630 TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
1631 simple_unlock(&pp->pr_slock);
1632 }
1633
1634 /*
1635 * pool_cache_destroy:
1636 *
1637 * Destroy a pool cache.
1638 */
1639 void
1640 pool_cache_destroy(struct pool_cache *pc)
1641 {
1642 struct pool *pp = pc->pc_pool;
1643
1644 /* First, invalidate the entire cache. */
1645 pool_cache_invalidate(pc);
1646
1647 /* ...and remove it from the pool's cache list. */
1648 simple_lock(&pp->pr_slock);
1649 TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
1650 simple_unlock(&pp->pr_slock);
1651 }
1652
1653 static __inline void *
1654 pcg_get(struct pool_cache_group *pcg)
1655 {
1656 void *object;
1657 u_int idx;
1658
1659 KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1660 KASSERT(pcg->pcg_avail != 0);
1661 idx = --pcg->pcg_avail;
1662
1663 KASSERT(pcg->pcg_objects[idx] != NULL);
1664 object = pcg->pcg_objects[idx];
1665 pcg->pcg_objects[idx] = NULL;
1666
1667 return (object);
1668 }
1669
1670 static __inline void
1671 pcg_put(struct pool_cache_group *pcg, void *object)
1672 {
1673 u_int idx;
1674
1675 KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1676 idx = pcg->pcg_avail++;
1677
1678 KASSERT(pcg->pcg_objects[idx] == NULL);
1679 pcg->pcg_objects[idx] = object;
1680 }
1681
1682 /*
1683 * pool_cache_get:
1684 *
1685 * Get an object from a pool cache.
1686 */
1687 void *
1688 pool_cache_get(struct pool_cache *pc, int flags)
1689 {
1690 struct pool_cache_group *pcg;
1691 void *object;
1692
1693 #ifdef LOCKDEBUG
1694 if (flags & PR_WAITOK)
1695 simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1696 #endif
1697
1698 simple_lock(&pc->pc_slock);
1699
1700 if ((pcg = pc->pc_allocfrom) == NULL) {
1701 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1702 if (pcg->pcg_avail != 0) {
1703 pc->pc_allocfrom = pcg;
1704 goto have_group;
1705 }
1706 }
1707
1708 /*
1709 * No groups with any available objects. Allocate
1710 * a new object, construct it, and return it to
1711 * the caller. We will allocate a group, if necessary,
1712 * when the object is freed back to the cache.
1713 */
1714 pc->pc_misses++;
1715 simple_unlock(&pc->pc_slock);
1716 object = pool_get(pc->pc_pool, flags);
1717 if (object != NULL && pc->pc_ctor != NULL) {
1718 if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1719 pool_put(pc->pc_pool, object);
1720 return (NULL);
1721 }
1722 }
1723 return (object);
1724 }
1725
1726 have_group:
1727 pc->pc_hits++;
1728 pc->pc_nitems--;
1729 object = pcg_get(pcg);
1730
1731 if (pcg->pcg_avail == 0)
1732 pc->pc_allocfrom = NULL;
1733
1734 simple_unlock(&pc->pc_slock);
1735
1736 return (object);
1737 }
1738
1739 /*
1740 * pool_cache_put:
1741 *
1742 * Put an object back to the pool cache.
1743 */
1744 void
1745 pool_cache_put(struct pool_cache *pc, void *object)
1746 {
1747 struct pool_cache_group *pcg;
1748 int s;
1749
1750 simple_lock(&pc->pc_slock);
1751
1752 if ((pcg = pc->pc_freeto) == NULL) {
1753 TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
1754 if (pcg->pcg_avail != PCG_NOBJECTS) {
1755 pc->pc_freeto = pcg;
1756 goto have_group;
1757 }
1758 }
1759
1760 /*
1761 * No empty groups to free the object to. Attempt to
1762 * allocate one.
1763 */
1764 simple_unlock(&pc->pc_slock);
1765 s = splvm();
1766 pcg = pool_get(&pcgpool, PR_NOWAIT);
1767 splx(s);
1768 if (pcg != NULL) {
1769 memset(pcg, 0, sizeof(*pcg));
1770 simple_lock(&pc->pc_slock);
1771 pc->pc_ngroups++;
1772 TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
1773 if (pc->pc_freeto == NULL)
1774 pc->pc_freeto = pcg;
1775 goto have_group;
1776 }
1777
1778 /*
1779 * Unable to allocate a cache group; destruct the object
1780 * and free it back to the pool.
1781 */
1782 pool_cache_destruct_object(pc, object);
1783 return;
1784 }
1785
1786 have_group:
1787 pc->pc_nitems++;
1788 pcg_put(pcg, object);
1789
1790 if (pcg->pcg_avail == PCG_NOBJECTS)
1791 pc->pc_freeto = NULL;
1792
1793 simple_unlock(&pc->pc_slock);
1794 }
1795
1796 /*
1797 * pool_cache_destruct_object:
1798 *
1799 * Force destruction of an object and its release back into
1800 * the pool.
1801 */
1802 void
1803 pool_cache_destruct_object(struct pool_cache *pc, void *object)
1804 {
1805
1806 if (pc->pc_dtor != NULL)
1807 (*pc->pc_dtor)(pc->pc_arg, object);
1808 pool_put(pc->pc_pool, object);
1809 }
1810
1811 /*
1812 * pool_cache_do_invalidate:
1813 *
1814 * This internal function implements pool_cache_invalidate() and
1815 * pool_cache_reclaim().
1816 */
1817 static void
1818 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
1819 void (*putit)(struct pool *, void *))
1820 {
1821 struct pool_cache_group *pcg, *npcg;
1822 void *object;
1823 int s;
1824
1825 for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
1826 pcg = npcg) {
1827 npcg = TAILQ_NEXT(pcg, pcg_list);
1828 while (pcg->pcg_avail != 0) {
1829 pc->pc_nitems--;
1830 object = pcg_get(pcg);
1831 if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
1832 pc->pc_allocfrom = NULL;
1833 if (pc->pc_dtor != NULL)
1834 (*pc->pc_dtor)(pc->pc_arg, object);
1835 (*putit)(pc->pc_pool, object);
1836 }
1837 if (free_groups) {
1838 pc->pc_ngroups--;
1839 TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
1840 if (pc->pc_freeto == pcg)
1841 pc->pc_freeto = NULL;
1842 s = splvm();
1843 pool_put(&pcgpool, pcg);
1844 splx(s);
1845 }
1846 }
1847 }
1848
1849 /*
1850 * pool_cache_invalidate:
1851 *
1852 * Invalidate a pool cache (destruct and release all of the
1853 * cached objects).
1854 */
1855 void
1856 pool_cache_invalidate(struct pool_cache *pc)
1857 {
1858
1859 simple_lock(&pc->pc_slock);
1860 pool_cache_do_invalidate(pc, 0, pool_put);
1861 simple_unlock(&pc->pc_slock);
1862 }
1863
1864 /*
1865 * pool_cache_reclaim:
1866 *
1867 * Reclaim a pool cache for pool_reclaim().
1868 */
1869 static void
1870 pool_cache_reclaim(struct pool_cache *pc)
1871 {
1872
1873 simple_lock(&pc->pc_slock);
1874 pool_cache_do_invalidate(pc, 1, pool_do_put);
1875 simple_unlock(&pc->pc_slock);
1876 }
1877
1878 /*
1879 * Pool backend allocators.
1880 *
1881 * Each pool has a backend allocator that handles allocation, deallocation,
1882 * and any additional draining that might be needed.
1883 *
1884 * We provide two standard allocators:
1885 *
1886 * pool_allocator_kmem - the default when no allocator is specified
1887 *
1888 * pool_allocator_nointr - used for pools that will not be accessed
1889 * in interrupt context.
1890 */
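/*
 * A pool may also supply its own allocator; a minimal sketch (the
 * "foo" names are hypothetical):
 *
 *	void *foo_page_alloc(struct pool *, int);
 *	void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &foo_allocator);
 */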
1891 void *pool_page_alloc(struct pool *, int);
1892 void pool_page_free(struct pool *, void *);
1893
1894 struct pool_allocator pool_allocator_kmem = {
1895 pool_page_alloc, pool_page_free, 0,
1896 };
1897
1898 void *pool_page_alloc_nointr(struct pool *, int);
1899 void pool_page_free_nointr(struct pool *, void *);
1900
1901 struct pool_allocator pool_allocator_nointr = {
1902 pool_page_alloc_nointr, pool_page_free_nointr, 0,
1903 };
1904
1905 #ifdef POOL_SUBPAGE
1906 void *pool_subpage_alloc(struct pool *, int);
1907 void pool_subpage_free(struct pool *, void *);
1908
1909 struct pool_allocator pool_allocator_kmem_subpage = {
1910 pool_subpage_alloc, pool_subpage_free, 0,
1911 };
1912 #endif /* POOL_SUBPAGE */
1913
1914 /*
1915 * We have at least three different resources for the same allocation and
1916 * each resource can be depleted. First, we have the ready elements in the
1917 * pool. Then we have the resource (typically a vm_map) for this allocator.
1918 * Finally, we have physical memory. Waiting for any of these can be
1919 * unnecessary when any other is freed, but the kernel doesn't support
1920 * sleeping on multiple wait channels, so we have to employ another strategy.
1921 *
1922 * The caller sleeps on the pool (so that it can be awakened when an item
1923 * is returned to the pool), but we set PA_WANT on the allocator. When a
1924 * page is returned to the allocator and PA_WANT is set, pool_allocator_free
1925 * will wake up all sleeping pools belonging to this allocator.
1926 *
1927 * XXX Thundering herd.
1928 */
1929 void *
1930 pool_allocator_alloc(struct pool *org, int flags)
1931 {
1932 struct pool_allocator *pa = org->pr_alloc;
1933 struct pool *pp, *start;
1934 int s, freed;
1935 void *res;
1936
1937 do {
1938 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1939 return (res);
1940 if ((flags & PR_WAITOK) == 0) {
1941 /*
1942 * We only run the drain hook here if PR_NOWAIT.
1943 * In other cases, the hook will be run in
1944 * pool_reclaim().
1945 */
1946 if (org->pr_drain_hook != NULL) {
1947 (*org->pr_drain_hook)(org->pr_drain_hook_arg,
1948 flags);
1949 if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
1950 return (res);
1951 }
1952 break;
1953 }
1954
1955 /*
1956 * Drain all pools, except "org", that use this
1957 * allocator. We do this to reclaim VA space.
1958 * pa_alloc is responsible for waiting for
1959 * physical memory.
1960 *
1961 * XXX We risk looping forever if someone calls
1962 * pool_destroy() on "start". But there is no other
1963 * way to have a potentially sleeping pool_reclaim(),
1964 * non-sleeping locks on pool_allocator, and some
1965 * stirring of drained pools in the allocator.
1966 *
1967 * XXX Maybe we should use pool_head_slock for locking
1968 * the allocators?
1969 */
1970 freed = 0;
1971
1972 s = splvm();
1973 simple_lock(&pa->pa_slock);
1974 pp = start = TAILQ_FIRST(&pa->pa_list);
1975 do {
1976 TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
1977 TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1978 if (pp == org)
1979 continue;
1980 simple_unlock(&pa->pa_slock);
1981 freed = pool_reclaim(pp);
1982 simple_lock(&pa->pa_slock);
1983 } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
1984 freed == 0);
1985
1986 if (freed == 0) {
1987 /*
1988 * We set PA_WANT here, the caller will most likely
1989 * sleep waiting for pages (if not, this won't hurt
1990 * that much), and there is no way to set this in
1991 * the caller without violating locking order.
1992 */
1993 pa->pa_flags |= PA_WANT;
1994 }
1995 simple_unlock(&pa->pa_slock);
1996 splx(s);
1997 } while (freed);
1998 return (NULL);
1999 }
2000
2001 void
2002 pool_allocator_free(struct pool *pp, void *v)
2003 {
2004 struct pool_allocator *pa = pp->pr_alloc;
2005 int s;
2006
2007 (*pa->pa_free)(pp, v);
2008
2009 s = splvm();
2010 simple_lock(&pa->pa_slock);
2011 if ((pa->pa_flags & PA_WANT) == 0) {
2012 simple_unlock(&pa->pa_slock);
2013 splx(s);
2014 return;
2015 }
2016
2017 TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2018 simple_lock(&pp->pr_slock);
2019 if ((pp->pr_flags & PR_WANTED) != 0) {
2020 pp->pr_flags &= ~PR_WANTED;
2021 wakeup(pp);
2022 }
2023 simple_unlock(&pp->pr_slock);
2024 }
2025 pa->pa_flags &= ~PA_WANT;
2026 simple_unlock(&pa->pa_slock);
2027 splx(s);
2028 }
2029
2030 void *
2031 pool_page_alloc(struct pool *pp, int flags)
2032 {
2033 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2034
2035 return ((void *) uvm_km_alloc_poolpage(waitok));
2036 }
2037
2038 void
2039 pool_page_free(struct pool *pp, void *v)
2040 {
2041
2042 uvm_km_free_poolpage((vaddr_t) v);
2043 }
2044
2045 #ifdef POOL_SUBPAGE
2046 /* Sub-page allocator, for machines with large hardware pages. */
2047 void *
2048 pool_subpage_alloc(struct pool *pp, int flags)
2049 {
2050
2051 return (pool_get(&psppool, flags));
2052 }
2053
2054 void
2055 pool_subpage_free(struct pool *pp, void *v)
2056 {
2057
2058 pool_put(&psppool, v);
2059 }
2060
2061 /* We don't provide a real nointr allocator. Maybe later. */
2062 void *
2063 pool_page_alloc_nointr(struct pool *pp, int flags)
2064 {
2065
2066 return (pool_subpage_alloc(pp, flags));
2067 }
2068
2069 void
2070 pool_page_free_nointr(struct pool *pp, void *v)
2071 {
2072
2073 pool_subpage_free(pp, v);
2074 }
2075 #else
2076 void *
2077 pool_page_alloc_nointr(struct pool *pp, int flags)
2078 {
2079 boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2080
2081 return ((void *) uvm_km_alloc_poolpage1(kernel_map,
2082 uvm.kernel_object, waitok));
2083 }
2084
2085 void
2086 pool_page_free_nointr(struct pool *pp, void *v)
2087 {
2088
2089 uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
2090 }
2091 #endif /* POOL_SUBPAGE */
2092