/*	$NetBSD: subr_pool.c,v 1.53 2001/05/10 01:37:40 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_pool.h"
#include "opt_poollog.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/syslog.h>

#include <uvm/uvm.h>

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
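
/*
 * An illustrative sketch of typical usage (`struct foo', `foo_pool'
 * and `fp' are hypothetical names, not part of this file):
 *
 *	static struct pool foo_pool;
 *	struct foo *fp;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_TEMP);
 *
 *	fp = pool_get(&foo_pool, PR_WAITOK);
 *	...
 *	pool_put(&foo_pool, fp);
 */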

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp;

/* This spin lock protects both pool_head and drainpp. */
struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
			ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item) ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
			ph_hashlist;	/* Off-page page headers */
	int		ph_nmissing;	/* # of chunks in use */
	caddr_t		ph_page;	/* this page's address */
	struct timeval	ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#endif
#define	PI_MAGIC 0xdeadbeef
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
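
/*
 * E.g., assuming 4 KB pages (pr_pageshift == 12), an item address of
 * 0xc132f080 yields page number 0xc132f, which the mask then folds
 * into one of the PR_HASHTABSIZE hash buckets.
 */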

#define	POOL_NEEDS_CATCHUP(pp)						\
	((pp)->pr_nitems < (pp)->pr_minitems)

/*
 * Pool cache management.
 *
 * Pool caches provide a way for constructed objects to be cached by the
 * pool subsystem.  This can lead to performance improvements by avoiding
 * needless object construction/destruction; construction and destruction
 * are deferred until absolutely necessary.
 *
 * Caches are grouped into cache groups.  Each cache group references
 * up to 16 constructed objects.  When a cache allocates an object
 * from the pool, it calls the object's constructor and places it into
 * a cache group.  When a cache group frees an object back to the pool,
 * it first calls the object's destructor.  This allows the object to
 * persist in constructed form while freed to the cache.
 *
 * Multiple caches may exist for each pool.  This allows a single
 * object type to have multiple constructed forms.  The pool references
 * each cache, so that when a pool is drained by the pagedaemon, it can
 * drain each individual cache as well.  Each time a cache is drained,
 * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are layered on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
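
/*
 * Illustrative sketch of the cache API (`foo_cache', `foo_pool',
 * `foo_ctor' and `foo_dtor' are hypothetical):
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	fp = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, fp);
 */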

/* The cache group pool. */
static struct pool pcgpool;

/* The pool cache group. */
#define	PCG_NOBJECTS		16
struct pool_cache_group {
	TAILQ_ENTRY(pool_cache_group)
		pcg_list;	/* link in the pool cache's group list */
	u_int	pcg_avail;	/* # available objects */
				/* pointers to the objects */
	void	*pcg_objects[PCG_NOBJECTS];
};

static void	pool_cache_reclaim(struct pool_cache *);

static int	pool_catchup(struct pool *);
static int	pool_prime_page(struct pool *, caddr_t, int);
static void	*pool_page_alloc(unsigned long, int, int);
static void	pool_page_free(void *, unsigned long, int);

static void	pool_print1(struct pool *, const char *,
		    void (*)(const char *, ...));

/*
 * Pool log entry. An array of these is allocated in pool_init().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
#ifndef POOL_LOGSIZE
#define	POOL_LOGSIZE	10
#endif

int pool_logsize = POOL_LOGSIZE;

#ifdef DIAGNOSTIC
static __inline void
pr_log(struct pool *pp, void *v, int action, const char *file, long line)
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(struct pool *pp, struct pool_item *pi,
    void (*pr)(const char *, ...))
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_roflags & PR_LOGGING) == 0)
		return;

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			if (pi == NULL || pi == pl->pl_addr) {
				(*pr)("\tlog entry %d:\n", i);
				(*pr)("\t\taction = %s, addr = %p\n",
				    pl->pl_action == PRLOG_GET ? "get" : "put",
				    pl->pl_addr);
				(*pr)("\t\tfile: %s at line %lu\n",
				    pl->pl_file, pl->pl_line);
			}
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}

static __inline void
pr_enter(struct pool *pp, const char *file, long line)
{

	if (__predict_false(pp->pr_entered_file != NULL)) {
		printf("pool %s: reentrancy at file %s line %ld\n",
		    pp->pr_wchan, file, line);
		printf("         previous entry at file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
		panic("pr_enter");
	}

	pp->pr_entered_file = file;
	pp->pr_entered_line = line;
}

static __inline void
pr_leave(struct pool *pp)
{

	if (__predict_false(pp->pr_entered_file == NULL)) {
		printf("pool %s not entered?\n", pp->pr_wchan);
		panic("pr_leave");
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;
}

static __inline void
pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
{

	if (pp->pr_entered_file != NULL)
		(*pr)("\n\tcurrently entered from file %s line %ld\n",
		    pp->pr_entered_file, pp->pr_entered_line);
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp, pi, pr)
#define	pr_enter(pp, file, line)
#define	pr_leave(pp)
#define	pr_enter_check(pp, pr)
#endif /* DIAGNOSTIC */

/*
 * Return the pool page header based on page address.
 */
static __inline struct pool_item_header *
pr_find_pagehead(struct pool *pp, caddr_t page)
{
	struct pool_item_header *ph;

	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline void
pr_rmpage(struct pool *pp, struct pool_item_header *ph)
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
		if (pp->pr_nitems < pp->pr_itemsperpage)
			panic("pr_rmpage: nitems inconsistent");
#endif
		pp->pr_nidle--;
	}

	pp->pr_nitems -= pp->pr_itemsperpage;

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
		int s;
		LIST_REMOVE(ph, ph_hashlist);
		s = splhigh();
		pool_put(&phpool, ph);
		splx(s);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
    const char *wchan, size_t pagesz,
    void *(*alloc)(unsigned long, int, int),
    void (*release)(void *, unsigned long, int),
    int mtype)
{
	int off, slack, i;

#ifdef POOL_DIAGNOSTIC
	/*
	 * Always log if POOL_DIAGNOSTIC is defined.
	 */
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz))
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	size = ALIGN(size);
	if (size > pagesz)
		panic("pool_init: pool item size (%lu) too large",
		    (u_long)size);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INIT(&pp->pr_pagelist);
	TAILQ_INIT(&pp->pr_cachelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_roflags = flags;
	pp->pr_flags = 0;
	pp->pr_size = size;
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;
	pp->pr_nitems = 0;
	pp->pr_nout = 0;
	pp->pr_hardlimit = UINT_MAX;
	pp->pr_hardlimit_warning = NULL;
	pp->pr_hardlimit_ratecap.tv_sec = 0;
	pp->pr_hardlimit_ratecap.tv_usec = 0;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_roflags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
		    pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
	KASSERT(pp->pr_itemsperpage != 0);

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
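
	/*
	 * A worked example of the two computations above (illustrative
	 * only): assume 4 KB pages, ioff == 0, and a machine where
	 * ALIGN(1) is 8.  A 384-byte item is not below pagesz/16 (256),
	 * so the page header lives off page and off == 4096.  Then
	 * pr_itemsperpage == 4096 / 384 == 10, slack == 4096 - 3840 ==
	 * 256, and pr_maxcolor == 256; successive pages start their
	 * items at offsets 0, 8, 16, ... so that the items land on
	 * different cache lines from page to page.
	 */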

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

	if (flags & PR_LOGGING) {
		if (kmem_map == NULL ||
		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		     M_TEMP, M_NOWAIT)) == NULL)
			pp->pr_roflags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}

	pp->pr_entered_file = NULL;
	pp->pr_entered_line = 0;

	simple_lock_init(&pp->pr_slock);

	/*
	 * Initialize private page header pool and cache magazine pool if we
	 * haven't done so yet.
	 * XXX LOCKING.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
		    0, "pcgpool", 0, 0, 0, 0);
	}

	/* Insert into the list of all pools. */
	simple_lock(&pool_head_slock);
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	simple_unlock(&pool_head_slock);
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(struct pool *pp)
{
	struct pool_item_header *ph;
	struct pool_cache *pc;

	/* Destroy all caches for this pool. */
	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
		pool_cache_destroy(pc);

#ifdef DIAGNOSTIC
	if (pp->pr_nout != 0) {
		pr_printlog(pp, NULL, printf);
		panic("pool_destroy: pool busy: still out: %u\n",
		    pp->pr_nout);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_roflags & PR_STATIC) == 0)
		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	simple_lock(&pool_head_slock);
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	/* XXX Only clear this if we were drainpp? */
	drainpp = NULL;
	simple_unlock(&pool_head_slock);

	if ((pp->pr_roflags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);

	if (pp->pr_roflags & PR_FREEHEADER)
		free(pp, M_POOL);
}


/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
void *
_pool_get(struct pool *pp, int flags, const char *file, long line)
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
	    (flags & PR_MALLOCOK))) {
		pr_printlog(pp, NULL, printf);
		panic("pool_get: static");
	}
#endif

	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
	    (flags & PR_WAITOK) != 0))
		panic("pool_get: must have NOWAIT");

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

 startover:
	/*
	 * Check to see if we've reached the hard limit.  If we have,
	 * and we can wait, then wait until an item has been returned to
	 * the pool.
	 */
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
	}
#endif
	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
			/*
			 * XXX: A warning isn't logged in this case.  Should
			 * it be?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/*
		 * Log a message that the hard limit has been hit.
		 */
		if (pp->pr_hardlimit_warning != NULL &&
		    ratecheck(&pp->pr_hardlimit_warning_last,
			      &pp->pr_hardlimit_ratecap))
			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);

		if (flags & PR_URGENT)
			panic("pool_get: urgent");

		pp->pr_nfail++;

		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		return (NULL);
	}

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	if ((ph = pp->pr_curpage) == NULL) {
		void *v;

#ifdef DIAGNOSTIC
		if (pp->pr_nitems != 0) {
			simple_unlock(&pp->pr_slock);
			printf("pool_get: %s: curpage NULL, nitems %u\n",
			    pp->pr_wchan, pp->pr_nitems);
			panic("pool_get: nitems inconsistent\n");
		}
#endif

		/*
		 * Call the back-end page allocator for more memory.
		 * Release the pool lock, as the back-end page allocator
		 * may block.
		 */
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		simple_lock(&pp->pr_slock);
		pr_enter(pp, file, line);

		if (v == NULL) {
			/*
			 * We were unable to allocate a page, but
			 * we released the lock during allocation,
			 * so perhaps items were freed back to the
			 * pool.  Check for this case.
			 */
			if (pp->pr_curpage != NULL)
				goto startover;

			if (flags & PR_URGENT)
				panic("pool_get: urgent");

			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				pr_leave(pp);
				simple_unlock(&pp->pr_slock);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 *
			 * XXX: we actually want to wait just until
			 * the page allocator has memory again. Depending
			 * on this pool's usage, we might get stuck here
			 * for a long time.
			 *
			 * XXX: maybe we should wake up once a second and
			 * try again?
			 */
			pp->pr_flags |= PR_WANTED;
			pr_leave(pp);
			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
			pr_enter(pp, file, line);
			goto startover;
		}

		/* We have more memory; add it to the pool */
		if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) {
			/*
			 * We were probably not allowed to wait and
			 * could not allocate a page header.
			 */
			(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
			pp->pr_nfail++;
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			return (NULL);
		}
		pp->pr_npagealloc++;

		/* Start the allocation process over. */
		goto startover;
	}

	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		panic("pool_get: %s: page empty", pp->pr_wchan);
	}
#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nitems == 0)) {
		pr_leave(pp);
		simple_unlock(&pp->pr_slock);
		printf("pool_get: %s: items on itemlist, nitems %u\n",
		    pp->pr_wchan, pp->pr_nitems);
		panic("pool_get: nitems inconsistent\n");
	}
#endif
	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
		pr_printlog(pp, pi, printf);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		    " item addr %p\n",
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	pp->pr_nitems--;
	pp->pr_nout++;
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (__predict_false(pp->pr_nidle == 0))
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
#ifdef DIAGNOSTIC
		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
			pr_leave(pp);
			simple_unlock(&pp->pr_slock);
			panic("pool_get: %s: nmissing inconsistent",
			    pp->pr_wchan);
		}
#endif
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * Migrate empty pages to the end of the list.  This
		 * will speed the update of curpage as pages become
		 * idle.  Empty pages intermingled with idle pages
		 * is no big deal.  As soon as a page becomes un-empty,
		 * it will move back to the head of the list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;

	/*
	 * If we have a low water mark and we are now below that low
	 * water mark, add more items to the pool.
	 */
	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
	return (v);
}

/*
 * Internal version of pool_put().  Pool is already locked/entered.
 */
static void
pool_do_put(struct pool *pp, void *v, const char *file, long line)
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;
	int s;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

#ifdef DIAGNOSTIC
	if (__predict_false(pp->pr_nout == 0)) {
		printf("pool %s: putting with none out\n",
		    pp->pr_wchan);
		panic("pool_put");
	}
#endif

	pr_log(pp, v, PRLOG_PUT, file, line);

	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
		pr_printlog(pp, NULL, printf);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
#endif

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
	{
		int i, *ip = v;

		for (i = 0; i < pp->pr_size / sizeof(int); i++) {
			*ip++ = PI_MAGIC;
		}
	}
#endif

	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;
	pp->pr_nitems++;
	pp->pr_nout--;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		return;
	}

	/*
	 * If this page is now complete, do one of two things:
	 *
	 * (1) If we have more pages than the page high water
	 *     mark, free the page back to the system.
	 *
	 * (2) Move it to the end of the page list, so that we
	 *     minimize our chances of fragmenting the pool.  Idle
	 *     pages migrate to the end of the list (along with
	 *     completely empty pages, so that we find un-empty pages
	 *     more quickly when we update curpage), where they can
	 *     be more easily swept up by the pagedaemon when pages
	 *     are scarce.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
			pr_rmpage(pp, ph);
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);

			/*
			 * Update the timestamp on the page.  A page must
			 * be idle for some period of time before it can
			 * be reclaimed by the pagedaemon.  This minimizes
			 * ping-pong'ing for memory.
			 */
			s = splclock();
			ph->ph_time = mono_time;
			splx(s);

			/*
			 * Update the current page pointer.  Just look for
			 * the first page with any free items.
			 *
			 * XXX: Maybe we want an option to look for the
			 * page with the fewest available items, to minimize
			 * fragmentation?
			 */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}
	/*
	 * If the page has just become un-empty, move it to the head of
	 * the list, and make it the current page.  The next allocation
	 * will get the item from this page, instead of further fragmenting
	 * the pool.
	 */
	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		pp->pr_curpage = ph;
	}
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
void
_pool_put(struct pool *pp, void *v, const char *file, long line)
{

	simple_lock(&pp->pr_slock);
	pr_enter(pp, file, line);

	pool_do_put(pp, v, file, line);

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}

/*
 * Add a page worth of items to the pool.
 *
 * Note, we must be called with the pool descriptor LOCKED.
 */
static int
pool_prime_page(struct pool *pp, caddr_t storage, int flags)
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int s, n;

	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);

	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		s = splhigh();
		ph = pool_get(&phpool, flags);
		splx(s);
		if (ph == NULL)
			return (ENOMEM);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
		    ph, ph_hashlist);
	}

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	memset(&ph->ph_time, 0, sizeof(ph->ph_time));

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;
	pp->pr_nitems += n;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;

	return (0);
}

/*
 * Used by pool_get() when nitems drops below the low water mark.  This
 * is used to catch up nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
 * Note 2, this doesn't work with static pools.
 *
 * Note 3, we must be called with the pool already locked, and we return
 * with it locked.
 */
static int
pool_catchup(struct pool *pp)
{
	caddr_t cp;
	int error = 0;

	if (pp->pr_roflags & PR_STATIC) {
		/*
		 * We dropped below the low water mark, and this is not a
		 * good thing.  Log a warning.
		 *
		 * XXX: rate-limit this?
		 */
		printf("WARNING: static pool `%s' dropped below low water "
		    "mark\n", pp->pr_wchan);
		return (0);
	}

	while (pp->pr_nitems < pp->pr_minitems) {
		/*
		 * Call the page back-end allocator for more memory.
		 *
		 * XXX: We never wait, so should we bother unlocking
		 * the pool descriptor?
		 */
		simple_unlock(&pp->pr_slock);
		cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		simple_lock(&pp->pr_slock);
		if (__predict_false(cp == NULL)) {
			error = ENOMEM;
			break;
		}
		if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
			(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
			break;
		}
		pp->pr_npagealloc++;
	}

	return (error);
}

void
pool_setlowat(struct pool *pp, int n)
{
	int error;

	simple_lock(&pp->pr_slock);

	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	/* Make sure we're caught up with the newly-set low water mark. */
	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp)) != 0) {
		/*
		 * XXX: Should we log a warning?  Should we set up a timeout
		 * to try again in a second or so?  The latter could break
		 * a caller's assumptions about interrupt protection, etc.
		 */
	}

	simple_unlock(&pp->pr_slock);
}

void
pool_sethiwat(struct pool *pp, int n)
{

	simple_lock(&pp->pr_slock);

	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{

	simple_lock(&pp->pr_slock);

	pp->pr_hardlimit = n;
	pp->pr_hardlimit_warning = warnmess;
	pp->pr_hardlimit_ratecap.tv_sec = ratecap;
	pp->pr_hardlimit_warning_last.tv_sec = 0;
	pp->pr_hardlimit_warning_last.tv_usec = 0;

	/*
	 * In-line version of pool_sethiwat(), because we don't want to
	 * release the lock.
	 */
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;

	simple_unlock(&pp->pr_slock);
}
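
/*
 * Illustrative sketch of the watermark API (pool name hypothetical):
 * keep enough pages around for at least 16 items, start releasing idle
 * pages above 64 items' worth, and refuse to hand out more than 128
 * items at once, warning at most once a minute:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethiwat(&foo_pool, 64);
 *	pool_sethardlimit(&foo_pool, 128, "WARNING: foo_pool full", 60);
 */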

/*
 * Default page allocator.
 */
static void *
pool_page_alloc(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

static void
pool_page_free(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage((vaddr_t)v);
}

/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
}

void
pool_page_free_nointr(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
}
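
/*
 * A pool that is never used from interrupt context can pass the
 * "nointr" pair to pool_init(), e.g. (illustrative sketch; pool and
 * item names hypothetical):
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_TEMP);
 */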


/*
 * Release all complete pages that have not been used recently.
 */
void
_pool_reclaim(struct pool *pp, const char *file, long line)
{
	struct pool_item_header *ph, *phnext;
	struct pool_cache *pc;
	struct timeval curtime;
	int s;

	if (pp->pr_roflags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_slock) == 0)
		return;
	pr_enter(pp, file, line);

	/*
	 * Reclaim items from the pool's caches.
	 */
	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
	     pc = TAILQ_NEXT(pc, pc_poollist))
		pool_cache_reclaim(pc);

	s = splclock();
	curtime = mono_time;
	splx(s);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;

			/*
			 * If freeing this page would put us below
			 * the low water mark, stop now.
			 */
			if ((pp->pr_nitems - pp->pr_itemsperpage) <
			    pp->pr_minitems)
				break;

			pr_rmpage(pp, ph);
		}
	}

	pr_leave(pp);
	simple_unlock(&pp->pr_slock);
}


/*
 * Drain pools, one at a time.
 *
 * Note, we must never be called from an interrupt context.
 */
void
pool_drain(void *arg)
{
	struct pool *pp;
	int s;

	s = splvm();
	simple_lock(&pool_head_slock);

	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
		goto out;

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);

	pool_reclaim(pp);

 out:
	simple_unlock(&pool_head_slock);
	splx(s);
}


/*
 * Diagnostic helpers.
 */
void
pool_print(struct pool *pp, const char *modif)
{
	int s;

	s = splvm();
	if (simple_lock_try(&pp->pr_slock) == 0) {
		printf("pool %s is locked; try again later\n",
		    pp->pr_wchan);
		splx(s);
		return;
	}
	pool_print1(pp, modif, printf);
	simple_unlock(&pp->pr_slock);
	splx(s);
}

void
pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	int didlock = 0;

	if (pp == NULL) {
		(*pr)("Must specify a pool to print.\n");
		return;
	}

	/*
	 * Called from DDB; interrupts should be blocked, and all
	 * other processors should be paused.  We can skip locking
	 * the pool in this case.
	 *
	 * We do a simple_lock_try() just to print the lock
	 * status, however.
	 */

	if (simple_lock_try(&pp->pr_slock) == 0)
		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
	else
		didlock = 1;

	pool_print1(pp, modif, pr);

	if (didlock)
		simple_unlock(&pp->pr_slock);
}

static void
pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
{
	struct pool_item_header *ph;
	struct pool_cache *pc;
	struct pool_cache_group *pcg;
#ifdef DIAGNOSTIC
	struct pool_item *pi;
#endif
	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
	char c;

	while ((c = *modif++) != '\0') {
		if (c == 'l')
			print_log = 1;
		if (c == 'p')
			print_pagelist = 1;
		if (c == 'c')
			print_cache = 1;
	}

	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
	    pp->pr_roflags);
	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);

	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);

	if (print_pagelist == 0)
		goto skip_pagelist;

	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
		(*pr)("\n\tpage list:\n");
	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
		    ph->ph_page, ph->ph_nmissing,
		    (u_long)ph->ph_time.tv_sec,
		    (u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
		for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
		     pi = TAILQ_NEXT(pi, pi_list)) {
			if (pi->pi_magic != PI_MAGIC) {
				(*pr)("\t\t\titem %p, magic 0x%x\n",
				    pi, pi->pi_magic);
			}
		}
#endif
	}
	if (pp->pr_curpage == NULL)
		(*pr)("\tno current page\n");
	else
		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);

 skip_pagelist:

	if (print_log == 0)
		goto skip_log;

	(*pr)("\n");
	if ((pp->pr_roflags & PR_LOGGING) == 0)
		(*pr)("\tno log\n");
	else
		pr_printlog(pp, NULL, pr);

 skip_log:

	if (print_cache == 0)
		goto skip_cache;

	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
	     pc = TAILQ_NEXT(pc, pc_poollist)) {
		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
		    pc->pc_allocfrom, pc->pc_freeto);
		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
			for (i = 0; i < PCG_NOBJECTS; i++)
				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
		}
	}

 skip_cache:

	pr_enter_check(pp, pr);
}

int
pool_chk(struct pool *pp, const char *label)
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_slock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page &&
		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " at page head addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				    " page %p; item ordinal %d;"
				    " addr %p (p %p)\n",
				    pp->pr_wchan, pi->pi_magic, ph->ph_page,
				    n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " item ordinal %d; addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    n, pi, page);
			r++;
			goto out;
		}
	}
 out:
	simple_unlock(&pp->pr_slock);
	return (r);
}

/*
 * pool_cache_init:
 *
 *	Initialize a pool cache.
 *
 *	NOTE: If the pool must be protected from interrupts, we expect
 *	to be called at the appropriate interrupt priority level.
 */
void
pool_cache_init(struct pool_cache *pc, struct pool *pp,
    int (*ctor)(void *, void *, int),
    void (*dtor)(void *, void *),
    void *arg)
{

	TAILQ_INIT(&pc->pc_grouplist);
	simple_lock_init(&pc->pc_slock);

	pc->pc_allocfrom = NULL;
	pc->pc_freeto = NULL;
	pc->pc_pool = pp;

	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;

	pc->pc_hits   = 0;
	pc->pc_misses = 0;

	pc->pc_ngroups = 0;

	pc->pc_nitems = 0;

	simple_lock(&pp->pr_slock);
	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

/*
 * pool_cache_destroy:
 *
 *	Destroy a pool cache.
 */
void
pool_cache_destroy(struct pool_cache *pc)
{
	struct pool *pp = pc->pc_pool;

	/* First, invalidate the entire cache. */
	pool_cache_invalidate(pc);

	/* ...and remove it from the pool's cache list. */
	simple_lock(&pp->pr_slock);
	TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
	simple_unlock(&pp->pr_slock);
}

static __inline void *
pcg_get(struct pool_cache_group *pcg)
{
	void *object;
	u_int idx;

	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
	KASSERT(pcg->pcg_avail != 0);
	idx = --pcg->pcg_avail;

	KASSERT(pcg->pcg_objects[idx] != NULL);
	object = pcg->pcg_objects[idx];
	pcg->pcg_objects[idx] = NULL;

	return (object);
}

static __inline void
pcg_put(struct pool_cache_group *pcg, void *object)
{
	u_int idx;

	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
	idx = pcg->pcg_avail++;

	KASSERT(pcg->pcg_objects[idx] == NULL);
	pcg->pcg_objects[idx] = object;
}
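
/*
 * Note that pcg_objects[] is used as a small LIFO stack: pcg_put()
 * pushes at index pcg_avail and pcg_get() pops the most recently
 * freed object, which tends to hand back cache-warm objects first.
 */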

/*
 * pool_cache_get:
 *
 *	Get an object from a pool cache.
 */
void *
pool_cache_get(struct pool_cache *pc, int flags)
{
	struct pool_cache_group *pcg;
	void *object;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_allocfrom) == NULL) {
		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			if (pcg->pcg_avail != 0) {
				pc->pc_allocfrom = pcg;
				goto have_group;
			}
		}

		/*
		 * No groups with any available objects.  Allocate
		 * a new object, construct it, and return it to
		 * the caller.  We will allocate a group, if necessary,
		 * when the object is freed back to the cache.
		 */
		pc->pc_misses++;
		simple_unlock(&pc->pc_slock);
		object = pool_get(pc->pc_pool, flags);
		if (object != NULL && pc->pc_ctor != NULL) {
			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
				pool_put(pc->pc_pool, object);
				return (NULL);
			}
		}
		return (object);
	}

 have_group:
	pc->pc_hits++;
	pc->pc_nitems--;
	object = pcg_get(pcg);

	if (pcg->pcg_avail == 0)
		pc->pc_allocfrom = NULL;

	simple_unlock(&pc->pc_slock);

	return (object);
}

/*
 * pool_cache_put:
 *
 *	Put an object back to the pool cache.
 */
void
pool_cache_put(struct pool_cache *pc, void *object)
{
	struct pool_cache_group *pcg;

	simple_lock(&pc->pc_slock);

	if ((pcg = pc->pc_freeto) == NULL) {
		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
			if (pcg->pcg_avail != PCG_NOBJECTS) {
				pc->pc_freeto = pcg;
				goto have_group;
			}
		}

		/*
		 * No non-full groups to free the object to.  Attempt
		 * to allocate one.
		 */
		simple_unlock(&pc->pc_slock);
		pcg = pool_get(&pcgpool, PR_NOWAIT);
		if (pcg != NULL) {
			memset(pcg, 0, sizeof(*pcg));
			simple_lock(&pc->pc_slock);
			pc->pc_ngroups++;
			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == NULL)
				pc->pc_freeto = pcg;
			goto have_group;
		}

		/*
		 * Unable to allocate a cache group; destruct the object
		 * and free it back to the pool.
		 */
		pool_cache_destruct_object(pc, object);
		return;
	}

 have_group:
	pc->pc_nitems++;
	pcg_put(pcg, object);

	if (pcg->pcg_avail == PCG_NOBJECTS)
		pc->pc_freeto = NULL;

	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_destruct_object:
 *
 *	Force destruction of an object and its release back into
 *	the pool.
 */
void
pool_cache_destruct_object(struct pool_cache *pc, void *object)
{

	if (pc->pc_dtor != NULL)
		(*pc->pc_dtor)(pc->pc_arg, object);
	pool_put(pc->pc_pool, object);
}

/*
 * pool_cache_do_invalidate:
 *
 *	This internal function implements pool_cache_invalidate() and
 *	pool_cache_reclaim().
 */
static void
pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
    void (*putit)(struct pool *, void *, const char *, long))
{
	struct pool_cache_group *pcg, *npcg;
	void *object;

	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
	     pcg = npcg) {
		npcg = TAILQ_NEXT(pcg, pcg_list);
		while (pcg->pcg_avail != 0) {
			pc->pc_nitems--;
			object = pcg_get(pcg);
			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
				pc->pc_allocfrom = NULL;
			if (pc->pc_dtor != NULL)
				(*pc->pc_dtor)(pc->pc_arg, object);
			(*putit)(pc->pc_pool, object, __FILE__, __LINE__);
		}
		if (free_groups) {
			pc->pc_ngroups--;
			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
			if (pc->pc_freeto == pcg)
				pc->pc_freeto = NULL;
			pool_put(&pcgpool, pcg);
		}
	}
}

/*
 * pool_cache_invalidate:
 *
 *	Invalidate a pool cache (destruct and release all of the
 *	cached objects).
 */
void
pool_cache_invalidate(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 0, _pool_put);
	simple_unlock(&pc->pc_slock);
}

/*
 * pool_cache_reclaim:
 *
 *	Reclaim a pool cache for pool_reclaim().
 */
static void
pool_cache_reclaim(struct pool_cache *pc)
{

	simple_lock(&pc->pc_slock);
	pool_cache_do_invalidate(pc, 1, pool_do_put);
	simple_unlock(&pc->pc_slock);
}