/*	$NetBSD: subr_pool.c,v 1.16 1998/12/16 04:28:23 briggs Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm.h>
#endif

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size.  Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header.  The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
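
/*
 * Example usage (an illustrative sketch, not part of this file;
 * `struct foo', `foopool' and the choice of M_DEVBUF are hypothetical):
 *
 *	struct pool *foopool;
 *	struct foo *f;
 *
 *	foopool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *	f = pool_get(foopool, PR_WAITOK);	(may sleep for a page)
 *	...
 *	pool_put(foopool, f);
 *	pool_destroy(foopool);
 */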

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp = NULL;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
		ph_pagelist;		/* pool page list */
	TAILQ_HEAD(,pool_item) ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
		ph_hashlist;		/* Off-page page headers */
	int ph_nmissing;		/* # of chunks in use */
	caddr_t ph_page;		/* this page's address */
	struct timeval ph_time;		/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#define	PI_MAGIC 0xdeadbeef
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item) pi_list;
};


#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
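
/*
 * For example, with a 4 KB page size (pr_pageshift == 12) and assuming
 * PR_HASHTABSIZE is 8, an item on the page at address 0xc0123000 hashes
 * to bucket (0xc0123 & 7) == 3.  (Illustrative numbers only.)
 */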



static struct pool_item_header
		*pr_find_pagehead __P((struct pool *, caddr_t));
static void	pr_rmpage __P((struct pool *, struct pool_item_header *));
static int	pool_prime_page __P((struct pool *, caddr_t));
static void	*pool_page_alloc __P((unsigned long, int, int));
static void	pool_page_free __P((void *, unsigned long, int));


#ifdef POOL_DIAGNOSTIC
/*
 * Pool log entry.  An array of these is allocated in pool_create().
 */
struct pool_log {
	const char *pl_file;
	long pl_line;
	int pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void *pl_addr;
};

/* Number of entries in pool log buffers */
int pool_logsize = 10;

static void	pr_log __P((struct pool *, void *, int, const char *, long));
static void	pr_printlog __P((struct pool *));

static __inline__ void
pr_log(pp, v, action, file, line)
	struct pool *pp;
	void *v;
	int action;
	const char *file;
	long line;
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry.  Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(pp)
	struct pool *pp;
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	pool_print(pp, "printlog");

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			printf("log entry %d:\n", i);
			printf("\taction = %s, addr = %p\n",
				pl->pl_action == PRLOG_GET ? "get" : "put",
				pl->pl_addr);
			printf("\tfile: %s at line %ld\n",
				pl->pl_file, pl->pl_line);
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp)
#endif


/*
 * Return the pool page header based on page address.
 */
static __inline__ struct pool_item_header *
pr_find_pagehead(pp, page)
	struct pool *pp;
	caddr_t page;
{
	struct pool_item_header *ph;

	if ((pp->pr_flags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline__ void
pr_rmpage(pp, ph)
	struct pool *pp;
	struct pool_item_header *ph;
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_flags & PR_PHINPAGE) == 0) {
		LIST_REMOVE(ph, ph_hashlist);
		pool_put(&phpool, ph);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Allocate and initialize a pool.
 */
struct pool *
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
	size_t size;
	u_int align;
	u_int ioff;
	int nitems;
	char *wchan;
	size_t pagesz;
	void *(*alloc) __P((unsigned long, int, int));
	void (*release) __P((void *, unsigned long, int));
	int mtype;
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
#ifdef POOL_DIAGNOSTIC
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
	    alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
	struct pool *pp;
	size_t size;
	u_int align;
	u_int ioff;
	int flags;
	char *wchan;
	size_t pagesz;
	void *(*alloc) __P((unsigned long, int, int));
	void (*release) __P((void *, unsigned long, int));
	int mtype;
{
	int off, slack, i;

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if (alloc == NULL || release == NULL) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	TAILQ_INIT(&pp->pr_pagelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_flags = flags;
	pp->pr_size = ALIGN(size);
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page.  Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune).
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_flags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
			pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		for (i = 0; i < PR_HASHTABSIZE; i++) {
			LIST_INIT(&pp->pr_hashtab[i]);
		}
	}
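
	/*
	 * Worked example (illustrative sizes): with pagesz == 4096 the
	 * in-page threshold is 4096/16 == 256 bytes.  A 64-byte item
	 * keeps its header in the page, at off == 4096 -
	 * ALIGN(sizeof(struct pool_item_header)); a 512-byte item
	 * instead gets an off-page header from `phpool' and may use
	 * the whole page for items.
	 */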

	/*
	 * Alignment is to take place at `ioff' within the item.  This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
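
	/*
	 * Worked example (illustrative numbers): assume off == 4056
	 * after an in-page header, pr_size == 96, align == 16 and
	 * ioff == 0.  Then pr_itemsperpage == 4056/96 == 42, slack ==
	 * 4056 - 42*96 == 24 and pr_maxcolor == (24/16)*16 == 16, so
	 * successive pages lay out their items starting at offsets 0
	 * and 16, staggering cache lines across pages.
	 */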

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if ((flags & PR_LOGGING) != 0) {
		pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		    M_TEMP, M_NOWAIT);
		if (pp->pr_log == NULL)
			pp->pr_flags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	simple_lock_init(&pp->pr_lock);
	lockinit(&pp->pr_resourcelock, PSWP, wchan, 0, 0);

	/*
	 * Initialize private page header pool if we haven't done so yet.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
	}

	return;
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(pp)
	struct pool *pp;
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nget - pp->pr_nput != 0) {
		pr_printlog(pp);
		panic("pool_destroy: pool busy: still out: %lu\n",
		    pp->pr_nget - pp->pr_nput);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_flags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	drainpp = NULL;

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_flags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	if (pp->pr_flags & PR_FREEHEADER)
		free(pp, M_POOL);
}


/*
 * Grab an item from the pool; must be called at appropriate spl level.
 */
#ifdef POOL_DIAGNOSTIC
void *
_pool_get(pp, flags, file, line)
	struct pool *pp;
	int flags;
	const char *file;
	long line;
#else
void *
pool_get(pp, flags)
	struct pool *pp;
	int flags;
#endif
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if ((pp->pr_flags & PR_STATIC) && (flags & PR_MALLOCOK)) {
		pr_printlog(pp);
		panic("pool_get: static");
	}
#endif

	simple_lock(&pp->pr_lock);
	if (curproc == NULL && (flags & PR_WAITOK) != 0)
		panic("pool_get: must have NOWAIT");

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket.  In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
	while ((ph = pp->pr_curpage) == NULL) {
		void *v;
		int lkflags = LK_EXCLUSIVE | LK_INTERLOCK |
			      ((flags & PR_WAITOK) == 0 ? LK_NOWAIT : 0);

		/* Get long-term lock on pool */
		if (lockmgr(&pp->pr_resourcelock, lkflags, &pp->pr_lock) != 0)
			return (NULL);

		/* Check if pool became non-empty while we slept */
		if ((ph = pp->pr_curpage) != NULL)
			goto again;

		/* Call the page back-end allocator for more memory */
		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		if (v == NULL) {
			if (flags & PR_URGENT)
				panic("pool_get: urgent");
			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
				return (NULL);
			}

			/*
			 * Wait for items to be returned to this pool.
			 * XXX: we actually want to wait just until
			 * the page allocator has memory again.  Depending
			 * on this pool's usage, we might get stuck here
			 * for a long time.
			 */
			pp->pr_flags |= PR_WANTED;
			lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
			tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
			simple_lock(&pp->pr_lock);
			continue;
		}

		/* We have more memory; add it to the pool */
		pp->pr_npagealloc++;
		pool_prime_page(pp, v);

again:
		/* Re-acquire pool interlock */
		simple_lock(&pp->pr_lock);
		lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
	}

	if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)
		panic("pool_get: %s: page empty", pp->pr_wchan);

	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (pi->pi_magic != PI_MAGIC) {
		pr_printlog(pp);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		    " item addr %p\n",
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * First, move the now empty page to the head of
		 * the page list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		while ((ph = TAILQ_NEXT(ph, ph_pagelist)) != NULL)
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;
	simple_unlock(&pp->pr_lock);
	return (v);
}
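
/*
 * Illustrative call pattern for a pool shared with interrupt code
 * (`foopool' and the choice of splimp() are hypothetical):
 *
 *	s = splimp();
 *	f = pool_get(foopool, 0);	(no PR_WAITOK: may return NULL)
 *	splx(s);
 */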

/*
 * Return resource to the pool; must be called at appropriate spl level.
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(pp, v, file, line)
	struct pool *pp;
	void *v;
	const char *file;
	long line;
#else
void
pool_put(pp, v)
	struct pool *pp;
	void *v;
#endif
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

	simple_lock(&pp->pr_lock);

	pr_log(pp, v, PRLOG_PUT, file, line);

	if ((ph = pr_find_pagehead(pp, page)) == NULL) {
		pr_printlog(pp);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		if (ph->ph_nmissing == 0)
			pp->pr_nidle++;
		wakeup((caddr_t)pp);
		simple_unlock(&pp->pr_lock);
		return;
	}

	/*
	 * If this page is now complete, move it to the end of the pagelist.
	 * If this page has just become un-empty, move it to the head.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
#if 0
			timeout(pool_drain, 0, pool_inactive_time*hz);
#else
			pr_rmpage(pp, ph);
#endif
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
			ph->ph_time = time;

			/* XXX - update curpage */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}

	simple_unlock(&pp->pr_lock);
}

/*
 * Add N items to the pool.
 */
int
pool_prime(pp, n, storage)
	struct pool *pp;
	int n;
	caddr_t storage;
{
	caddr_t cp;
	int newnitems, newpages;

#ifdef DIAGNOSTIC
	if (storage && !(pp->pr_flags & PR_STATIC))
		panic("pool_prime: static");
	/* !storage && static caught below */
#endif

	(void)lockmgr(&pp->pr_resourcelock, LK_EXCLUSIVE, NULL);
	newnitems = pp->pr_minitems + n;
	newpages =
		roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
		- pp->pr_minpages;

	while (newpages-- > 0) {

		if (pp->pr_flags & PR_STATIC) {
			cp = storage;
			storage += pp->pr_pagesz;
		} else {
			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		}

		if (cp == NULL) {
			(void)lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
			return (ENOMEM);
		}

		pool_prime_page(pp, cp);
		pp->pr_minpages++;
	}

	pp->pr_minitems = newnitems;

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	(void)lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
	return (0);
}
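
/*
 * Illustrative use (hypothetical `foopool'): reserve room for 128
 * items up front so early pool_get() calls need not hit the page
 * back-end allocator:
 *
 *	if (pool_prime(foopool, 128, NULL) != 0)
 *		panic("foopool: can't prime");
 */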

/*
 * Add a page worth of items to the pool.
 */
int
pool_prime_page(pp, storage)
	struct pool *pp;
	caddr_t storage;
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	simple_lock(&pp->pr_lock);

	if ((pp->pr_flags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		ph = pool_get(&phpool, PR_URGENT);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
		    ph, ph_hashlist);
	}

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time.tv_sec = ph->ph_time.tv_usec = 0;

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;

	simple_unlock(&pp->pr_lock);
	return (0);
}

void
pool_setlowat(pp, n)
	pool_handle_t pp;
	int n;
{

	(void)lockmgr(&pp->pr_resourcelock, LK_EXCLUSIVE, NULL);
	pp->pr_minitems = n;
	pp->pr_minpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
	(void)lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
}

void
pool_sethiwat(pp, n)
	pool_handle_t pp;
	int n;
{

	(void)lockmgr(&pp->pr_resourcelock, LK_EXCLUSIVE, NULL);
	pp->pr_maxpages = (n == 0)
		? 0
		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
	(void)lockmgr(&pp->pr_resourcelock, LK_RELEASE, NULL);
}
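
/*
 * E.g., with 42 items per page, pool_setlowat(pp, 64) keeps
 * roundup(64, 42) / 42 == 2 pages resident across pool_reclaim(),
 * while pool_sethiwat(pp, 64) caps the pool at the same two pages
 * before pool_put() starts releasing idle ones.  (Illustrative
 * numbers only.)
 */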


/*
 * Default page allocator.
 */
static void *
pool_page_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

#if defined(UVM)
	return ((void *)uvm_km_alloc_poolpage(waitok));
#else
	return ((void *)kmem_alloc_poolpage(waitok));
#endif
}

static void
pool_page_free(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free_poolpage((vaddr_t)v);
#else
	kmem_free_poolpage((vaddr_t)v);
#endif
}

/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
#if defined(UVM)
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	/*
	 * With UVM, we can use the kernel_map.
	 */
	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
#else
	/*
	 * Can't do anything so cool with Mach VM.
	 */
	return (pool_page_alloc(sz, flags, mtype));
#endif
}

void
pool_page_free_nointr(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
#else
	pool_page_free(v, sz, mtype);
#endif
}
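
/*
 * Illustrative pool_create() call selecting this back-end for a pool
 * only ever used from process context (names hypothetical):
 *
 *	pp = pool_create(sizeof(struct foo), 0, 0, 0, "foopl", 0,
 *	    pool_page_alloc_nointr, pool_page_free_nointr, M_TEMP);
 */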


/*
 * Release all complete pages that have not been used recently.
 */
void
pool_reclaim(pp)
	pool_handle_t pp;
{
	struct pool_item_header *ph, *phnext;
	struct timeval curtime = time;

	if (pp->pr_flags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_lock) == 0)
		return;

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;
			pr_rmpage(pp, ph);
		}
	}

	simple_unlock(&pp->pr_lock);
}


/*
 * Drain pools, one at a time.
 */
void
pool_drain(arg)
	void *arg;
{
	struct pool *pp;
	int s = splimp();

	/* XXX: lock pool head */
	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) {
		splx(s);
		return;
	}

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);
	/* XXX: unlock pool head */

	pool_reclaim(pp);
	splx(s);
}
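
/*
 * Illustrative caller sketch: a memory-pressure handler could invoke
 * this periodically, releasing the idle pages of one pool per call so
 * no single pool bears the whole cost (the predicate below is
 * hypothetical):
 *
 *	while (free_pages_are_low())
 *		pool_drain(NULL);
 */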


#ifdef DEBUG
/*
 * Diagnostic helpers.
 */
void
pool_print(pp, label)
	struct pool *pp;
	char *label;
{

	if (label != NULL)
		printf("%s: ", label);

	printf("pool %s: nalloc %lu nfree %lu npagealloc %lu npagefree %lu\n"
	    "         npages %u minitems %u itemsperpage %u itemoffset %u\n"
	    "         nidle %lu\n",
	    pp->pr_wchan,
	    pp->pr_nget,
	    pp->pr_nput,
	    pp->pr_npagealloc,
	    pp->pr_npagefree,
	    pp->pr_npages,
	    pp->pr_minitems,
	    pp->pr_itemsperpage,
	    pp->pr_itemoffset,
	    pp->pr_nidle);
}

int
pool_chk(pp, label)
	struct pool *pp;
	char *label;
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_lock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page && (pp->pr_flags & PR_PHINPAGE) != 0) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " at page head addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				    " page %p; item ordinal %d;"
				    " addr %p (p %p)\n",
				    pp->pr_wchan, pi->pi_magic, ph->ph_page,
				    n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%p:%s): page inconsistency: page %p;"
			    " item ordinal %d; addr %p (p %p)\n", pp,
			    pp->pr_wchan, ph->ph_page,
			    n, pi, page);
			r++;
			goto out;
		}
	}
out:
	simple_unlock(&pp->pr_lock);
	return (r);
}
#endif