/*	$NetBSD: subr_pool.c,v 1.14 1998/09/22 03:01:29 thorpej Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm.h>
#endif

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
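
/*
 * Illustrative usage sketch (placeholders only: `struct foo', `foopool'
 * and the M_DEVBUF type are stand-ins, not part of this interface): a
 * typical client creates a pool for one object size, then gets and puts
 * items from it:
 *
 *	struct pool *foopool;
 *	struct foo *fp;
 *
 *	foopool = pool_create(sizeof(struct foo), 0, 0, 0,
 *	    "foopl", 0, NULL, NULL, M_DEVBUF);
 *	fp = (struct foo *)pool_get(foopool, PR_WAITOK);
 *	...
 *	pool_put(foopool, fp);
 */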

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool *drainpp = NULL;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
			ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
	LIST_ENTRY(pool_item_header)
			ph_hashlist;	/* Off-page page headers */
	int		ph_nmissing;	/* # of chunks in use */
	caddr_t		ph_page;	/* this page's address */
	struct timeval	ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#define PI_MAGIC 0xdeadbeef
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
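
/*
 * Example: with 4KB pool pages (pr_pageshift == 12), an item at address
 * 0xf0155430 lies in the pool page at 0xf0155000, so its bucket is
 * (0xf0155 & (PR_HASHTABSIZE - 1)); every address within one pool page
 * hashes to the same bucket as the page address itself.
 */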

static struct pool_item_header
		*pr_find_pagehead __P((struct pool *, caddr_t));
static void	pr_rmpage __P((struct pool *, struct pool_item_header *));
static int	pool_prime_page __P((struct pool *, caddr_t));
static void	*pool_page_alloc __P((unsigned long, int, int));
static void	pool_page_free __P((void *, unsigned long, int));
int		pool_chk __P((struct pool *, char *));

#ifdef POOL_DIAGNOSTIC
/*
 * Pool log entry. An array of these is allocated in pool_create().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define PRLOG_GET	1
#define PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
int pool_logsize = 10;

static void	pr_log __P((struct pool *, void *, int, const char *, long));
static void	pr_printlog __P((struct pool *));

static __inline__ void
pr_log(pp, v, action, file, line)
	struct pool *pp;
	void *v;
	int action;
	const char *file;
	long line;
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(pp)
	struct pool *pp;
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	pool_print(pp, "printlog");

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			printf("log entry %d:\n", i);
			printf("\taction = %s, addr = %p\n",
			    pl->pl_action == PRLOG_GET ? "get" : "put",
			    pl->pl_addr);
			printf("\tfile: %s at line %lu\n",
			    pl->pl_file, pl->pl_line);
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}
#else
#define pr_log(pp, v, action, file, line)
#define pr_printlog(pp)
#endif

/*
 * Return the pool page header based on page address.
 */
static __inline__ struct pool_item_header *
pr_find_pagehead(pp, page)
	struct pool *pp;
	caddr_t page;
{
	struct pool_item_header *ph;

	if ((pp->pr_flags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline__ void
pr_rmpage(pp, ph)
	struct pool *pp;
	struct pool_item_header *ph;
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_flags & PR_PHINPAGE) == 0) {
		LIST_REMOVE(ph, ph_hashlist);
		pool_put(&phpool, ph);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Allocate and initialize a pool.
 */
struct pool *
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
	size_t size;
	u_int align;
	u_int ioff;
	int nitems;
	char *wchan;
	size_t pagesz;
	void *(*alloc) __P((unsigned long, int, int));
	void (*release) __P((void *, unsigned long, int));
	int mtype;
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
#ifdef POOL_DIAGNOSTIC
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
	    alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}
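
/*
 * Example (illustrative; `struct foo' and M_DEVBUF are placeholders):
 * passing a non-zero `nitems' reserves space up front, e.g.
 *
 *	pp = pool_create(sizeof(struct foo), 0, 0, 128,
 *	    "foopl", 0, NULL, NULL, M_DEVBUF);
 *
 * primes the pool with at least 128 items (rounded up to whole pages)
 * and returns NULL if either the header or the priming allocation fails.
 */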

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
	struct pool *pp;
	size_t size;
	u_int align;
	u_int ioff;
	int flags;
	char *wchan;
	size_t pagesz;
	void *(*alloc) __P((unsigned long, int, int));
	void (*release) __P((void *, unsigned long, int));
	int mtype;
{
	int off, slack;

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, you must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	if (size < sizeof(struct pool_item))
		size = sizeof(struct pool_item);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	TAILQ_INIT(&pp->pr_pagelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_flags = flags;
	pp->pr_size = ALIGN(size);
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune).
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_flags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
		    pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		memset(pp->pr_hashtab, 0, sizeof(pp->pr_hashtab));
	}

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
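
	/*
	 * Worked example (assuming PAGE_SIZE == 4096 on a 32-bit machine):
	 * for 72-byte items the header fits in the page (72 < 4096/16), so
	 * if ALIGN(sizeof(struct pool_item_header)) is 40, off = 4056,
	 * pr_itemsperpage = 4056 / 72 = 56, slack = 4056 - 56*72 = 24,
	 * giving colors 0, 4, 8, ..., 24 with the default 4-byte alignment.
	 */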

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if ((flags & PR_LOGGING) != 0) {
		pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
		    M_TEMP, M_NOWAIT);
		if (pp->pr_log == NULL)
			pp->pr_flags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	simple_lock_init(&pp->pr_lock);

	/*
	 * Initialize private page header pool if we haven't done so yet.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
		    0, "phpool", 0, 0, 0, 0);
	}

	return;
}
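
/*
 * Sketch of the early-boot use case (names are placeholders): a subsystem
 * that needs a pool before malloc() is available declares it statically
 * and calls pool_init() directly:
 *
 *	static struct pool mbpool;
 *
 *	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", 0, NULL, NULL, M_MBUF);
 */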

/*
 * De-commission a pool resource.
 */
void
pool_destroy(pp)
	struct pool *pp;
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nget - pp->pr_nput != 0) {
		pr_printlog(pp);
		panic("pool_destroy: pool busy: still out: %lu\n",
		    pp->pr_nget - pp->pr_nput);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_flags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	drainpp = NULL;

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_flags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	if (pp->pr_flags & PR_FREEHEADER)
		free(pp, M_POOL);
}

/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void *
_pool_get(pp, flags, file, line)
	struct pool *pp;
	int flags;
	const char *file;
	long line;
#else
void *
pool_get(pp, flags)
	struct pool *pp;
	int flags;
#endif
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if ((pp->pr_flags & PR_STATIC) && (flags & PR_MALLOCOK)) {
		pr_printlog(pp);
		panic("pool_get: static");
	}
#endif

	simple_lock(&pp->pr_lock);
	if (curproc == NULL && (flags & PR_WAITOK) != 0)
		panic("pool_get: must have NOWAIT");

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
again:
	if ((ph = pp->pr_curpage) == NULL) {
		void *v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		if (v == NULL) {
			if (flags & PR_URGENT)
				panic("pool_get: urgent");
			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				simple_unlock(&pp->pr_lock);
				return (NULL);
			}

			pp->pr_flags |= PR_WANTED;
			simple_unlock(&pp->pr_lock);
			tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
			simple_lock(&pp->pr_lock);
		} else {
			pp->pr_npagealloc++;
			pool_prime_page(pp, v);
		}

		goto again;
	}

	if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)
		panic("pool_get: %s: page empty", pp->pr_wchan);

	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (pi->pi_magic != PI_MAGIC) {
		pr_printlog(pp);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		    " item addr %p\n",
		    pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * First, move the now empty page to the head of
		 * the page list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		while ((ph = TAILQ_NEXT(ph, ph_pagelist)) != NULL)
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;
	simple_unlock(&pp->pr_lock);
	return (v);
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(pp, v, file, line)
	struct pool *pp;
	void *v;
	const char *file;
	long line;
#else
void
pool_put(pp, v)
	struct pool *pp;
	void *v;
#endif
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);

	simple_lock(&pp->pr_lock);

	pr_log(pp, v, PRLOG_PUT, file, line);

	if ((ph = pr_find_pagehead(pp, page)) == NULL) {
		pr_printlog(pp);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		wakeup((caddr_t)pp);
		simple_unlock(&pp->pr_lock);
		return;
	}
	/*
	 * If this page is now complete, move it to the end of the pagelist.
	 * If this page has just become un-empty, move it to the head.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
#if 0
			timeout(pool_drain, 0, pool_inactive_time*hz);
#else
			pr_rmpage(pp, ph);
#endif
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
			ph->ph_time = time;

			/* XXX - update curpage */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}

	simple_unlock(&pp->pr_lock);
}
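
/*
 * Usage sketch (hypothetical pool and spl choice): pool_get() and
 * pool_put() must be bracketed by the spl the pool is used at, e.g.
 * for a pool also touched from interrupt context:
 *
 *	s = splimp();
 *	fp = (struct foo *)pool_get(foopool, PR_NOWAIT);
 *	splx(s);
 *	if (fp == NULL)
 *		return (ENOBUFS);
 *	...
 *	s = splimp();
 *	pool_put(foopool, fp);
 *	splx(s);
 */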

/*
 * Add N items to the pool.
 */
int
pool_prime(pp, n, storage)
	struct pool *pp;
	int n;
	caddr_t storage;
{
	caddr_t cp;
	int newnitems, newpages;

#ifdef DIAGNOSTIC
	if (storage && !(pp->pr_flags & PR_STATIC))
		panic("pool_prime: static");
	/* !storage && static caught below */
#endif

	newnitems = pp->pr_minitems + n;
	newpages =
	    roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
	    - pp->pr_minpages;

	simple_lock(&pp->pr_lock);
	while (newpages-- > 0) {

		if (pp->pr_flags & PR_STATIC) {
			cp = storage;
			storage += pp->pr_pagesz;
		} else {
			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		}

		if (cp == NULL) {
			simple_unlock(&pp->pr_lock);
			return (ENOMEM);
		}

		pool_prime_page(pp, cp);
		pp->pr_minpages++;
	}

	pp->pr_minitems = newnitems;

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_lock);
	return (0);
}
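
/*
 * Sketch (placeholder names and sizes): a PR_STATIC pool is primed with
 * caller-supplied storage instead of pages from the back-end allocator:
 *
 *	static char foostore[4 * 4096];		(assuming 4KB pool pages)
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, PR_STATIC,
 *	    "foopl", 0, NULL, NULL, 0);
 *	pool_prime(&foopool, nitems, foostore);
 */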

/*
 * Add a page worth of items to the pool.
 */
int
pool_prime_page(pp, storage)
	struct pool *pp;
	caddr_t storage;
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	if ((pp->pr_flags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		ph = pool_get(&phpool, PR_URGENT);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
		    ph, ph_hashlist);
	}

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time.tv_sec = ph->ph_time.tv_usec = 0;

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;

	return (0);
}

void
pool_setlowat(pp, n)
	pool_handle_t pp;
	int n;
{
	pp->pr_minitems = n;
	if (n == 0) {
		pp->pr_minpages = 0;
		return;
	}
	pp->pr_minpages =
	    roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
}

void
pool_sethiwat(pp, n)
	pool_handle_t pp;
	int n;
{
	if (n == 0) {
		pp->pr_maxpages = 0;
		return;
	}
	pp->pr_maxpages =
	    roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
}
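
/*
 * Example: with pr_itemsperpage == 56, pool_setlowat(pp, 120) computes
 * roundup(120, 56) / 56 == 3, keeping three pages (enough for at least
 * 120 items) resident across pool_reclaim(); pool_sethiwat(pp, 120)
 * likewise caps the pool at three pages.
 */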

/*
 * Default page allocator.
 */
static void *
pool_page_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

#if defined(UVM)
	return ((void *)uvm_km_alloc_poolpage(waitok));
#else
	return ((void *)kmem_alloc_poolpage(waitok));
#endif
}

static void
pool_page_free(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free_poolpage((vaddr_t)v);
#else
	kmem_free_poolpage((vaddr_t)v);
#endif
}

/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
#if defined(UVM)
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	/*
	 * With UVM, we can use the kernel_map.
	 */
	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
#else
	/*
	 * Can't do anything so cool with Mach VM.
	 */
	return (pool_page_alloc(sz, flags, mtype));
#endif
}

void
pool_page_free_nointr(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
#else
	pool_page_free(v, sz, mtype);
#endif
}
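
/*
 * Sketch (hypothetical pool): a pool whose pages are only ever touched
 * in process context passes this allocator pair to pool_create():
 *
 *	pp = pool_create(sizeof(struct foo), 0, 0, 0, "foopl", 0,
 *	    pool_page_alloc_nointr, pool_page_free_nointr, M_DEVBUF);
 */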

/*
 * Release all complete pages that have not been used recently.
 */
void
pool_reclaim(pp)
	pool_handle_t pp;
{
	struct pool_item_header *ph, *phnext;
	struct timeval curtime = time;

	if (pp->pr_flags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_lock) == 0)
		return;

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;
			pr_rmpage(pp, ph);
		}
	}

	simple_unlock(&pp->pr_lock);
}

/*
 * Drain pools, one at a time.
 */
void
pool_drain(arg)
	void *arg;
{
	struct pool *pp;
	int s = splimp();

	/* XXX: lock pool head */
	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) {
		splx(s);
		return;
	}

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);
	/* XXX: unlock pool head */

	pool_reclaim(pp);
	splx(s);
}
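
/*
 * Sketch (hypothetical wiring, mirroring the disabled timeout in
 * pool_put() above): a memory-pressure or timer path could drive
 * reclamation round-robin with, e.g.:
 *
 *	timeout(pool_drain, NULL, pool_inactive_time * hz);
 *
 * Each call reclaims idle pages from one pool and advances `drainpp'
 * to the next.
 */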

#ifdef DEBUG
/*
 * Diagnostic helpers.
 */
void
pool_print(pp, label)
	struct pool *pp;
	char *label;
{

	if (label != NULL)
		printf("%s: ", label);

	printf("pool %s: nalloc %lu nfree %lu npagealloc %lu npagefree %lu\n"
	    "         npages %u minitems %u itemsperpage %u itemoffset %u\n"
	    "         nidle %lu\n",
	    pp->pr_wchan,
	    pp->pr_nget,
	    pp->pr_nput,
	    pp->pr_npagealloc,
	    pp->pr_npagefree,
	    pp->pr_npages,
	    pp->pr_minitems,
	    pp->pr_itemsperpage,
	    pp->pr_itemoffset,
	    pp->pr_nidle);
}

int
pool_chk(pp, label)
	struct pool *pp;
	char *label;
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_lock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): page inconsistency: page %p;"
			    " at page head addr %p (p %p)\n",
			    pp->pr_wchan, ph->ph_page,
			    ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi, pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				    " page %p; item ordinal %d;"
				    " addr %p (p %p)\n",
				    pp->pr_wchan, pi->pi_magic, ph->ph_page,
				    n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): page inconsistency: page %p;"
			    " item ordinal %d; addr %p (p %p)\n",
			    pp->pr_wchan, ph->ph_page,
			    n, pi, page);
			r++;
			goto out;
		}
	}
out:
	simple_unlock(&pp->pr_lock);
	return (r);
}
#endif