/*	$NetBSD: subr_pool.c,v 1.12 1998/08/28 21:18:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm.h>
#endif

/*
 * Pool resource management utility.
 *
 * Memory is allocated in pages which are split into pieces according
 * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
 * in the pool structure and the individual pool items are on a linked list
 * headed by `ph_itemlist' in each page header. The memory for building
 * the page list is either taken from the allocated pages themselves (for
 * small pool items) or taken from an internal pool of page headers (`phpool').
 */
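
/*
 * Illustrative usage sketch (added for exposition, not part of the
 * original code): a typical client creates a pool for one fixed-size
 * structure and then gets and puts items from it.  `struct foo',
 * `foo_pool' and the M_DEVBUF malloc type are hypothetical names used
 * only for this example; the calls match the interfaces defined below.
 *
 *	struct pool *foo_pool;
 *	struct foo *fp;
 *
 *	foo_pool = pool_create(sizeof(struct foo), 0, 0, 0, "foopl",
 *	    0, NULL, NULL, M_DEVBUF);
 *	fp = (struct foo *)pool_get(foo_pool, PR_WAITOK);
 *	...
 *	pool_put(foo_pool, fp);
 */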

/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

/* Private pool for page header structures */
static struct pool phpool;

/* # of seconds to retain page after last use */
int pool_inactive_time = 10;

/* Next candidate for drainage (see pool_drain()) */
static struct pool	*drainpp = NULL;

struct pool_item_header {
	/* Page headers */
	TAILQ_ENTRY(pool_item_header)
				ph_pagelist;	/* pool page list */
	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
	LIST_ENTRY(pool_item_header)
				ph_hashlist;	/* Off-page page headers */
	int			ph_nmissing;	/* # of chunks in use */
	caddr_t			ph_page;	/* this page's address */
	struct timeval		ph_time;	/* last referenced */
};

struct pool_item {
#ifdef DIAGNOSTIC
	int pi_magic;
#define PI_MAGIC 0xdeadbeef
#endif
	/* Other entries use only this list entry */
	TAILQ_ENTRY(pool_item)	pi_list;
};

#define	PR_HASH_INDEX(pp,addr) \
	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
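
/*
 * Worked example (exposition only): assuming a 4 KB page size
 * (pr_pageshift == 12) and PR_HASHTABSIZE == 8 (the value comes from
 * <sys/pool.h>), an item on the page at address 0x12345000 hashes to
 * bucket (0x12345000 >> 12) & 7 == 0x12345 & 7 == 5.
 */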


static struct pool_item_header
		*pr_find_pagehead __P((struct pool *, caddr_t));
static void	pr_rmpage __P((struct pool *, struct pool_item_header *));
static int	pool_prime_page __P((struct pool *, caddr_t));
static void	*pool_page_alloc __P((unsigned long, int, int));
static void	pool_page_free __P((void *, unsigned long, int));
int pool_chk __P((struct pool *, char *));


#ifdef POOL_DIAGNOSTIC
/*
 * Pool log entry. An array of these is allocated in pool_create().
 */
struct pool_log {
	const char	*pl_file;
	long		pl_line;
	int		pl_action;
#define	PRLOG_GET	1
#define	PRLOG_PUT	2
	void		*pl_addr;
};

/* Number of entries in pool log buffers */
int pool_logsize = 10;

static void	pr_log __P((struct pool *, void *, int, const char *, long));
static void	pr_printlog __P((struct pool *));

static __inline__ void
pr_log(pp, v, action, file, line)
	struct pool	*pp;
	void		*v;
	int		action;
	const char	*file;
	long		line;
{
	int n = pp->pr_curlogentry;
	struct pool_log *pl;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	/*
	 * Fill in the current entry. Wrap around and overwrite
	 * the oldest entry if necessary.
	 */
	pl = &pp->pr_log[n];
	pl->pl_file = file;
	pl->pl_line = line;
	pl->pl_action = action;
	pl->pl_addr = v;
	if (++n >= pp->pr_logsize)
		n = 0;
	pp->pr_curlogentry = n;
}

static void
pr_printlog(pp)
	struct pool *pp;
{
	int i = pp->pr_logsize;
	int n = pp->pr_curlogentry;

	if ((pp->pr_flags & PR_LOGGING) == 0)
		return;

	pool_print(pp, "printlog");

	/*
	 * Print all entries in this pool's log.
	 */
	while (i-- > 0) {
		struct pool_log *pl = &pp->pr_log[n];
		if (pl->pl_action != 0) {
			printf("log entry %d:\n", i);
			printf("\taction = %s, addr = %p\n",
				pl->pl_action == PRLOG_GET ? "get" : "put",
				pl->pl_addr);
			printf("\tfile: %s at line %lu\n",
				pl->pl_file, pl->pl_line);
		}
		if (++n >= pp->pr_logsize)
			n = 0;
	}
}
#else
#define	pr_log(pp, v, action, file, line)
#define	pr_printlog(pp)
#endif


/*
 * Return the pool page header based on page address.
 */
static __inline__ struct pool_item_header *
pr_find_pagehead(pp, page)
	struct pool *pp;
	caddr_t page;
{
	struct pool_item_header *ph;

	if ((pp->pr_flags & PR_PHINPAGE) != 0)
		return ((struct pool_item_header *)(page + pp->pr_phoffset));

	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
	     ph != NULL;
	     ph = LIST_NEXT(ph, ph_hashlist)) {
		if (ph->ph_page == page)
			return (ph);
	}
	return (NULL);
}

/*
 * Remove a page from the pool.
 */
static __inline__ void
pr_rmpage(pp, ph)
	struct pool *pp;
	struct pool_item_header *ph;
{

	/*
	 * If the page was idle, decrement the idle page count.
	 */
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pr_rmpage: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}

	/*
	 * Unlink a page from the pool and release it.
	 */
	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
	pp->pr_npages--;
	pp->pr_npagefree++;

	if ((pp->pr_flags & PR_PHINPAGE) == 0) {
		LIST_REMOVE(ph, ph_hashlist);
		pool_put(&phpool, ph);
	}

	if (pp->pr_curpage == ph) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase the
		 * chance for "high water" pages to be freed.
		 */
		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
		     ph = TAILQ_NEXT(ph, ph_pagelist))
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}
}

/*
 * Allocate and initialize a pool.
 */
struct pool *
pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype)
	size_t	size;
	u_int	align;
	u_int	ioff;
	int	nitems;
	char	*wchan;
	size_t	pagesz;
	void	*(*alloc) __P((unsigned long, int, int));
	void	(*release) __P((void *, unsigned long, int));
	int	mtype;
{
	struct pool *pp;
	int flags;

	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
	if (pp == NULL)
		return (NULL);

	flags = PR_FREEHEADER;
#ifdef POOL_DIAGNOSTIC
	if (pool_logsize != 0)
		flags |= PR_LOGGING;
#endif

	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
		  alloc, release, mtype);

	if (nitems != 0) {
		if (pool_prime(pp, nitems, NULL) != 0) {
			pool_destroy(pp);
			return (NULL);
		}
	}

	return (pp);
}

/*
 * Initialize the given pool resource structure.
 *
 * We export this routine to allow other kernel parts to declare
 * static pools that must be initialized before malloc() is available.
 */
void
pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype)
	struct pool	*pp;
	size_t		size;
	u_int		align;
	u_int		ioff;
	int		flags;
	char		*wchan;
	size_t		pagesz;
	void		*(*alloc) __P((unsigned long, int, int));
	void		(*release) __P((void *, unsigned long, int));
	int		mtype;
{
	int off, slack;

	/*
	 * Check arguments and construct default values.
	 */
	if (!powerof2(pagesz) || pagesz > PAGE_SIZE)
		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);

	if (alloc == NULL && release == NULL) {
		alloc = pool_page_alloc;
		release = pool_page_free;
		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
	} else if ((alloc != NULL && release != NULL) == 0) {
		/* If you specify one, must specify both. */
		panic("pool_init: must specify alloc and release together");
	}

	if (pagesz == 0)
		pagesz = PAGE_SIZE;

	if (align == 0)
		align = ALIGN(1);

	/*
	 * Initialize the pool structure.
	 */
	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
	TAILQ_INIT(&pp->pr_pagelist);
	pp->pr_curpage = NULL;
	pp->pr_npages = 0;
	pp->pr_minitems = 0;
	pp->pr_minpages = 0;
	pp->pr_maxpages = UINT_MAX;
	pp->pr_flags = flags;
	pp->pr_size = ALIGN(size);
	pp->pr_align = align;
	pp->pr_wchan = wchan;
	pp->pr_mtype = mtype;
	pp->pr_alloc = alloc;
	pp->pr_free = release;
	pp->pr_pagesz = pagesz;
	pp->pr_pagemask = ~(pagesz - 1);
	pp->pr_pageshift = ffs(pagesz) - 1;

	/*
	 * Decide whether to put the page header off page to avoid
	 * wasting too large a part of the page. Off-page page headers
	 * go on a hash table, so we can match a returned item
	 * with its header based on the page address.
	 * We use 1/16 of the page size as the threshold (XXX: tune)
	 */
	if (pp->pr_size < pagesz/16) {
		/* Use the end of the page for the page header */
		pp->pr_flags |= PR_PHINPAGE;
		pp->pr_phoffset = off =
			pagesz - ALIGN(sizeof(struct pool_item_header));
	} else {
		/* The page header will be taken from our page header pool */
		pp->pr_phoffset = 0;
		off = pagesz;
		memset(pp->pr_hashtab, 0, sizeof(pp->pr_hashtab));
	}
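
	/*
	 * Worked example (exposition only): assuming 4 KB pages, the
	 * 1/16 threshold above is 256 bytes.  A pool of 128-byte items
	 * keeps its header at the end of each page (PR_PHINPAGE), while
	 * a pool of 512-byte items allocates headers from `phpool' and
	 * hashes them by page address.
	 */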

	/*
	 * Alignment is to take place at `ioff' within the item. This means
	 * we must reserve up to `align - 1' bytes on the page to allow
	 * appropriate positioning of each item.
	 *
	 * Silently enforce `0 <= ioff < align'.
	 */
	pp->pr_itemoffset = ioff = ioff % align;
	pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
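
	/*
	 * Worked example (exposition only): a pool of 512-byte items with
	 * default alignment and ioff == 0 on 4 KB pages uses an off-page
	 * header, so off == 4096 and pr_itemsperpage == 4096 / 512 == 8.
	 */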

	/*
	 * Use the slack between the chunks and the page header
	 * for "cache coloring".
	 */
	slack = off - pp->pr_itemsperpage * pp->pr_size;
	pp->pr_maxcolor = (slack / align) * align;
	pp->pr_curcolor = 0;
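
	/*
	 * Worked example (exposition only, assuming ALIGN(1) == 8 and
	 * 4 KB pages): a pool of 368-byte items fits 11 items per page,
	 * leaving slack = 4096 - 11 * 368 = 48, so pr_maxcolor == 48 and
	 * successive pages start their first item at offsets
	 * 0, 8, 16, ..., 48 before wrapping back to 0.
	 */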

	pp->pr_nget = 0;
	pp->pr_nfail = 0;
	pp->pr_nput = 0;
	pp->pr_npagealloc = 0;
	pp->pr_npagefree = 0;
	pp->pr_hiwat = 0;
	pp->pr_nidle = 0;

#ifdef POOL_DIAGNOSTIC
	if ((flags & PR_LOGGING) != 0) {
		pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
				    M_TEMP, M_NOWAIT);
		if (pp->pr_log == NULL)
			pp->pr_flags &= ~PR_LOGGING;
		pp->pr_curlogentry = 0;
		pp->pr_logsize = pool_logsize;
	}
#endif

	simple_lock_init(&pp->pr_lock);

	/*
	 * Initialize private page header pool if we haven't done so yet.
	 */
	if (phpool.pr_size == 0) {
		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
			  0, "phpool", 0, 0, 0, 0);
	}

	return;
}

/*
 * De-commission a pool resource.
 */
void
pool_destroy(pp)
	struct pool *pp;
{
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if (pp->pr_nget - pp->pr_nput != 0) {
		pr_printlog(pp);
		panic("pool_destroy: pool busy: still out: %lu\n",
		      pp->pr_nget - pp->pr_nput);
	}
#endif

	/* Remove all pages */
	if ((pp->pr_flags & PR_STATIC) == 0)
		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
			pr_rmpage(pp, ph);

	/* Remove from global pool list */
	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
	drainpp = NULL;

#ifdef POOL_DIAGNOSTIC
	if ((pp->pr_flags & PR_LOGGING) != 0)
		free(pp->pr_log, M_TEMP);
#endif

	if (pp->pr_flags & PR_FREEHEADER)
		free(pp, M_POOL);
}


/*
 * Grab an item from the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void *
_pool_get(pp, flags, file, line)
	struct pool *pp;
	int flags;
	const char *file;
	long line;
#else
void *
pool_get(pp, flags)
	struct pool *pp;
	int flags;
#endif
{
	void *v;
	struct pool_item *pi;
	struct pool_item_header *ph;

#ifdef DIAGNOSTIC
	if ((pp->pr_flags & PR_STATIC) && (flags & PR_MALLOCOK)) {
		pr_printlog(pp);
		panic("pool_get: static");
	}
#endif

	simple_lock(&pp->pr_lock);
	if (curproc == NULL && (flags & PR_WAITOK) != 0)
		panic("pool_get: must have NOWAIT");

	/*
	 * The convention we use is that if `curpage' is not NULL, then
	 * it points at a non-empty bucket. In particular, `curpage'
	 * never points at a page header which has PR_PHINPAGE set and
	 * has no items in its bucket.
	 */
again:
	if ((ph = pp->pr_curpage) == NULL) {
		void *v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
		if (v == NULL) {
			if (flags & PR_URGENT)
				panic("pool_get: urgent");
			if ((flags & PR_WAITOK) == 0) {
				pp->pr_nfail++;
				simple_unlock(&pp->pr_lock);
				return (NULL);
			}

			pp->pr_flags |= PR_WANTED;
			simple_unlock(&pp->pr_lock);
			tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0);
			simple_lock(&pp->pr_lock);
		} else {
			pp->pr_npagealloc++;
			pool_prime_page(pp, v);
		}

		goto again;
	}

	if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)
		panic("pool_get: %s: page empty", pp->pr_wchan);

	pr_log(pp, v, PRLOG_GET, file, line);

#ifdef DIAGNOSTIC
	if (pi->pi_magic != PI_MAGIC) {
		pr_printlog(pp);
		panic("pool_get(%s): free list modified: magic=%x; page %p;"
		      " item addr %p\n",
			pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
	}
#endif

	/*
	 * Remove from item list.
	 */
	TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
	if (ph->ph_nmissing == 0) {
#ifdef DIAGNOSTIC
		if (pp->pr_nidle == 0)
			panic("pool_get: nidle inconsistent");
#endif
		pp->pr_nidle--;
	}
	ph->ph_nmissing++;
	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
		/*
		 * Find a new non-empty page header, if any.
		 * Start search from the page head, to increase
		 * the chance for "high water" pages to be freed.
		 *
		 * First, move the now empty page to the head of
		 * the page list.
		 */
		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
		while ((ph = TAILQ_NEXT(ph, ph_pagelist)) != NULL)
			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
				break;

		pp->pr_curpage = ph;
	}

	pp->pr_nget++;
	simple_unlock(&pp->pr_lock);
	return (v);
}

/*
 * Return resource to the pool; must be called at appropriate spl level
 */
#ifdef POOL_DIAGNOSTIC
void
_pool_put(pp, v, file, line)
	struct pool *pp;
	void *v;
	const char *file;
	long line;
#else
void
pool_put(pp, v)
	struct pool *pp;
	void *v;
#endif
{
	struct pool_item *pi = v;
	struct pool_item_header *ph;
	caddr_t page;

	page = (caddr_t)((u_long)v & pp->pr_pagemask);
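	/*
	 * Exposition only: with 4 KB pages pr_pagemask is ~0xfff, so an
	 * item at, say, 0xf0012345 maps back to its page base 0xf0012000,
	 * which pr_find_pagehead() uses to locate the page header.
	 */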

	simple_lock(&pp->pr_lock);

	pr_log(pp, v, PRLOG_PUT, file, line);

	if ((ph = pr_find_pagehead(pp, page)) == NULL) {
		pr_printlog(pp);
		panic("pool_put: %s: page header missing", pp->pr_wchan);
	}

	/*
	 * Return to item list.
	 */
#ifdef DIAGNOSTIC
	pi->pi_magic = PI_MAGIC;
#endif
	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
	ph->ph_nmissing--;
	pp->pr_nput++;

	/* Cancel "pool empty" condition if it exists */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (pp->pr_flags & PR_WANTED) {
		pp->pr_flags &= ~PR_WANTED;
		wakeup((caddr_t)pp);
		simple_unlock(&pp->pr_lock);
		return;
	}

	/*
	 * If this page is now complete, move it to the end of the pagelist.
	 * If this page has just become un-empty, move it to the head.
	 */
	if (ph->ph_nmissing == 0) {
		pp->pr_nidle++;
		if (pp->pr_npages > pp->pr_maxpages) {
#if 0
			timeout(pool_drain, 0, pool_inactive_time*hz);
#else
			pr_rmpage(pp, ph);
#endif
		} else {
			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
			ph->ph_time = time;

			/* XXX - update curpage */
			for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
			     ph = TAILQ_NEXT(ph, ph_pagelist))
				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
					break;

			pp->pr_curpage = ph;
		}
	}

	simple_unlock(&pp->pr_lock);
}

/*
 * Add N items to the pool.
 */
int
pool_prime(pp, n, storage)
	struct pool *pp;
	int n;
	caddr_t storage;
{
	caddr_t cp;
	int newnitems, newpages;

#ifdef DIAGNOSTIC
	if (storage && !(pp->pr_flags & PR_STATIC))
		panic("pool_prime: static");
	/* !storage && static caught below */
#endif

	newnitems = pp->pr_minitems + n;
	newpages =
		roundup(pp->pr_itemsperpage,newnitems) / pp->pr_itemsperpage
		- pp->pr_minpages;

	simple_lock(&pp->pr_lock);
	while (newpages-- > 0) {

		if (pp->pr_flags & PR_STATIC) {
			cp = storage;
			storage += pp->pr_pagesz;
		} else {
			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
		}

		if (cp == NULL) {
			simple_unlock(&pp->pr_lock);
			return (ENOMEM);
		}

		pool_prime_page(pp, cp);
		pp->pr_minpages++;
	}

	pp->pr_minitems = newnitems;

	if (pp->pr_minpages >= pp->pr_maxpages)
		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */

	simple_unlock(&pp->pr_lock);
	return (0);
}

/*
 * Add a page worth of items to the pool.
 */
int
pool_prime_page(pp, storage)
	struct pool *pp;
	caddr_t storage;
{
	struct pool_item *pi;
	struct pool_item_header *ph;
	caddr_t cp = storage;
	unsigned int align = pp->pr_align;
	unsigned int ioff = pp->pr_itemoffset;
	int n;

	if ((pp->pr_flags & PR_PHINPAGE) != 0) {
		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
	} else {
		ph = pool_get(&phpool, PR_URGENT);
		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
				 ph, ph_hashlist);
	}

	/*
	 * Insert page header.
	 */
	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
	TAILQ_INIT(&ph->ph_itemlist);
	ph->ph_page = storage;
	ph->ph_nmissing = 0;
	ph->ph_time.tv_sec = ph->ph_time.tv_usec = 0;

	pp->pr_nidle++;

	/*
	 * Color this page.
	 */
	cp = (caddr_t)(cp + pp->pr_curcolor);
	if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
		pp->pr_curcolor = 0;

	/*
	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
	 */
	if (ioff != 0)
		cp = (caddr_t)(cp + (align - ioff));

	/*
	 * Insert remaining chunks on the bucket list.
	 */
	n = pp->pr_itemsperpage;

	while (n--) {
		pi = (struct pool_item *)cp;

		/* Insert on page list */
		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
		pi->pi_magic = PI_MAGIC;
#endif
		cp = (caddr_t)(cp + pp->pr_size);
	}

	/*
	 * If the pool was depleted, point at the new page.
	 */
	if (pp->pr_curpage == NULL)
		pp->pr_curpage = ph;

	if (++pp->pr_npages > pp->pr_hiwat)
		pp->pr_hiwat = pp->pr_npages;

	return (0);
}

void
pool_setlowat(pp, n)
	pool_handle_t	pp;
	int n;
{
	pp->pr_minitems = n;
	if (n == 0) {
		pp->pr_minpages = 0;
		return;
	}
	pp->pr_minpages =
		roundup(pp->pr_itemsperpage,n) / pp->pr_itemsperpage;
}

void
pool_sethiwat(pp, n)
	pool_handle_t	pp;
	int n;
{
	if (n == 0) {
		pp->pr_maxpages = 0;
		return;
	}
	pp->pr_maxpages =
		roundup(pp->pr_itemsperpage,n) / pp->pr_itemsperpage;
}


/*
 * Default page allocator.
 */
static void *
pool_page_alloc(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

#if defined(UVM)
	return ((void *)uvm_km_alloc_poolpage(waitok));
#else
	return ((void *)kmem_alloc_poolpage(waitok));
#endif
}

static void
pool_page_free(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free_poolpage((vaddr_t)v);
#else
	kmem_free_poolpage((vaddr_t)v);
#endif
}

/*
 * Alternate pool page allocator for pools that know they will
 * never be accessed in interrupt context.
 */
void *
pool_page_alloc_nointr(sz, flags, mtype)
	unsigned long sz;
	int flags;
	int mtype;
{
#if defined(UVM)
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	/*
	 * With UVM, we can use the kernel_map.
	 */
	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
	    waitok));
#else
	/*
	 * Can't do anything so cool with Mach VM.
	 */
	return (pool_page_alloc(sz, flags, mtype));
#endif
}

void
pool_page_free_nointr(v, sz, mtype)
	void *v;
	unsigned long sz;
	int mtype;
{

#if defined(UVM)
	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
#else
	pool_page_free(v, sz, mtype);
#endif
}


/*
 * Release all complete pages that have not been used recently.
 */
void
pool_reclaim(pp)
	pool_handle_t pp;
{
	struct pool_item_header *ph, *phnext;
	struct timeval curtime = time;

	if (pp->pr_flags & PR_STATIC)
		return;

	if (simple_lock_try(&pp->pr_lock) == 0)
		return;

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
		phnext = TAILQ_NEXT(ph, ph_pagelist);

		/* Check our minimum page claim */
		if (pp->pr_npages <= pp->pr_minpages)
			break;

		if (ph->ph_nmissing == 0) {
			struct timeval diff;
			timersub(&curtime, &ph->ph_time, &diff);
			if (diff.tv_sec < pool_inactive_time)
				continue;
			pr_rmpage(pp, ph);
		}
	}

	simple_unlock(&pp->pr_lock);
}


/*
 * Drain pools, one at a time.
 */
void
pool_drain(arg)
	void *arg;
{
	struct pool *pp;
	int s = splimp();

	/* XXX:lock pool head */
	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) {
		splx(s);
		return;
	}

	pp = drainpp;
	drainpp = TAILQ_NEXT(pp, pr_poollist);
	/* XXX:unlock pool head */

	pool_reclaim(pp);
	splx(s);
}


#ifdef DEBUG
/*
 * Diagnostic helpers.
 */
void
pool_print(pp, label)
	struct pool *pp;
	char *label;
{

	if (label != NULL)
		printf("%s: ", label);

	printf("pool %s: nalloc %lu nfree %lu npagealloc %lu npagefree %lu\n"
	       "         npages %u minitems %u itemsperpage %u itemoffset %u\n"
	       "         nidle %lu\n",
		pp->pr_wchan,
		pp->pr_nget,
		pp->pr_nput,
		pp->pr_npagealloc,
		pp->pr_npagefree,
		pp->pr_npages,
		pp->pr_minitems,
		pp->pr_itemsperpage,
		pp->pr_itemoffset,
		pp->pr_nidle);
}

int
pool_chk(pp, label)
	struct pool *pp;
	char *label;
{
	struct pool_item_header *ph;
	int r = 0;

	simple_lock(&pp->pr_lock);

	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
	     ph = TAILQ_NEXT(ph, ph_pagelist)) {

		struct pool_item *pi;
		int n;
		caddr_t page;

		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
		if (page != ph->ph_page) {
			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): page inconsistency: page %p;"
			       " at page head addr %p (p %p)\n",
				pp->pr_wchan, ph->ph_page,
				ph, page);
			r++;
			goto out;
		}

		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
		     pi != NULL;
		     pi = TAILQ_NEXT(pi,pi_list), n++) {

#ifdef DIAGNOSTIC
			if (pi->pi_magic != PI_MAGIC) {
				if (label != NULL)
					printf("%s: ", label);
				printf("pool(%s): free list modified: magic=%x;"
				       " page %p; item ordinal %d;"
				       " addr %p (p %p)\n",
					pp->pr_wchan, pi->pi_magic, ph->ph_page,
					n, pi, page);
				panic("pool");
			}
#endif
			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
			if (page == ph->ph_page)
				continue;

			if (label != NULL)
				printf("%s: ", label);
			printf("pool(%s): page inconsistency: page %p;"
			       " item ordinal %d; addr %p (p %p)\n",
				pp->pr_wchan, ph->ph_page,
				n, pi, page);
			r++;
			goto out;
		}
	}
out:
	simple_unlock(&pp->pr_lock);
	return (r);
}
#endif