/*	$NetBSD: xmalloc.c,v 1.5.16.1 2007/11/06 23:12:12 matt Exp $	*/

/*
 * Copyright 1996 John D. Polstra.
 * Copyright 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Polstra.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */
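
/*
 * Worked example (illustrative, assuming a build where
 * sizeof(union overhead) == 8 and RCHECK is not defined): a 100-byte
 * request needs 100 + 8 = 108 bytes of pool space and is therefore
 * served from the 128-byte size class, leaving 120 usable bytes; a
 * 121-byte request would be passed up to the 256-byte class.
 */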

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: xmalloc.c,v 1.5.16.1 2007/11/06 23:12:12 matt Exp $");
#endif /* not lint */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>	/* O_RDWR, needed by the NEED_DEV_ZERO path below */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include "rtld.h"

/*
 * Pre-allocate mmap'ed pages
 */
#define	NPOOLPAGES	(32*1024/pagesz)
static caddr_t		pagepool_start, pagepool_end;
static int		morepages(int);

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_int	ovu_size;	/* actual block size */
#endif
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};
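
/*
 * Sketch of the two layouts described above (RCHECK fields not shown):
 *
 *	free:	 [ ov_next .........][ rest of block, unused            ]
 *	in use:	 [MAGIC][index][pad.][ user data, returned as op + 1 -> ]
 */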

static void morecore(int);
static void *imalloc(size_t);

#define	MAGIC		0xef		/* magic # on accounting info */
#define	RMAGIC		0x5555		/* magic # on range info */

#ifdef RCHECK
#define	RSLOP		(sizeof (u_short))
#else
#define	RSLOP		0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];
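
/*
 * For reference, the mapping implied above (bucket i holds 2^(i+3)-byte
 * blocks): nextf[0] -> 8 bytes, nextf[1] -> 16, nextf[2] -> 32, ...,
 * nextf[NBUCKETS - 1] -> 2^32 bytes.
 */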

static	int pagesz;			/* page size */
static	int pagebucket;			/* page size bucket */

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#endif

#if defined(MALLOC_DEBUG) || defined(RCHECK)
#define	ASSERT(p)	if (!(p)) botch(#p)
static void
botch(const char *s)
{
	xwarnx("\r\nassertion botched: %s\r\n", s);
	abort();
}
#else
#define	ASSERT(p)
#endif

#define	TRACE()	xprintf("TRACE %s:%d\n", __FILE__, __LINE__)

static void *
imalloc(size_t nbytes)
{
	register union overhead *op;
	register int bucket;
	register long n;
	register unsigned amt;

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = _rtld_pagesz;
		if (morepages(NPOOLPAGES) == 0)
			return NULL;
		op = (union overhead *)(pagepool_start);
		n = n - sizeof (*op) - (((char *)op - (char *)NULL) & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			pagepool_start += n;
		}
		bucket = 0;
		amt = sizeof(union overhead);
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
		if (sizeof(union overhead) & (sizeof(union overhead) - 1)) {
			amt = sizeof(union overhead) * 2;
			bucket = 1;
		} else {
			amt = sizeof(union overhead); /* size of first bucket */
			bucket = 0;
		}
		n = -(sizeof (*op) + RSLOP);
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)
			return (NULL);
		bucket++;
	}
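	/*
	 * Illustrative trace, assuming pagesz == 4096, an 8-byte union
	 * overhead and no RCHECK: a 5000-byte request takes the "else"
	 * branch above (5000 > 4096 - 8), giving amt = 4096,
	 * bucket = pagebucket = 9 and n = 4088; the loop does not run
	 * (5000 <= 4096 + 4088), so the request is served from bucket 9,
	 * whose blocks morecore() carves out as sz + pagesz = 8192 bytes.
	 */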
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
#ifdef MSTATS
	nmalloc[bucket]++;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	return ((char *)(op + 1));
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	register union overhead *op;
	register int sz;		/* size of desired block */
	int amt;			/* amount to allocate */
	int nblks;			/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	sz = 1 << (bucket + 3);
#ifdef MALLOC_DEBUG
	ASSERT(sz > 0);
#else
	if (sz <= 0)
		return;
#endif
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
	if (amt > pagepool_end - pagepool_start)
		if (morepages(amt/pagesz + NPOOLPAGES) == 0)
			return;
	op = (union overhead *)pagepool_start;
	pagepool_start += amt;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
}

void
xfree(void *cp)
{
	register int size;
	register union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
#ifdef MALLOC_DEBUG
	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
#else
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
#endif
#ifdef RCHECK
	ASSERT(op->ov_rmagic == RMAGIC);
	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
#endif
	size = op->ov_index;
	ASSERT(size < NBUCKETS);
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
#ifdef MSTATS
	nmalloc[size]--;
#endif
}

static void *
irealloc(void *cp, size_t nbytes)
{
	register u_int onb;
	register int i;
	union overhead *op;
	char *res;

	if (cp == NULL)
		return (imalloc(nbytes));
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic != MAGIC) {
		static const char *err_str =
		    "memory corruption or double free in realloc\n";
		extern const char *__progname;
		write(STDERR_FILENO, __progname, strlen(__progname));
		write(STDERR_FILENO, err_str, strlen(err_str));
		abort();
	}

	i = op->ov_index;
	onb = 1 << (i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
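	/*
	 * Example of the old-size computation above, assuming pagesz ==
	 * 4096, an 8-byte union overhead and no RCHECK: for a block from
	 * bucket 4, onb = 128 - 8 = 120 usable bytes, and below i becomes
	 * 64 - 8 = 56 (the usable size of the next smaller bucket), so any
	 * new size in (56, 120] reuses the block in place.
	 */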
	/* avoid the copy if same size block */
	if (i) {
		i = 1 << (i + 2);
		if (i < pagesz)
			i -= sizeof (*op) + RSLOP;
		else
			i += pagesz - sizeof (*op) - RSLOP;
	}
	if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
		op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
		*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
		return(cp);
	} else
		xfree(cp);
	if ((res = imalloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)		/* common optimization if "compacting" */
		memcpy(res, cp, (nbytes < onb) ? nbytes : onb);
	return (res);
}

#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
mstats(char *s)
{
	register int i, j;
	register union overhead *p;
	int totfree = 0,
	totused = 0;

	xprintf("Memory allocation statistics %s\nfree:\t", s);
	for (i = 0; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		xprintf(" %d", j);
		totfree += j * (1 << (i + 3));
	}
	xprintf("\nused:\t");
	for (i = 0; i < NBUCKETS; i++) {
		xprintf(" %d", nmalloc[i]);
		totused += nmalloc[i] * (1 << (i + 3));
	}
	xprintf("\n\tTotal in use: %d, total free: %d\n",
	    totused, totfree);
}
#endif


static int
morepages(int n)
{
	int	fd = -1;
	int	offset;

#ifdef NEED_DEV_ZERO
	fd = open("/dev/zero", O_RDWR, 0);
	if (fd == -1)
		xerr(1, "/dev/zero");
#endif

	if (pagepool_end - pagepool_start > pagesz) {
		caddr_t	addr = (caddr_t)
		    (((long)pagepool_start + pagesz - 1) & ~(pagesz - 1));
		if (munmap(addr, pagepool_end - addr) != 0)
			xwarn("morepages: munmap %p", addr);
	}

	offset = (long)pagepool_start - ((long)pagepool_start & ~(pagesz - 1));

	if ((pagepool_start = mmap(0, n * pagesz,
	    PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE, fd, 0)) == (caddr_t)-1) {
		xprintf("Cannot map anonymous memory");
		return 0;
	}
	pagepool_end = pagepool_start + n * pagesz;
	pagepool_start += offset;

#ifdef NEED_DEV_ZERO
	close(fd);
#endif
	return n;
}

void *
xcalloc(size_t size)
{

	return memset(xmalloc(size), 0, size);
}

void *
xmalloc(size_t size)
{
	void *p = imalloc(size);

	if (p == NULL)
		xerr(1, "%s", xstrerror(errno));
	return p;
}

void *
xrealloc(void *p, size_t size)
{
	p = irealloc(p, size);

	if (p == NULL)
		xerr(1, "%s", xstrerror(errno));
	return p;
}

char *
xstrdup(const char *str)
{
	size_t len;
	char *copy;

	len = strlen(str) + 1;
	copy = xmalloc(len);
	memcpy(copy, str, len);
	return (copy);
}