1 /* $NetBSD: sl_malloc.c,v 1.1.1.3.24.1 2014/08/10 07:09:48 tls Exp $ */
2
3 /* sl_malloc.c - malloc routines using a per-thread slab */
4 /* $OpenLDAP$ */
5 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
6 *
7 * Copyright 2003-2014 The OpenLDAP Foundation.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted only as authorized by the OpenLDAP
12 * Public License.
13 *
14 * A copy of this license is available in the file LICENSE in the
15 * top-level directory of the distribution or, alternatively, at
16 * <http://www.OpenLDAP.org/license.html>.
17 */
18
19 #include "portable.h"
20
21 #include <stdio.h>
22 #include <ac/string.h>
23
24 #include "slap.h"
25
26 #ifdef USE_VALGRIND
27 /* Get debugging help from Valgrind */
28 #include <valgrind/memcheck.h>
29 #define VGMEMP_MARK(m,s) VALGRIND_MAKE_MEM_NOACCESS(m,s)
30 #define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z)
31 #define VGMEMP_TRIM(h,a,s) VALGRIND_MEMPOOL_TRIM(h,a,s)
32 #define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
33 #define VGMEMP_CHANGE(h,a,b,s) VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
34 #else
35 #define VGMEMP_MARK(m,s)
36 #define VGMEMP_CREATE(h,r,z)
37 #define VGMEMP_TRIM(h,a,s)
38 #define VGMEMP_ALLOC(h,a,s)
39 #define VGMEMP_CHANGE(h,a,b,s)
40 #endif
41
42 /*
43 * This allocator returns temporary memory from a slab in a given memory
44 * context, aligned on a 2-int boundary. It cannot be used for data
45 * which will outlive the task allocating it.
46 *
47 * A new memory context attaches to the creator's thread context, if any.
48 * Threads cannot use other threads' memory contexts; there are no locks.
49 *
50 * The caller of slap_sl_malloc, usually a thread pool task, must
51 * slap_sl_free the memory before finishing: New tasks reuse the context
52 * and normally reset it, reclaiming memory left over from last task.
53 *
54 * The allocator helps memory fragmentation, speed and memory leaks.
55 * It is not (yet) reliable as a garbage collector:
56 *
57 * It falls back to context NULL - plain ber_memalloc() - when the
58 * context's slab is full. A reset does not reclaim such memory.
59 * Conversely, free/realloc of data not from the given context assumes
60 * context NULL. The data must not belong to another memory context.
61 *
62 * Code which has lost track of the current memory context can try
63 * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
64 *
65 * Allocations cannot yet return failure. Like ch_malloc, they succeed
66 * or abort slapd. This will change, do fix code which assumes success.
67 */
68
69 /*
70 * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
71 * allocated blocks' head - and in freed blocks also at the tail, marked
72 * by ORing *next* block's head with 1. Freed blocks are only reclaimed
73 * from the last block forward. This is fast, but when a block is never
74 * freed, older blocks will not be reclaimed until the slab is reset...
75 */
76
#ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
enum { No_sl_malloc = 1 };
#else
enum { No_sl_malloc = 0 };
#endif

/* Number of slab_object descriptors ch_malloc'd per replenish batch */
#define SLAP_SLAB_SOBLOCK 64

/* Descriptor for one chunk tracked by the buddy allocator, or for the
 * head of a descriptor batch (so_blockhead != 0 marks the element that
 * owns the underlying ch_malloc'd array, freed at destroy time). */
struct slab_object {
	void *so_ptr;		/* start of the chunk this descriptor tracks */
	int so_blockhead;	/* nonzero: heads a SLAP_SLAB_SOBLOCK batch */
	LDAP_LIST_ENTRY(slab_object) so_link;
};

/* Per-thread slab heap; either a stack allocator (sh_stack != 0) using
 * sh_last as the stack top, or a buddy allocator using the per-order
 * free lists and allocation bitmaps. */
struct slab_heap {
	void *sh_base;		/* start of the slab */
	void *sh_last;		/* stack mode: first unused byte */
	void *sh_end;		/* one past the end of the slab */
	int sh_stack;		/* nonzero: stack mode, else buddy mode */
	int sh_maxorder;	/* buddy mode: log2 of the largest chunk */
	unsigned char **sh_map;	/* buddy mode: per-order allocation bitmaps */
	LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;	/* per-order free lists */
	LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;	/* spare descriptors */
};

/* Alignment constants: Align is a power of two (asserted in
 * slap_sl_mem_init); order_start is the smallest buddy order used. */
enum {
	Align = sizeof(ber_len_t) > 2*sizeof(int)
		? sizeof(ber_len_t) : 2*sizeof(int),
	Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
	order_start = Align_log2 - 1,
	pad = Align - 1
};
109
static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
#ifdef SLAPD_UNUSED
static void print_slheap(int level, void *ctx);
#endif

/* Keep memory context in a thread-local var, or in a global when no threads */
#ifdef NO_THREADS
static struct slab_heap *slheap;
# define SET_MEMCTX(thrctx, memctx, sfree)	((void) (slheap = (memctx)))
# define GET_MEMCTX(thrctx, memctxp)		(*(memctxp) = slheap)
#else
/* The address of slap_sl_mem_init doubles as the unique pool key */
# define memctx_key ((void *) slap_sl_mem_init)
# define SET_MEMCTX(thrctx, memctx, kfree) \
	ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
/* GET_MEMCTX: NULL *memctxp when no key is set, else the stored heap */
# define GET_MEMCTX(thrctx, memctxp) \
	((void) (*(memctxp) = NULL), \
	 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
	 *(memctxp))
#endif /* NO_THREADS */
129
130
/* Destroy the context, or if key==NULL clean it up for reuse. */
void
slap_sl_mem_destroy(
	void *key,	/* NULL: reset for reuse; non-NULL: destroy fully */
	void *data	/* the slab_heap to tear down */
)
{
	struct slab_heap *sh = data;
	struct slab_object *so;
	int i;

	if (!sh->sh_stack) {
		/* Buddy mode: move every free-list descriptor back onto the
		 * descriptor pool, then release the per-order bitmaps and
		 * the free-list array itself. */
		for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
			so = LDAP_LIST_FIRST(&sh->sh_free[i]);
			while (so) {
				struct slab_object *so_tmp = so;
				so = LDAP_LIST_NEXT(so, so_link);
				LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
			}
			ch_free(sh->sh_map[i]);
		}
		ch_free(sh->sh_free);
		ch_free(sh->sh_map);

		/* Unlink descriptors that live inside some batch array so
		 * that only batch heads remain on the pool list. */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			if (!so_tmp->so_blockhead) {
				LDAP_LIST_REMOVE(so_tmp, so_link);
			}
		}
		/* Free each remaining batch head; each frees its whole
		 * SLAP_SLAB_SOBLOCK descriptor array. */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			ch_free(so_tmp);
		}
	}

	if (key != NULL) {
		/* Full destroy: release the slab and the heap header */
		ber_memfree_x(sh->sh_base, NULL);
		ber_memfree_x(sh, NULL);
	}
}
176
/* liblber memory vtable: routes ber_mem* calls through this allocator */
BerMemoryFunctions slap_sl_mfuncs =
	{ slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };
179
180 void
181 slap_sl_mem_init()
182 {
183 assert( Align == 1 << Align_log2 );
184
185 ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
186 }
187
/* Create, reset or just return the memory context of the current thread. */
void *
slap_sl_mem_create(
	ber_len_t size,	/* requested slab size in bytes */
	int stack,	/* nonzero: stack allocator, else buddy allocator */
	void *thrctx,	/* thread context to attach the memory context to */
	int new		/* nonzero: reset even if a context already exists */
)
{
	void *memctx;
	struct slab_heap *sh;
	ber_len_t size_shift;
	struct slab_object *so;
	char *base, *newptr;
	/* Offset such that (base + one block header) is Align-aligned */
	enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };

	sh = GET_MEMCTX(thrctx, &memctx);
	if ( sh && !new )
		return sh;

	/* Round up to doubleword boundary, then make room for initial
	 * padding, preserving expected available size for pool version */
	size = ((size + Align-1) & -Align) + Base_offset;

	if (!sh) {
		/* First use by this thread: allocate header and slab */
		sh = ch_malloc(sizeof(struct slab_heap));
		base = ch_malloc(size);
		SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
		VGMEMP_MARK(base, size);
		VGMEMP_CREATE(sh, 0, 0);
	} else {
		/* Reset the existing context, growing the slab on demand */
		slap_sl_mem_destroy(NULL, sh);
		base = sh->sh_base;
		if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
			newptr = ch_realloc(base, size);
			if ( newptr == NULL ) return NULL;
			VGMEMP_CHANGE(sh, base, newptr, size);
			base = newptr;
		}
		VGMEMP_TRIM(sh, base, 0);
	}
	sh->sh_base = base;
	sh->sh_end = base + size;

	/* Align (base + head of first block) == first returned block */
	base += Base_offset;
	size -= Base_offset;

	sh->sh_stack = stack;
	if (stack) {
		sh->sh_last = base;

	} else {
		int i, order = -1, order_end = -1;

		/* order_end = floor(log2(size)) */
		size_shift = size - 1;
		do {
			order_end++;
		} while (size_shift >>= 1);
		order = order_end - order_start + 1;
		sh->sh_maxorder = order_end;

		/* One free list per order; the slab starts out as a single
		 * free chunk of the maximal order. */
		sh->sh_free = (struct sh_freelist *)
			ch_malloc(order * sizeof(struct sh_freelist));
		for (i = 0; i < order; i++) {
			LDAP_LIST_INIT(&sh->sh_free[i]);
		}

		LDAP_LIST_INIT(&sh->sh_sopool);

		if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
			slap_replenish_sopool(sh);
		}
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		LDAP_LIST_REMOVE(so, so_link);
		so->so_ptr = base;

		LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);

		/* Per-order allocation bitmaps, one bit per chunk, rounded
		 * up to at least one byte each. */
		sh->sh_map = (unsigned char **)
			ch_malloc(order * sizeof(unsigned char *));
		for (i = 0; i < order; i++) {
			int shiftamt = order_start + 1 + i;
			int nummaps = size >> shiftamt;
			assert(nummaps);
			nummaps >>= 3;
			if (!nummaps) nummaps = 1;
			sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
			memset(sh->sh_map[i], 0, nummaps);
		}
	}

	return sh;
}
282
283 /*
284 * Separate memory context from thread context. Future users must
285 * know the context, since ch_free/slap_sl_context() cannot find it.
286 */
287 void
288 slap_sl_mem_detach(
289 void *thrctx,
290 void *memctx
291 )
292 {
293 SET_MEMCTX(thrctx, NULL, 0);
294 }
295
/* Allocate size bytes from the context, or from the global heap when
 * ctx is NULL or the slab is exhausted.  Never returns NULL: aborts
 * slapd on out-of-memory, like ch_malloc. */
void *
slap_sl_malloc(
	ber_len_t size,
	void *ctx
)
{
	struct slab_heap *sh = ctx;
	ber_len_t *ptr, *newptr;

	/* ber_set_option calls us like this */
	if (No_sl_malloc || !ctx) {
		newptr = ber_memalloc_x( size, NULL );
		if ( newptr ) return newptr;
		Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
			(unsigned long) size, 0, 0);
		assert( 0 );
		exit( EXIT_FAILURE );
	}

	/* Add room for head, ensure room for tail when freed, and
	 * round up to doubleword boundary. */
	size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;

	if (sh->sh_stack) {
		if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
			newptr = sh->sh_last;
			sh->sh_last = (char *) sh->sh_last + size;
			VGMEMP_ALLOC(sh, newptr, size);
			*newptr++ = size;	/* store head; caller gets next word */
			return( (void *)newptr );
		}

		/* Slab full: undo the header adjustment for the fallback */
		size -= sizeof(ber_len_t);

	} else {
		struct slab_object *so_new, *so_left, *so_right;
		ber_len_t size_shift;
		unsigned long diff;
		int i, j, order = -1;

		/* order = ceil(log2(size)) for the buddy free lists */
		size_shift = size - 1;
		do {
			order++;
		} while (size_shift >>= 1);

		size -= sizeof(ber_len_t);

		/* Find the smallest order with a free chunk available */
		for (i = order; i <= sh->sh_maxorder &&
			LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);

		if (i == order) {
			/* Exact-fit chunk on the free list */
			so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
			LDAP_LIST_REMOVE(so_new, so_link);
			ptr = so_new->so_ptr;
			diff = (unsigned long)((char*)ptr -
				(char*)sh->sh_base) >> (order + 1);
			sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
			*ptr++ = size;
			LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
			return((void*)ptr);
		} else if (i <= sh->sh_maxorder) {
			/* Split larger chunks down until one fits */
			for (j = i; j > order; j--) {
				so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
				LDAP_LIST_REMOVE(so_left, so_link);
				if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
					slap_replenish_sopool(sh);
				}
				so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
				LDAP_LIST_REMOVE(so_right, so_link);
				so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
				if (j == order + 1) {
					/* Final split: take left half, free right half */
					ptr = so_left->so_ptr;
					diff = (unsigned long)((char*)ptr -
						(char*)sh->sh_base) >> (order+1);
					sh->sh_map[order-order_start][diff>>3] |=
						(1 << (diff & 0x7));
					*ptr++ = size;
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
					return((void*)ptr);
				} else {
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(
						&sh->sh_free[j-1-order_start], so_left, so_link);
				}
			}
		}
		/* FIXME: missing return; guessing we failed... */
	}

	/* Slab exhausted: fall back to the global heap.  NOTE: a context
	 * reset will NOT reclaim memory allocated this way. */
	Debug(LDAP_DEBUG_TRACE,
		"sl_malloc %lu: ch_malloc\n",
		(unsigned long) size, 0, 0);
	return ch_malloc(size);
}
393
394 #define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
395 ((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
396 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
397
398 void *
399 slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
400 {
401 void *newptr;
402 ber_len_t total = n * size;
403
404 /* The sqrt test is a slight optimization: often avoids the division */
405 if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
406 newptr = slap_sl_malloc( total, ctx );
407 memset( newptr, 0, n*size );
408 } else {
409 Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
410 (unsigned long) n, (unsigned long) size, 0);
411 assert(0);
412 exit(EXIT_FAILURE);
413 }
414 return newptr;
415 }
416
/* Resize a block, like realloc(3).  Blocks not from this context are
 * handed to ber_memrealloc_x; a NULL ptr degenerates to malloc and a
 * zero size to free. */
void *
slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
	void *newptr;

	if (ptr == NULL)
		return slap_sl_malloc(size, ctx);

	/* Not our memory? */
	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		/* Like ch_realloc(), except not trying a new context */
		newptr = ber_memrealloc_x(ptr, size, NULL);
		if (newptr) {
			return newptr;
		}
		Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
			(unsigned long) size, 0, 0);
		assert(0);
		exit( EXIT_FAILURE );
	}

	if (size == 0) {
		slap_sl_free(ptr, ctx);
		return NULL;
	}

	oldsize = p[-1];	/* stored head: sizeof(head) + payload */

	if (sh->sh_stack) {
		/* Add room for head, round up to doubleword boundary */
		size = (size + sizeof(ber_len_t) + Align-1) & -Align;

		p--;

		/* Never shrink blocks */
		if (size <= oldsize) {
			return ptr;
		}

		oldsize &= -2;	/* strip the "next block freed" flag bit */
		nextp = (ber_len_t *) ((char *) p + oldsize);

		/* If reallocing the last block, try to grow it */
		if (nextp == sh->sh_last) {
			if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
				sh->sh_last = (char *) p + size;
				p[0] = (p[0] & 1) | size;
				return ptr;
			}

			/* Nowhere to grow, need to alloc and copy */
		} else {
			/* Slight optimization of the final realloc variant */
			newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
			AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
			/* Not last block, can just mark old region as free */
			nextp[-1] = oldsize;
			nextp[0] |= 1;
			return newptr;
		}

		size -= sizeof(ber_len_t);
		oldsize -= sizeof(ber_len_t);

	} else if (oldsize > size) {
		oldsize = size;	/* copy only what the new block can hold */
	}

	/* Generic path: allocate anew, copy payload, release old block */
	newptr = slap_sl_malloc(size, ctx);
	AC_MEMCPY(newptr, ptr, oldsize);
	slap_sl_free(ptr, ctx);
	return newptr;
}
492
493 void
494 slap_sl_free(void *ptr, void *ctx)
495 {
496 struct slab_heap *sh = ctx;
497 ber_len_t size;
498 ber_len_t *p = ptr, *nextp, *tmpp;
499
500 if (!ptr)
501 return;
502
503 if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
504 ber_memfree_x(ptr, NULL);
505 return;
506 }
507
508 size = *(--p);
509
510 if (sh->sh_stack) {
511 size &= -2;
512 nextp = (ber_len_t *) ((char *) p + size);
513 if (sh->sh_last != nextp) {
514 /* Mark it free: tail = size, head of next block |= 1 */
515 nextp[-1] = size;
516 nextp[0] |= 1;
517 /* We can't tell Valgrind about it yet, because we
518 * still need read/write access to this block for
519 * when we eventually get to reclaim it.
520 */
521 } else {
522 /* Reclaim freed block(s) off tail */
523 while (*p & 1) {
524 p = (ber_len_t *) ((char *) p - p[-1]);
525 }
526 sh->sh_last = p;
527 VGMEMP_TRIM(sh, sh->sh_base,
528 (char *) sh->sh_last - (char *) sh->sh_base);
529 }
530
531 } else {
532 int size_shift, order_size;
533 struct slab_object *so;
534 unsigned long diff;
535 int i, inserted = 0, order = -1;
536
537 size_shift = size + sizeof(ber_len_t) - 1;
538 do {
539 order++;
540 } while (size_shift >>= 1);
541
542 for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) {
543 order_size = 1 << (i+1);
544 diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1);
545 sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7)));
546 if (diff == ((diff>>1)<<1)) {
547 if (!(sh->sh_map[i-order_start][(diff+1)>>3] &
548 (1<<((diff+1)&0x7)))) {
549 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
550 while (so) {
551 if ((char*)so->so_ptr == (char*)tmpp) {
552 LDAP_LIST_REMOVE( so, so_link );
553 } else if ((char*)so->so_ptr ==
554 (char*)tmpp + order_size) {
555 LDAP_LIST_REMOVE(so, so_link);
556 break;
557 }
558 so = LDAP_LIST_NEXT(so, so_link);
559 }
560 if (so) {
561 if (i < sh->sh_maxorder) {
562 inserted = 1;
563 so->so_ptr = tmpp;
564 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
565 so, so_link);
566 }
567 continue;
568 } else {
569 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
570 slap_replenish_sopool(sh);
571 }
572 so = LDAP_LIST_FIRST(&sh->sh_sopool);
573 LDAP_LIST_REMOVE(so, so_link);
574 so->so_ptr = tmpp;
575 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
576 so, so_link);
577 break;
578
579 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
580 "free object not found while bit is clear.\n",
581 0, 0, 0);
582 assert(so != NULL);
583
584 }
585 } else {
586 if (!inserted) {
587 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
588 slap_replenish_sopool(sh);
589 }
590 so = LDAP_LIST_FIRST(&sh->sh_sopool);
591 LDAP_LIST_REMOVE(so, so_link);
592 so->so_ptr = tmpp;
593 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
594 so, so_link);
595 }
596 break;
597 }
598 } else {
599 if (!(sh->sh_map[i-order_start][(diff-1)>>3] &
600 (1<<((diff-1)&0x7)))) {
601 so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
602 while (so) {
603 if ((char*)so->so_ptr == (char*)tmpp) {
604 LDAP_LIST_REMOVE(so, so_link);
605 } else if ((char*)tmpp == (char *)so->so_ptr + order_size) {
606 LDAP_LIST_REMOVE(so, so_link);
607 tmpp = so->so_ptr;
608 break;
609 }
610 so = LDAP_LIST_NEXT(so, so_link);
611 }
612 if (so) {
613 if (i < sh->sh_maxorder) {
614 inserted = 1;
615 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1], so, so_link);
616 continue;
617 }
618 } else {
619 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
620 slap_replenish_sopool(sh);
621 }
622 so = LDAP_LIST_FIRST(&sh->sh_sopool);
623 LDAP_LIST_REMOVE(so, so_link);
624 so->so_ptr = tmpp;
625 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
626 so, so_link);
627 break;
628
629 Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
630 "free object not found while bit is clear.\n",
631 0, 0, 0 );
632 assert(so != NULL);
633
634 }
635 } else {
636 if ( !inserted ) {
637 if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
638 slap_replenish_sopool(sh);
639 }
640 so = LDAP_LIST_FIRST(&sh->sh_sopool);
641 LDAP_LIST_REMOVE(so, so_link);
642 so->so_ptr = tmpp;
643 LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
644 so, so_link);
645 }
646 break;
647 }
648 }
649 }
650 }
651 }
652
653 /*
654 * Return the memory context of the current thread if the given block of
655 * memory belongs to it, otherwise return NULL.
656 */
657 void *
658 slap_sl_context( void *ptr )
659 {
660 void *memctx;
661 struct slab_heap *sh;
662
663 if ( slapMode & SLAP_TOOL_MODE ) return NULL;
664
665 sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx);
666 if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) {
667 return sh;
668 }
669 return NULL;
670 }
671
672 static struct slab_object *
673 slap_replenish_sopool(
674 struct slab_heap* sh
675 )
676 {
677 struct slab_object *so_block;
678 int i;
679
680 so_block = (struct slab_object *)ch_malloc(
681 SLAP_SLAB_SOBLOCK * sizeof(struct slab_object));
682
683 if ( so_block == NULL ) {
684 return NULL;
685 }
686
687 so_block[0].so_blockhead = 1;
688 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link);
689 for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) {
690 so_block[i].so_blockhead = 0;
691 LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link );
692 }
693
694 return so_block;
695 }
696
#ifdef SLAPD_UNUSED
/* Debugging aid: dump a buddy-mode heap's bitmaps and free lists. */
static void
print_slheap(int level, void *ctx)
{
	struct slab_heap *sh = ctx;
	struct slab_object *so;
	int i, j, once = 0;

	if (!ctx) {
		Debug(level, "NULL memctx\n", 0, 0, 0);
		return;
	}

	Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder, 0, 0);

	for (i = order_start; i <= sh->sh_maxorder; i++) {
		once = 0;
		Debug(level, "order=%d\n", i, 0, 0);
		/* Dump every byte of this order's allocation bitmap */
		for (j = 0; j < (1<<(sh->sh_maxorder-i))/8; j++) {
			Debug(level, "%02x ", sh->sh_map[i-order_start][j], 0, 0);
			once = 1;
		}
		if (!once) {
			/* Bitmap smaller than a byte: print its single byte */
			Debug(level, "%02x ", sh->sh_map[i-order_start][0], 0, 0);
		}
		Debug(level, "\n", 0, 0, 0);
		Debug(level, "free list:\n", 0, 0, 0);
		so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
		while (so) {
			Debug(level, "%p\n", so->so_ptr, 0, 0);
			so = LDAP_LIST_NEXT(so, so_link);
		}
	}
}
#endif
732