Home | History | Annotate | Line # | Download | only in slapd
sl_malloc.c revision 1.3
      1 /*	$NetBSD: sl_malloc.c,v 1.3 2021/08/14 16:14:58 christos Exp $	*/
      2 
      3 /* sl_malloc.c - malloc routines using a per-thread slab */
      4 /* $OpenLDAP$ */
      5 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
      6  *
      7  * Copyright 2003-2021 The OpenLDAP Foundation.
      8  * All rights reserved.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted only as authorized by the OpenLDAP
     12  * Public License.
     13  *
     14  * A copy of this license is available in the file LICENSE in the
     15  * top-level directory of the distribution or, alternatively, at
     16  * <http://www.OpenLDAP.org/license.html>.
     17  */
     18 
     19 #include <sys/cdefs.h>
     20 __RCSID("$NetBSD: sl_malloc.c,v 1.3 2021/08/14 16:14:58 christos Exp $");
     21 
     22 #include "portable.h"
     23 
     24 #include <stdio.h>
     25 #include <ac/string.h>
     26 
     27 #include "slap.h"
     28 
     29 #ifdef USE_VALGRIND
     30 /* Get debugging help from Valgrind */
     31 #include <valgrind/memcheck.h>
     32 #define	VGMEMP_MARK(m,s)	VALGRIND_MAKE_MEM_NOACCESS(m,s)
     33 #define VGMEMP_CREATE(h,r,z)	VALGRIND_CREATE_MEMPOOL(h,r,z)
     34 #define VGMEMP_TRIM(h,a,s)	VALGRIND_MEMPOOL_TRIM(h,a,s)
     35 #define VGMEMP_ALLOC(h,a,s)	VALGRIND_MEMPOOL_ALLOC(h,a,s)
     36 #define VGMEMP_CHANGE(h,a,b,s)	VALGRIND_MEMPOOL_CHANGE(h,a,b,s)
     37 #else
     38 #define	VGMEMP_MARK(m,s)
     39 #define VGMEMP_CREATE(h,r,z)
     40 #define VGMEMP_TRIM(h,a,s)
     41 #define VGMEMP_ALLOC(h,a,s)
     42 #define VGMEMP_CHANGE(h,a,b,s)
     43 #endif
     44 
     45 /*
     46  * This allocator returns temporary memory from a slab in a given memory
     47  * context, aligned on a 2-int boundary.  It cannot be used for data
     48  * which will outlive the task allocating it.
     49  *
     50  * A new memory context attaches to the creator's thread context, if any.
     51  * Threads cannot use other threads' memory contexts; there are no locks.
     52  *
     53  * The caller of slap_sl_malloc, usually a thread pool task, must
     54  * slap_sl_free the memory before finishing: New tasks reuse the context
     55  * and normally reset it, reclaiming memory left over from last task.
     56  *
     57  * The allocator helps memory fragmentation, speed and memory leaks.
     58  * It is not (yet) reliable as a garbage collector:
     59  *
     60  * It falls back to context NULL - plain ber_memalloc() - when the
     61  * context's slab is full.  A reset does not reclaim such memory.
     62  * Conversely, free/realloc of data not from the given context assumes
     63  * context NULL.  The data must not belong to another memory context.
     64  *
     65  * Code which has lost track of the current memory context can try
     66  * slap_sl_context() or ch_malloc.c:ch_free/ch_realloc().
     67  *
     68  * Allocations cannot yet return failure.  Like ch_malloc, they succeed
     69  * or abort slapd.  This will change, do fix code which assumes success.
     70  */
     71 
     72 /*
     73  * The stack-based allocator stores (ber_len_t)sizeof(head+block) at
     74  * allocated blocks' head - and in freed blocks also at the tail, marked
     75  * by ORing *next* block's head with 1.  Freed blocks are only reclaimed
     76  * from the last block forward.  This is fast, but when a block is never
     77  * freed, older blocks will not be reclaimed until the slab is reset...
     78  */
     79 
     80 #ifdef SLAP_NO_SL_MALLOC /* Useful with memory debuggers like Valgrind */
     81 enum { No_sl_malloc = 1 };
     82 #else
     83 enum { No_sl_malloc = 0 };
     84 #endif
     85 
     86 #define SLAP_SLAB_SOBLOCK 64
     87 
/* Descriptor for one free chunk (or a spare descriptor when pooled). */
struct slab_object {
    void *so_ptr;	/* start of the chunk this descriptor tracks */
	int so_blockhead;	/* 1 only on the first element of a ch_malloc'ed
				 * descriptor array - that is the pointer to free */
    LDAP_LIST_ENTRY(slab_object) so_link;
};
     93 
/* Per-thread slab heap: either stack (bump-pointer) or buddy flavor. */
struct slab_heap {
    void *sh_base;	/* start of the slab */
    void *sh_last;	/* stack flavor: first unused byte (bump pointer) */
    void *sh_end;	/* one past the end of the slab */
	int sh_stack;	/* nonzero: stack flavor; zero: buddy flavor */
	int sh_maxorder;	/* buddy: largest order (log2 of slab size) */
    unsigned char **sh_map;	/* buddy: per-order allocation bitmaps, 1 bit/chunk */
    LDAP_LIST_HEAD(sh_freelist, slab_object) *sh_free;	/* buddy: per-order free lists */
	LDAP_LIST_HEAD(sh_so, slab_object) sh_sopool;	/* spare slab_object descriptors */
};
    104 
enum {
	/* Blocks are aligned to the larger of ber_len_t and two ints;
	 * slap_sl_mem_init() asserts this is a power of two. */
	Align = sizeof(ber_len_t) > 2*sizeof(int)
		? sizeof(ber_len_t) : 2*sizeof(int),
	Align_log2 = 1 + (Align>2) + (Align>4) + (Align>8) + (Align>16),
	order_start = Align_log2 - 1,	/* smallest buddy order in use */
	pad = Align - 1
};
    112 
    113 static struct slab_object * slap_replenish_sopool(struct slab_heap* sh);
    114 #ifdef SLAPD_UNUSED
    115 static void print_slheap(int level, void *ctx);
    116 #endif
    117 
    118 /* Keep memory context in a thread-local var */
    119 # define memctx_key ((void *) slap_sl_mem_init)
    120 # define SET_MEMCTX(thrctx, memctx, kfree) \
    121 	ldap_pvt_thread_pool_setkey(thrctx,memctx_key, memctx,kfree, NULL,NULL)
    122 # define GET_MEMCTX(thrctx, memctxp) \
    123 	((void) (*(memctxp) = NULL), \
    124 	 (void) ldap_pvt_thread_pool_getkey(thrctx,memctx_key, memctxp,NULL), \
    125 	 *(memctxp))
    126 
/* Destroy the context, or if key==NULL clean it up for reuse. */
void
slap_sl_mem_destroy(
	void *key,
	void *data
)
{
	struct slab_heap *sh = data;
	struct slab_object *so;
	int i;

	if (!sh)
		return;

	if (!sh->sh_stack) {
		/* Buddy flavor: return every free-list descriptor to the
		 * sopool, then release the per-order bitmaps and arrays. */
		for (i = 0; i <= sh->sh_maxorder - order_start; i++) {
			so = LDAP_LIST_FIRST(&sh->sh_free[i]);
			while (so) {
				struct slab_object *so_tmp = so;
				so = LDAP_LIST_NEXT(so, so_link);
				LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_tmp, so_link);
			}
			ch_free(sh->sh_map[i]);
		}
		ch_free(sh->sh_free);
		ch_free(sh->sh_map);

		/* Descriptors were ch_malloc'ed in arrays of SLAP_SLAB_SOBLOCK;
		 * only the array head (so_blockhead) may be passed to ch_free.
		 * First unlink the non-head elements ... */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			if (!so_tmp->so_blockhead) {
				LDAP_LIST_REMOVE(so_tmp, so_link);
			}
		}
		/* ... then free the remaining block heads. */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		while (so) {
			struct slab_object *so_tmp = so;
			so = LDAP_LIST_NEXT(so, so_link);
			ch_free(so_tmp);
		}
	}

	if (key != NULL) {
		/* Full destroy (thread-key destructor): release the slab
		 * itself and the heap descriptor. */
		ber_memfree_x(sh->sh_base, NULL);
		ber_memfree_x(sh, NULL);
	}
}
    175 
/* Hooks handed to liblber so ber_* allocations go through this allocator. */
BerMemoryFunctions slap_sl_mfuncs =
	{ slap_sl_malloc, slap_sl_calloc, slap_sl_realloc, slap_sl_free };
    178 
    179 void
    180 slap_sl_mem_init()
    181 {
    182 	assert( Align == 1 << Align_log2 );
    183 
    184 	ber_set_option( NULL, LBER_OPT_MEMORY_FNS, &slap_sl_mfuncs );
    185 }
    186 
/* Create, reset or just return the memory context of the current thread. */
void *
slap_sl_mem_create(
	ber_len_t size,
	int stack,
	void *thrctx,
	int new
)
{
	void *memctx;
	struct slab_heap *sh;
	ber_len_t size_shift;
	struct slab_object *so;
	char *base, *newptr;
	/* Offset such that (base + Base_offset + head word) is Align'ed */
	enum { Base_offset = (unsigned) -sizeof(ber_len_t) % Align };

	sh = GET_MEMCTX(thrctx, &memctx);
	if ( sh && !new )
		return sh;

	/* Round up to doubleword boundary, then make room for initial
	 * padding, preserving expected available size for pool version */
	size = ((size + Align-1) & -Align) + Base_offset;

	if (!sh) {
		/* First use by this thread: allocate heap descriptor and
		 * slab, register the destructor with the thread pool. */
		sh = ch_malloc(sizeof(struct slab_heap));
		base = ch_malloc(size);
		SET_MEMCTX(thrctx, sh, slap_sl_mem_destroy);
		VGMEMP_MARK(base, size);
		VGMEMP_CREATE(sh, 0, 0);
	} else {
		/* Reset for reuse; grow the slab if a larger size is asked. */
		slap_sl_mem_destroy(NULL, sh);
		base = sh->sh_base;
		if (size > (ber_len_t) ((char *) sh->sh_end - base)) {
			newptr = ch_realloc(base, size);
			if ( newptr == NULL ) return NULL;
			VGMEMP_CHANGE(sh, base, newptr, size);
			base = newptr;
		}
		VGMEMP_TRIM(sh, base, 0);
	}
	sh->sh_base = base;
	sh->sh_end = base + size;

	/* Align (base + head of first block) == first returned block */
	base += Base_offset;
	size -= Base_offset;

	sh->sh_stack = stack;
	if (stack) {
		/* Stack flavor: just initialize the bump pointer. */
		sh->sh_last = base;

	} else {
		/* Buddy flavor: build free lists and allocation bitmaps for
		 * each order from order_start up to the slab's max order. */
		int i, order = -1, order_end = -1;

		/* order_end = floor(log2(size-1)), i.e. the max order. */
		size_shift = size - 1;
		do {
			order_end++;
		} while (size_shift >>= 1);
		order = order_end - order_start + 1;
		sh->sh_maxorder = order_end;

		sh->sh_free = (struct sh_freelist *)
						ch_malloc(order * sizeof(struct sh_freelist));
		for (i = 0; i < order; i++) {
			LDAP_LIST_INIT(&sh->sh_free[i]);
		}

		LDAP_LIST_INIT(&sh->sh_sopool);

		if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
			slap_replenish_sopool(sh);
		}
		/* Initially the whole slab is one free chunk of max order. */
		so = LDAP_LIST_FIRST(&sh->sh_sopool);
		LDAP_LIST_REMOVE(so, so_link);
		so->so_ptr = base;

		LDAP_LIST_INSERT_HEAD(&sh->sh_free[order-1], so, so_link);

		/* One bit per chunk at each order; at least one byte per map. */
		sh->sh_map = (unsigned char **)
					ch_malloc(order * sizeof(unsigned char *));
		for (i = 0; i < order; i++) {
			int shiftamt = order_start + 1 + i;
			int nummaps = size >> shiftamt;
			assert(nummaps);
			nummaps >>= 3;
			if (!nummaps) nummaps = 1;
			sh->sh_map[i] = (unsigned char *) ch_malloc(nummaps);
			memset(sh->sh_map[i], 0, nummaps);
		}
	}

	return sh;
}
    281 
/*
 * Assign memory context to thread context. Use NULL to detach
 * current memory context from thread. Future users must
 * know the context, since ch_free/slap_sl_context() cannot find it.
 */
void
slap_sl_mem_setctx(
	void *thrctx,
	void *memctx
)
{
	/* Registers slap_sl_mem_destroy as the thread-key destructor. */
	SET_MEMCTX(thrctx, memctx, slap_sl_mem_destroy);
}
    295 
void *
slap_sl_malloc(
    ber_len_t	size,
    void *ctx
)
{
	struct slab_heap *sh = ctx;
	ber_len_t *ptr, *newptr;

	/* ber_set_option calls us like this */
	if (No_sl_malloc || !ctx) {
		/* No context: plain heap allocation; abort on failure. */
		newptr = ber_memalloc_x( size, NULL );
		if ( newptr ) return newptr;
		Debug(LDAP_DEBUG_ANY, "slap_sl_malloc of %lu bytes failed\n",
			(unsigned long) size );
		assert( 0 );
		exit( EXIT_FAILURE );
	}

	/* Add room for head, ensure room for tail when freed, and
	 * round up to doubleword boundary. */
	size = (size + sizeof(ber_len_t) + Align-1 + !size) & -Align;

	if (sh->sh_stack) {
		/* Stack flavor: bump-allocate.  The head word stores
		 * sizeof(head+block) so free/realloc can walk the stack. */
		if (size < (ber_len_t) ((char *) sh->sh_end - (char *) sh->sh_last)) {
			newptr = sh->sh_last;
			sh->sh_last = (char *) sh->sh_last + size;
			VGMEMP_ALLOC(sh, newptr, size);
			*newptr++ = size;
			return( (void *)newptr );
		}

		/* Slab full: fall through to the ch_malloc fallback below. */
		size -= sizeof(ber_len_t);

	} else {
		struct slab_object *so_new, *so_left, *so_right;
		ber_len_t size_shift;
		unsigned long diff;
		int i, j, order = -1;

		/* order = smallest buddy order whose chunks hold 'size'. */
		size_shift = size - 1;
		do {
			order++;
		} while (size_shift >>= 1);

		size -= sizeof(ber_len_t);

		/* Find the lowest order >= 'order' with a free chunk. */
		for (i = order; i <= sh->sh_maxorder &&
				LDAP_LIST_EMPTY(&sh->sh_free[i-order_start]); i++);

		if (i == order) {
			/* Exact-order chunk available: set its bitmap bit. */
			so_new = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
			LDAP_LIST_REMOVE(so_new, so_link);
			ptr = so_new->so_ptr;
			diff = (unsigned long)((char*)ptr -
					(char*)sh->sh_base) >> (order + 1);
			sh->sh_map[order-order_start][diff>>3] |= (1 << (diff & 0x7));
			*ptr++ = size;
			LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_new, so_link);
			return((void*)ptr);
		} else if (i <= sh->sh_maxorder) {
			/* Split a larger chunk down to the wanted order,
			 * returning the right-hand buddies to the free lists. */
			for (j = i; j > order; j--) {
				so_left = LDAP_LIST_FIRST(&sh->sh_free[j-order_start]);
				LDAP_LIST_REMOVE(so_left, so_link);
				if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
					slap_replenish_sopool(sh);
				}
				so_right = LDAP_LIST_FIRST(&sh->sh_sopool);
				LDAP_LIST_REMOVE(so_right, so_link);
				so_right->so_ptr = (void *)((char *)so_left->so_ptr + (1 << j));
				if (j == order + 1) {
					/* Reached target order: hand out the left half. */
					ptr = so_left->so_ptr;
					diff = (unsigned long)((char*)ptr -
							(char*)sh->sh_base) >> (order+1);
					sh->sh_map[order-order_start][diff>>3] |=
							(1 << (diff & 0x7));
					*ptr++ = size;
					LDAP_LIST_INSERT_HEAD(
							&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, so_left, so_link);
					return((void*)ptr);
				} else {
					LDAP_LIST_INSERT_HEAD(
							&sh->sh_free[j-1-order_start], so_right, so_link);
					LDAP_LIST_INSERT_HEAD(
							&sh->sh_free[j-1-order_start], so_left, so_link);
				}
			}
		}
		/* FIXME: missing return; guessing we failed... */
	}

	/* Slab exhausted: fall back to the heap.  NOTE: memory allocated
	 * here is NOT reclaimed by a slab reset (see file header). */
	Debug(LDAP_DEBUG_TRACE,
		"sl_malloc %lu: ch_malloc\n",
		(unsigned long) size );
	return ch_malloc(size);
}
    393 
    394 #define LIM_SQRT(t) /* some value < sqrt(max value of unsigned type t) */ \
    395 	((0UL|(t)-1) >>31>>31 > 1 ? ((t)1 <<32) - 1 : \
    396 	 (0UL|(t)-1) >>31 ? 65535U : (0UL|(t)-1) >>15 ? 255U : 15U)
    397 
    398 void *
    399 slap_sl_calloc( ber_len_t n, ber_len_t size, void *ctx )
    400 {
    401 	void *newptr;
    402 	ber_len_t total = n * size;
    403 
    404 	/* The sqrt test is a slight optimization: often avoids the division */
    405 	if ((n | size) <= LIM_SQRT(ber_len_t) || n == 0 || total/n == size) {
    406 		newptr = slap_sl_malloc( total, ctx );
    407 		memset( newptr, 0, n*size );
    408 	} else {
    409 		Debug(LDAP_DEBUG_ANY, "slap_sl_calloc(%lu,%lu) out of range\n",
    410 			(unsigned long) n, (unsigned long) size );
    411 		assert(0);
    412 		exit(EXIT_FAILURE);
    413 	}
    414 	return newptr;
    415 }
    416 
void *
slap_sl_realloc(void *ptr, ber_len_t size, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t oldsize, *p = (ber_len_t *) ptr, *nextp;
	void *newptr;

	if (ptr == NULL)
		return slap_sl_malloc(size, ctx);

	/* Not our memory? */
	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		/* Like ch_realloc(), except not trying a new context */
		newptr = ber_memrealloc_x(ptr, size, NULL);
		if (newptr) {
			return newptr;
		}
		Debug(LDAP_DEBUG_ANY, "slap_sl_realloc of %lu bytes failed\n",
			(unsigned long) size );
		assert(0);
		exit( EXIT_FAILURE );
	}

	if (size == 0) {
		/* realloc(ptr, 0) acts as free. */
		slap_sl_free(ptr, ctx);
		return NULL;
	}

	/* Head word: sizeof(head+block); low bit is the freed flag. */
	oldsize = p[-1];

	if (sh->sh_stack) {
		/* Add room for head, round up to doubleword boundary */
		size = (size + sizeof(ber_len_t) + Align-1) & -Align;

		p--;

		/* Never shrink blocks */
		if (size <= oldsize) {
			return ptr;
		}

		oldsize &= -2;	/* strip the freed flag */
		nextp = (ber_len_t *) ((char *) p + oldsize);

		/* If reallocing the last block, try to grow it */
		if (nextp == sh->sh_last) {
			if (size < (ber_len_t) ((char *) sh->sh_end - (char *) p)) {
				sh->sh_last = (char *) p + size;
				p[0] = (p[0] & 1) | size;
				return ptr;
			}

		/* Nowhere to grow, need to alloc and copy */
		} else {
			/* Slight optimization of the final realloc variant */
			newptr = slap_sl_malloc(size-sizeof(ber_len_t), ctx);
			AC_MEMCPY(newptr, ptr, oldsize-sizeof(ber_len_t));
			/* Not last block, can just mark old region as free */
			nextp[-1] = oldsize;
			nextp[0] |= 1;
			return newptr;
		}

		size -= sizeof(ber_len_t);
		oldsize -= sizeof(ber_len_t);

	} else if (oldsize > size) {
		/* Buddy flavor, shrinking: only copy what the caller keeps. */
		oldsize = size;
	}

	/* Generic path: allocate new block, copy, free the old one. */
	newptr = slap_sl_malloc(size, ctx);
	AC_MEMCPY(newptr, ptr, oldsize);
	slap_sl_free(ptr, ctx);
	return newptr;
}
    492 
void
slap_sl_free(void *ptr, void *ctx)
{
	struct slab_heap *sh = ctx;
	ber_len_t size;
	ber_len_t *p = ptr, *nextp, *tmpp;

	if (!ptr)
		return;

	/* Memory outside this slab came from plain ber_memalloc. */
	if (No_sl_malloc || !sh || ptr < sh->sh_base || ptr >= sh->sh_end) {
		ber_memfree_x(ptr, NULL);
		return;
	}

	/* Head word: sizeof(head+block); low bit is the freed flag. */
	size = *(--p);

	if (sh->sh_stack) {
		size &= -2;
		nextp = (ber_len_t *) ((char *) p + size);
		if (sh->sh_last != nextp) {
			/* Mark it free: tail = size, head of next block |= 1 */
			nextp[-1] = size;
			nextp[0] |= 1;
			/* We can't tell Valgrind about it yet, because we
			 * still need read/write access to this block for
			 * when we eventually get to reclaim it.
			 */
		} else {
			/* Reclaim freed block(s) off tail */
			while (*p & 1) {
				p = (ber_len_t *) ((char *) p - p[-1]);
			}
			sh->sh_last = p;
			VGMEMP_TRIM(sh, sh->sh_base,
				(char *) sh->sh_last - (char *) sh->sh_base);
		}

	} else {
		/* Buddy flavor: clear the chunk's bitmap bit and merge with
		 * its buddy at each order while the buddy is also free. */
		int size_shift, order_size;
		struct slab_object *so;
		unsigned long diff;
		int i, inserted = 0, order = -1;

		/* Order of the chunk being freed. */
		size_shift = size + sizeof(ber_len_t) - 1;
		do {
			order++;
		} while (size_shift >>= 1);

		for (i = order, tmpp = p; i <= sh->sh_maxorder; i++) {
			order_size = 1 << (i+1);
			diff = (unsigned long)((char*)tmpp - (char*)sh->sh_base) >> (i+1);
			sh->sh_map[i-order_start][diff>>3] &= (~(1 << (diff & 0x7)));
			if (diff == ((diff>>1)<<1)) {
				/* Even index: the buddy is the chunk to the right. */
				if (!(sh->sh_map[i-order_start][(diff+1)>>3] &
						(1<<((diff+1)&0x7)))) {
					/* Buddy's bit clear: look for it on this
					 * order's free list so we can coalesce. */
					so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
					while (so) {
						if ((char*)so->so_ptr == (char*)tmpp) {
							LDAP_LIST_REMOVE( so, so_link );
						} else if ((char*)so->so_ptr ==
								(char*)tmpp + order_size) {
							LDAP_LIST_REMOVE(so, so_link);
							break;
						}
						so = LDAP_LIST_NEXT(so, so_link);
					}
					if (so) {
						/* Merged: promote to the next order. */
						if (i < sh->sh_maxorder) {
							inserted = 1;
							so->so_ptr = tmpp;
							LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
									so, so_link);
						}
						continue;
					} else {
						/* No free-list entry despite clear bit:
						 * just record this chunk as free.
						 * NOTE(review): the Debug/assert below is
						 * unreachable after the break (upstream). */
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
								so, so_link);
						break;

						Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
							"free object not found while bit is clear.\n" );
						assert(so != NULL);

					}
				} else {
					/* Buddy still allocated: chunk stays at this order. */
					if (!inserted) {
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
								so, so_link);
					}
					break;
				}
			} else {
				/* Odd index: the buddy is the chunk to the left. */
				if (!(sh->sh_map[i-order_start][(diff-1)>>3] &
						(1<<((diff-1)&0x7)))) {
					so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
					while (so) {
						if ((char*)so->so_ptr == (char*)tmpp) {
							LDAP_LIST_REMOVE(so, so_link);
						} else if ((char*)tmpp == (char *)so->so_ptr + order_size) {
							LDAP_LIST_REMOVE(so, so_link);
							/* Merged block starts at the left buddy. */
							tmpp = so->so_ptr;
							break;
						}
						so = LDAP_LIST_NEXT(so, so_link);
					}
					if (so) {
						if (i < sh->sh_maxorder) {
							inserted = 1;
							LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start+1],
									so, so_link);
							continue;
						}
					} else {
						/* NOTE(review): the Debug/assert below is
						 * unreachable after the break (upstream). */
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
								so, so_link);
						break;

						Debug(LDAP_DEBUG_TRACE, "slap_sl_free: "
							"free object not found while bit is clear.\n" );
						assert(so != NULL);

					}
				} else {
					/* Left buddy still allocated: stay at this order. */
					if ( !inserted ) {
						if (LDAP_LIST_EMPTY(&sh->sh_sopool)) {
							slap_replenish_sopool(sh);
						}
						so = LDAP_LIST_FIRST(&sh->sh_sopool);
						LDAP_LIST_REMOVE(so, so_link);
						so->so_ptr = tmpp;
						LDAP_LIST_INSERT_HEAD(&sh->sh_free[i-order_start],
								so, so_link);
					}
					break;
				}
			}
		}
	}
}
    650 
    651 void
    652 slap_sl_release( void *ptr, void *ctx )
    653 {
    654 	struct slab_heap *sh = ctx;
    655 	if ( sh && ptr >= sh->sh_base && ptr <= sh->sh_end )
    656 		sh->sh_last = ptr;
    657 }
    658 
    659 void *
    660 slap_sl_mark( void *ctx )
    661 {
    662 	struct slab_heap *sh = ctx;
    663 	return sh->sh_last;
    664 }
    665 
    666 /*
    667  * Return the memory context of the current thread if the given block of
    668  * memory belongs to it, otherwise return NULL.
    669  */
    670 void *
    671 slap_sl_context( void *ptr )
    672 {
    673 	void *memctx;
    674 	struct slab_heap *sh;
    675 
    676 	if ( slapMode & SLAP_TOOL_MODE ) return NULL;
    677 
    678 	sh = GET_MEMCTX(ldap_pvt_thread_pool_context(), &memctx);
    679 	if (sh && ptr >= sh->sh_base && ptr <= sh->sh_end) {
    680 		return sh;
    681 	}
    682 	return NULL;
    683 }
    684 
    685 static struct slab_object *
    686 slap_replenish_sopool(
    687     struct slab_heap* sh
    688 )
    689 {
    690     struct slab_object *so_block;
    691     int i;
    692 
    693     so_block = (struct slab_object *)ch_malloc(
    694                     SLAP_SLAB_SOBLOCK * sizeof(struct slab_object));
    695 
    696     if ( so_block == NULL ) {
    697         return NULL;
    698     }
    699 
    700     so_block[0].so_blockhead = 1;
    701     LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[0], so_link);
    702     for (i = 1; i < SLAP_SLAB_SOBLOCK; i++) {
    703         so_block[i].so_blockhead = 0;
    704         LDAP_LIST_INSERT_HEAD(&sh->sh_sopool, &so_block[i], so_link );
    705     }
    706 
    707     return so_block;
    708 }
    709 
    710 #ifdef SLAPD_UNUSED
/* Debug aid: dump a buddy-flavor heap's bitmaps and free lists. */
static void
print_slheap(int level, void *ctx)
{
	struct slab_heap *sh = ctx;
	struct slab_object *so;
	int i, j, once = 0;

	if (!ctx) {
		Debug(level, "NULL memctx\n" );
		return;
	}

	Debug(level, "sh->sh_maxorder=%d\n", sh->sh_maxorder );

	/* For each order: allocation bitmap bytes, then the free list. */
	for (i = order_start; i <= sh->sh_maxorder; i++) {
		once = 0;
		Debug(level, "order=%d\n", i );
		for (j = 0; j < (1<<(sh->sh_maxorder-i))/8; j++) {
			Debug(level, "%02x ", sh->sh_map[i-order_start][j] );
			once = 1;
		}
		/* Maps smaller than one byte still print their single byte. */
		if (!once) {
			Debug(level, "%02x ", sh->sh_map[i-order_start][0] );
		}
		Debug(level, "\n" );
		Debug(level, "free list:\n" );
		so = LDAP_LIST_FIRST(&sh->sh_free[i-order_start]);
		while (so) {
			Debug(level, "%p\n", so->so_ptr );
			so = LDAP_LIST_NEXT(so, so_link);
		}
	}
}
    744 #endif
    745