/* $NetBSD: mem.c,v 1.16 2024/09/22 00:14:08 christos Exp $ */

/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */
17
18 #include <errno.h>
19 #include <inttypes.h>
20 #include <limits.h>
21 #include <stdbool.h>
22 #include <stddef.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25
26 #include <isc/align.h>
27 #include <isc/hash.h>
28 #include <isc/magic.h>
29 #include <isc/mem.h>
30 #include <isc/mutex.h>
31 #include <isc/once.h>
32 #include <isc/os.h>
33 #include <isc/print.h>
34 #include <isc/refcount.h>
35 #include <isc/string.h>
36 #include <isc/types.h>
37 #include <isc/util.h>
38
39 #ifdef HAVE_LIBXML2
40 #include <libxml/xmlwriter.h>
41 #define ISC_XMLCHAR (const xmlChar *)
42 #endif /* HAVE_LIBXML2 */
43
44 #ifdef HAVE_JSON_C
45 #include <json_object.h>
46 #endif /* HAVE_JSON_C */
47
48 /* On DragonFly BSD the header does not provide jemalloc API */
49 #if defined(HAVE_MALLOC_NP_H) && !defined(__DragonFly__)
50 #include <malloc_np.h>
51 #define JEMALLOC_API_SUPPORTED 1
52 #elif defined(HAVE_JEMALLOC)
53 #include <jemalloc/jemalloc.h>
54 #define JEMALLOC_API_SUPPORTED 1
55
56 #if JEMALLOC_VERSION_MAJOR < 4
57 #define sdallocx(ptr, size, flags) dallocx(ptr, flags)
58 #define MALLOCX_TCACHE_NONE (0)
59 #endif /* JEMALLOC_VERSION_MAJOR < 4 */
60
61 #else
62 #include "jemalloc_shim.h"
63 #endif
64
65 #include "mem_p.h"
66
/* Locking shorthand for a memory context's internal mutex. */
#define MCTXLOCK(m)   LOCK(&m->lock)
#define MCTXUNLOCK(m) UNLOCK(&m->lock)

#ifndef ISC_MEM_DEBUGGING
#define ISC_MEM_DEBUGGING 0
#endif /* ifndef ISC_MEM_DEBUGGING */
/* Runtime-selectable debugging flags (ISC_MEM_DEBUG* bits). */
unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
unsigned int isc_mem_defaultflags = ISC_MEMFLAG_DEFAULT;

/* Sentinel meaning "no dedicated jemalloc arena assigned". */
#define ISC_MEM_ILLEGAL_ARENA (UINT_MAX)

/*
 * NOTE(review): this otherwise-unused volatile pointer presumably exists
 * to force the jemalloc symbols to be linked in — confirm against build
 * system notes before removing.
 */
volatile void *isc__mem_malloc = mallocx;

/*
 * Constants.
 */

#define ZERO_ALLOCATION_SIZE sizeof(void *)
#define ALIGNMENT	     8U /*%< must be a power of 2 */
#define ALIGNMENT_SIZE	     sizeof(size_info)
#define DEBUG_TABLE_COUNT    512U
#define STATS_BUCKETS	     512U
#define STATS_BUCKET_SIZE    32U

/*
 * Types.
 */
#if ISC_MEM_TRACKLINES
/* One record per live allocation when ISC_MEM_DEBUGRECORD is enabled. */
typedef struct debuglink debuglink_t;
struct debuglink {
	ISC_LINK(debuglink_t) link;
	const void *ptr;
	size_t size;
	const char *file;
	unsigned int line;
};

typedef ISC_LIST(debuglink_t) debuglist_t;

/* Optional trailing file/line arguments for the allocation entry points. */
#define FLARG_PASS , file, line
#define FLARG	   , const char *file, unsigned int line
#else /* if ISC_MEM_TRACKLINES */
#define FLARG_PASS
#define FLARG
#endif /* if ISC_MEM_TRACKLINES */

/* Free-list node; overlaid on each free mempool item. */
typedef struct element element;
struct element {
	element *next;
};

/* Per-size-bucket counters: current and lifetime number of gets. */
struct stats {
	atomic_size_t gets;
	atomic_size_t totalgets;
};

#define MEM_MAGIC	 ISC_MAGIC('M', 'e', 'm', 'C')
#define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)

/* List of all active memory contexts. */

static ISC_LIST(isc_mem_t) contexts;

static isc_once_t init_once = ISC_ONCE_INIT;
static isc_once_t shut_once = ISC_ONCE_INIT;
static isc_mutex_t contextslock;

/*%
 * Total size of lost memory due to a bug of external library.
 * Locked by the global lock.
 */
static uint64_t totallost;
139
struct isc_mem {
	unsigned int magic; /*%< MEM_MAGIC while the context is valid */
	unsigned int flags; /*%< ISC_MEMFLAG_* behavior flags */
	unsigned int jemalloc_flags; /*%< OR-ed into every jemalloc call */
	unsigned int jemalloc_arena; /*%< arena no., or ISC_MEM_ILLEGAL_ARENA */
	isc_mutex_t lock;
	bool checkfree; /*%< assert on leaks at destroy time */
	struct stats stats[STATS_BUCKETS + 1]; /*%< last entry = overflow */
	isc_refcount_t references;
	char name[16]; /*%< display name for stats reports */
	atomic_size_t total;	   /*%< lifetime bytes requested */
	atomic_size_t inuse;	   /*%< bytes currently outstanding */
	atomic_size_t maxinuse;	   /*%< high-water mark of inuse */
	atomic_size_t malloced;	   /*%< bytes counted, incl. bookkeeping */
	atomic_size_t maxmalloced; /*%< high-water mark of malloced */
	atomic_bool hi_called; /*%< hi-water callback fired, not yet acked */
	atomic_bool is_overmem; /*%< between hi- and lo-water crossings */
	isc_mem_water_t water;	/*%< watermark callback (may be NULL) */
	void *water_arg;
	atomic_size_t hi_water;
	atomic_size_t lo_water;
	ISC_LIST(isc_mempool_t) pools; /*%< pools using this context */
	unsigned int poolcnt;

#if ISC_MEM_TRACKLINES
	debuglist_t *debuglist; /*%< hash table of live allocations */
	size_t debuglistcnt;
#endif /* if ISC_MEM_TRACKLINES */

	ISC_LINK(isc_mem_t) link; /*%< entry in the global contexts list */
};

#define MEMPOOL_MAGIC	 ISC_MAGIC('M', 'E', 'M', 'p')
#define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)

struct isc_mempool {
	/* always unlocked */
	unsigned int magic;
	isc_mem_t *mctx;	      /*%< our memory context */
	ISC_LINK(isc_mempool_t) link; /*%< next pool in this mem context */
	element *items;		      /*%< low water item list */
	size_t size;		      /*%< size of each item on this pool */
	size_t allocated;	      /*%< # of items currently given out */
	size_t freecount;	      /*%< # of items on reserved list */
	size_t freemax;		      /*%< # of items allowed on free list */
	size_t fillcount;	      /*%< # of items to fetch on each fill */
	/*%< Stats only. */
	size_t gets; /*%< # of requests to this pool */
	/*%< Debugging only. */
	char name[16]; /*%< printed name in stats reports */
};
191
/*
 * Private Inline-able.
 */

#if !ISC_MEM_TRACKLINES
#define ADD_TRACE(a, b, c, d, e)
#define DELETE_TRACE(a, b, c, d, e)
#define ISC_MEMFUNC_SCOPE
#else /* if !ISC_MEM_TRACKLINES */
#define TRACE_OR_RECORD (ISC_MEM_DEBUGTRACE | ISC_MEM_DEBUGRECORD)

/* Trace only when a debug mode is active and the pointer is non-NULL. */
#define SHOULD_TRACE_OR_RECORD(ptr) \
	((isc_mem_debugging & TRACE_OR_RECORD) != 0 && ptr != NULL)

#define ADD_TRACE(a, b, c, d, e)                \
	if (SHOULD_TRACE_OR_RECORD(b)) {        \
		add_trace_entry(a, b, c, d, e); \
	}

#define DELETE_TRACE(a, b, c, d, e)                \
	if (SHOULD_TRACE_OR_RECORD(b)) {           \
		delete_trace_entry(a, b, c, d, e); \
	}

static void
print_active(isc_mem_t *ctx, FILE *out);
#endif /* ISC_MEM_TRACKLINES */
219
220 static size_t
221 increment_malloced(isc_mem_t *ctx, size_t size) {
222 size_t malloced = atomic_fetch_add_relaxed(&ctx->malloced, size) + size;
223 size_t maxmalloced = atomic_load_relaxed(&ctx->maxmalloced);
224
225 if (malloced > maxmalloced) {
226 atomic_compare_exchange_strong(&ctx->maxmalloced, &maxmalloced,
227 malloced);
228 }
229
230 return (malloced);
231 }
232
233 static size_t
234 decrement_malloced(isc_mem_t *ctx, size_t size) {
235 size_t malloced = atomic_fetch_sub_relaxed(&ctx->malloced, size) - size;
236
237 return (malloced);
238 }
239
240 #if ISC_MEM_TRACKLINES
241 /*!
242 * mctx must not be locked.
243 */
/*!
 * Record a live allocation in mctx->debuglist (and optionally trace it
 * to stderr).  mctx must not be locked.
 */
static void
add_trace_entry(isc_mem_t *mctx, const void *ptr, size_t size FLARG) {
	debuglink_t *dl = NULL;
	uint32_t hash;
	uint32_t idx;

	MCTXLOCK(mctx);

	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "add %p size %zu file %s line %u mctx %p\n",
			ptr, size, file, line, mctx);
	}

	/* Recording is only active when ISC_MEM_DEBUGRECORD was set. */
	if (mctx->debuglist == NULL) {
		goto unlock;
	}

#ifdef __COVERITY__
	/*
	 * Use simple conversion from pointer to hash to avoid
	 * tainting 'ptr' due to byte swap in isc_hash_function.
	 */
	hash = (uintptr_t)ptr >> 3;
#else
	hash = isc_hash_function(&ptr, sizeof(ptr), true);
#endif
	idx = hash % DEBUG_TABLE_COUNT;

	/* The tracking node itself is counted in the malloced totals. */
	dl = mallocx(sizeof(*dl), mctx->jemalloc_flags);
	INSIST(dl != NULL);
	increment_malloced(mctx, sizeof(*dl));

	ISC_LINK_INIT(dl, link);
	dl->ptr = ptr;
	dl->size = size;
	dl->file = file;
	dl->line = line;

	ISC_LIST_PREPEND(mctx->debuglist[idx], dl, link);
	mctx->debuglistcnt++;
unlock:
	MCTXUNLOCK(mctx);
}
287
/*!
 * Remove the record of a live allocation from mctx->debuglist.  It is a
 * fatal error (UNREACHABLE) if the pointer was never recorded.
 * mctx must not be locked.
 */
static void
delete_trace_entry(isc_mem_t *mctx, const void *ptr, size_t size,
		   const char *file, unsigned int line) {
	debuglink_t *dl = NULL;
	uint32_t hash;
	uint32_t idx;

	MCTXLOCK(mctx);

	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "del %p size %zu file %s line %u mctx %p\n",
			ptr, size, file, line, mctx);
	}

	/* Recording is only active when ISC_MEM_DEBUGRECORD was set. */
	if (mctx->debuglist == NULL) {
		goto unlock;
	}

#ifdef __COVERITY__
	/*
	 * Use simple conversion from pointer to hash to avoid
	 * tainting 'ptr' due to byte swap in isc_hash_function.
	 */
	hash = (uintptr_t)ptr >> 3;
#else
	hash = isc_hash_function(&ptr, sizeof(ptr), true);
#endif
	idx = hash % DEBUG_TABLE_COUNT;

	/* Linear scan of the hash chain for the matching pointer. */
	dl = ISC_LIST_HEAD(mctx->debuglist[idx]);
	while (dl != NULL) {
		if (dl->ptr == ptr) {
			ISC_LIST_UNLINK(mctx->debuglist[idx], dl, link);
			decrement_malloced(mctx, sizeof(*dl));
			sdallocx(dl, sizeof(*dl), mctx->jemalloc_flags);
			goto unlock;
		}
		dl = ISC_LIST_NEXT(dl, link);
	}

	/*
	 * If we get here, we didn't find the item on the list. We're
	 * screwed.
	 */
	UNREACHABLE();
unlock:
	MCTXUNLOCK(mctx);
}
336 #endif /* ISC_MEM_TRACKLINES */
337
/* Never ask the allocator for zero bytes; substitute a small sentinel. */
#define ADJUST_ZERO_ALLOCATION_SIZE(s)   \
	if (s == 0) {                    \
		s = ZERO_ALLOCATION_SIZE; \
	}

/* Translate an alignment request into jemalloc MALLOCX_ALIGN() flags. */
#define MEM_ALIGN(a) ((a) ? MALLOCX_ALIGN(a) : 0)
344
/*!
 * Perform a malloc, doing memory filling and overrun detection as necessary.
 */
static void *
mem_get(isc_mem_t *ctx, size_t size, int flags) {
	char *ret = NULL;

	/* Zero-sized requests are rounded up to a small sentinel size. */
	ADJUST_ZERO_ALLOCATION_SIZE(size);

	/* Allocation failure is fatal by policy (INSIST). */
	ret = mallocx(size, flags | ctx->jemalloc_flags);
	INSIST(ret != NULL);

	if ((ctx->flags & ISC_MEMFLAG_FILL) != 0) {
		memset(ret, 0xbe, size); /* Mnemonic for "beef". */
	}

	return (ret);
}
363
/*!
 * Perform a free, doing memory filling and overrun detection as necessary.
 */
/* coverity[+free : arg-1] */
static void
mem_put(isc_mem_t *ctx, void *mem, size_t size, int flags) {
	/* 'size' must match the (adjusted) size used at allocation time. */
	ADJUST_ZERO_ALLOCATION_SIZE(size);

	if ((ctx->flags & ISC_MEMFLAG_FILL) != 0) {
		memset(mem, 0xde, size); /* Mnemonic for "dead". */
	}
	sdallocx(mem, size, flags | ctx->jemalloc_flags);
}
377
378 static void *
379 mem_realloc(isc_mem_t *ctx, void *old_ptr, size_t old_size, size_t new_size,
380 int flags) {
381 void *new_ptr = NULL;
382
383 ADJUST_ZERO_ALLOCATION_SIZE(new_size);
384
385 new_ptr = rallocx(old_ptr, new_size, flags | ctx->jemalloc_flags);
386 INSIST(new_ptr != NULL);
387
388 if ((ctx->flags & ISC_MEMFLAG_FILL) != 0) {
389 ssize_t diff_size = new_size - old_size;
390 void *diff_ptr = (uint8_t *)new_ptr + old_size;
391 if (diff_size > 0) {
392 /* Mnemonic for "beef". */
393 memset(diff_ptr, 0xbe, diff_size);
394 }
395 }
396
397 return (new_ptr);
398 }
399
/*
 * Map an allocation size to its stats bucket; all sizes beyond the last
 * bucket share the overflow bucket at index STATS_BUCKETS.
 */
#define stats_bucket(ctx, size)                          \
	((size / STATS_BUCKET_SIZE) >= STATS_BUCKETS     \
		 ? &ctx->stats[STATS_BUCKETS]            \
		 : &ctx->stats[size / STATS_BUCKET_SIZE])
404
/*!
 * Update internal counters after a memory get.
 */
static void
mem_getstats(isc_mem_t *ctx, size_t size) {
	struct stats *stats = stats_bucket(ctx, size);

	atomic_fetch_add_relaxed(&ctx->total, size);
	/* Release: pairs with the acquire loads in the watermark checks. */
	atomic_fetch_add_release(&ctx->inuse, size);

	atomic_fetch_add_relaxed(&stats->gets, 1);
	atomic_fetch_add_relaxed(&stats->totalgets, 1);

	increment_malloced(ctx, size);
}
420
421 /*!
422 * Update internal counters after a memory put.
423 */
424 static void
425 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
426 struct stats *stats = stats_bucket(ctx, size);
427 atomic_size_t s, g;
428
429 UNUSED(ptr);
430
431 s = atomic_fetch_sub_release(&ctx->inuse, size);
432 INSIST(s >= size);
433
434 g = atomic_fetch_sub_release(&stats->gets, 1);
435 INSIST(g >= 1);
436
437 decrement_malloced(ctx, size);
438 }
439
440 /*
441 * Private.
442 */
443
/*
 * Create a dedicated jemalloc arena, returning its number through
 * '*pnew_arenano'.  Returns false only when mallctl() fails; when the
 * jemalloc control API is unavailable, the sentinel
 * ISC_MEM_ILLEGAL_ARENA is returned and the call still succeeds.
 */
static bool
mem_jemalloc_arena_create(unsigned int *pnew_arenano) {
	REQUIRE(pnew_arenano != NULL);

#if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
	unsigned int arenano = 0;
	size_t len = sizeof(arenano);
	int res = 0;

	res = mallctl("arenas.create", &arenano, &len, NULL, 0);
	if (res != 0) {
		return (false);
	}

	*pnew_arenano = arenano;

	return (true);
#else
	*pnew_arenano = ISC_MEM_ILLEGAL_ARENA;
	return (true);
#endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
}
466
/*
 * Destroy a jemalloc arena previously created by
 * mem_jemalloc_arena_create().  A no-op (returning true) when the
 * jemalloc control API is unavailable.
 */
static bool
mem_jemalloc_arena_destroy(unsigned int arenano) {
#if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
	int res = 0;
	char buf[256] = { 0 };

	(void)snprintf(buf, sizeof(buf), "arena.%u.destroy", arenano);
	res = mallctl(buf, NULL, NULL, NULL, 0);
	if (res != 0) {
		return (false);
	}

	return (true);
#else
	UNUSED(arenano);
	return (true);
#endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
}
485
486 static void
487 mem_initialize(void) {
488 isc_mutex_init(&contextslock);
489 ISC_LIST_INIT(contexts);
490 totallost = 0;
491 }
492
void
isc__mem_initialize(void) {
	/* Idempotent: isc_once_do() runs mem_initialize() exactly once. */
	RUNTIME_CHECK(isc_once_do(&init_once, mem_initialize) == ISC_R_SUCCESS);
}
497
static void
mem_shutdown(void) {
	/* Verify that every context has been destroyed, then tear down. */
	isc__mem_checkdestroyed();

	isc_mutex_destroy(&contextslock);
}
504
void
isc__mem_shutdown(void) {
	/* Idempotent: isc_once_do() runs mem_shutdown() exactly once. */
	RUNTIME_CHECK(isc_once_do(&shut_once, mem_shutdown) == ISC_R_SUCCESS);
}
509
/*
 * Allocate and initialize a new memory context with the given behavior
 * and jemalloc flags, register it in the global contexts list, and hand
 * it back through '*ctxp' with one reference held.
 */
static void
mem_create(isc_mem_t **ctxp, unsigned int flags, unsigned int jemalloc_flags) {
	isc_mem_t *ctx = NULL;

	REQUIRE(ctxp != NULL && *ctxp == NULL);

	/* Cache-line align the context itself. */
	ctx = mallocx(sizeof(*ctx),
		      MALLOCX_ALIGN(isc_os_cacheline()) | jemalloc_flags);
	INSIST(ctx != NULL);

	*ctx = (isc_mem_t){
		.magic = MEM_MAGIC,
		.flags = flags,
		.jemalloc_flags = jemalloc_flags,
		.jemalloc_arena = ISC_MEM_ILLEGAL_ARENA,
		.checkfree = true,
	};

	isc_mutex_init(&ctx->lock);
	isc_refcount_init(&ctx->references, 1);

	/* The context's own allocation is counted in malloced/maxmalloced. */
	atomic_init(&ctx->total, 0);
	atomic_init(&ctx->inuse, 0);
	atomic_init(&ctx->maxinuse, 0);
	atomic_init(&ctx->malloced, sizeof(*ctx));
	atomic_init(&ctx->maxmalloced, sizeof(*ctx));
	atomic_init(&ctx->hi_water, 0);
	atomic_init(&ctx->lo_water, 0);
	atomic_init(&ctx->hi_called, false);
	atomic_init(&ctx->is_overmem, false);

	for (size_t i = 0; i < STATS_BUCKETS + 1; i++) {
		atomic_init(&ctx->stats[i].gets, 0);
		atomic_init(&ctx->stats[i].totalgets, 0);
	}
	ISC_LIST_INIT(ctx->pools);

#if ISC_MEM_TRACKLINES
	/* With ISC_MEM_DEBUGRECORD, keep a hash table of live allocations. */
	if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
		unsigned int i;

		ctx->debuglist =
			mallocx((DEBUG_TABLE_COUNT * sizeof(debuglist_t)),
				ctx->jemalloc_flags);
		INSIST(ctx->debuglist != NULL);

		for (i = 0; i < DEBUG_TABLE_COUNT; i++) {
			ISC_LIST_INIT(ctx->debuglist[i]);
		}
		increment_malloced(ctx,
				   DEBUG_TABLE_COUNT * sizeof(debuglist_t));
	}
#endif /* if ISC_MEM_TRACKLINES */

	/* Register in the global list of active contexts. */
	LOCK(&contextslock);
	ISC_LIST_INITANDAPPEND(contexts, ctx, link);
	UNLOCK(&contextslock);

	*ctxp = ctx;
}
570
571 /*
572 * Public.
573 */
574
/*
 * Final teardown of a memory context: unlink it from the global list,
 * optionally verify nothing leaked, and release all bookkeeping storage.
 * Called only when the reference count has reached zero.
 */
static void
destroy(isc_mem_t *ctx) {
	unsigned int i;
	size_t malloced;
	unsigned int arena_no;

	LOCK(&contextslock);
	ISC_LIST_UNLINK(contexts, ctx, link);
	totallost += isc_mem_inuse(ctx);
	UNLOCK(&contextslock);

	ctx->magic = 0;

	/* Save the arena number; the arena outlives the context struct. */
	arena_no = ctx->jemalloc_arena;

	INSIST(ISC_LIST_EMPTY(ctx->pools));

#if ISC_MEM_TRACKLINES
	if (ctx->debuglist != NULL) {
		debuglink_t *dl;
		/* Dump and (with checkfree) assert on any live records. */
		for (i = 0; i < DEBUG_TABLE_COUNT; i++) {
			for (dl = ISC_LIST_HEAD(ctx->debuglist[i]); dl != NULL;
			     dl = ISC_LIST_HEAD(ctx->debuglist[i]))
			{
				if (ctx->checkfree && dl->ptr != NULL) {
					print_active(ctx, stderr);
				}
				INSIST(!ctx->checkfree || dl->ptr == NULL);

				ISC_LIST_UNLINK(ctx->debuglist[i], dl, link);
				sdallocx(dl, sizeof(*dl), ctx->jemalloc_flags);
				decrement_malloced(ctx, sizeof(*dl));
			}
		}

		sdallocx(ctx->debuglist,
			 (DEBUG_TABLE_COUNT * sizeof(debuglist_t)),
			 ctx->jemalloc_flags);
		decrement_malloced(ctx,
				   DEBUG_TABLE_COUNT * sizeof(debuglist_t));
	}
#endif /* if ISC_MEM_TRACKLINES */

	if (ctx->checkfree) {
		/* Any bucket with outstanding gets indicates a leak. */
		for (i = 0; i <= STATS_BUCKETS; i++) {
			struct stats *stats = &ctx->stats[i];
			size_t gets = atomic_load_acquire(&stats->gets);
			if (gets != 0U) {
				fprintf(stderr,
					"Failing assertion due to probable "
					"leaked memory in context %p (\"%s\") "
					"(stats[%u].gets == %zu).\n",
					ctx, ctx->name, i, gets);
#if ISC_MEM_TRACKLINES
				print_active(ctx, stderr);
#endif /* if ISC_MEM_TRACKLINES */
				INSIST(gets == 0U);
			}
		}
	}

	isc_mutex_destroy(&ctx->lock);

	/* After removing the context itself, the count should hit zero. */
	malloced = decrement_malloced(ctx, sizeof(*ctx));

	if (ctx->checkfree) {
		INSIST(malloced == 0);
	}
	sdallocx(ctx, sizeof(*ctx),
		 MALLOCX_ALIGN(isc_os_cacheline()) | ctx->jemalloc_flags);

	if (arena_no != ISC_MEM_ILLEGAL_ARENA) {
		RUNTIME_CHECK(mem_jemalloc_arena_destroy(arena_no) == true);
	}
}
650
/* Take a new reference on 'source' and store it in '*targetp'. */
void
isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
	REQUIRE(VALID_CONTEXT(source));
	REQUIRE(targetp != NULL && *targetp == NULL);

	isc_refcount_increment(&source->references);

	*targetp = source;
}
660
/* Drop one reference; the last reference destroys the context. */
void
isc__mem_detach(isc_mem_t **ctxp FLARG) {
	isc_mem_t *ctx = NULL;

	REQUIRE(ctxp != NULL && VALID_CONTEXT(*ctxp));

	ctx = *ctxp;
	*ctxp = NULL;

	if (isc_refcount_decrement(&ctx->references) == 1) {
		isc_refcount_destroy(&ctx->references);
#if ISC_MEM_TRACKLINES
		if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
			fprintf(stderr, "destroy mctx %p file %s line %u\n",
				ctx, file, line);
		}
#endif
		destroy(ctx);
	}
}
681
/*
 * isc_mem_putanddetach() is the equivalent of:
 *
 * mctx = NULL;
 * isc_mem_attach(ptr->mctx, &mctx);
 * isc_mem_detach(&ptr->mctx);
 * isc_mem_put(mctx, ptr, sizeof(*ptr));
 * isc_mem_detach(&mctx);
 */
691
/*
 * Return 'ptr' to '*ctxp' and drop the caller's context reference in a
 * single step; the last reference tears the context down.
 */
void
isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size,
		      size_t alignment FLARG) {
	isc_mem_t *ctx = NULL;

	REQUIRE(ctxp != NULL && VALID_CONTEXT(*ctxp));
	REQUIRE(ptr != NULL);
	REQUIRE(size != 0);

	ctx = *ctxp;
	*ctxp = NULL;

	DELETE_TRACE(ctx, ptr, size, file, line);

	mem_putstats(ctx, ptr, size);
	mem_put(ctx, ptr, size, MEM_ALIGN(alignment));

	if (isc_refcount_decrement(&ctx->references) == 1) {
		isc_refcount_destroy(&ctx->references);
		destroy(ctx);
	}
}
714
715 void
716 isc__mem_destroy(isc_mem_t **ctxp FLARG) {
717 isc_mem_t *ctx = NULL;
718
719 /*
720 * This routine provides legacy support for callers who use mctxs
721 * without attaching/detaching.
722 */
723
724 REQUIRE(ctxp != NULL && VALID_CONTEXT(*ctxp));
725
726 ctx = *ctxp;
727 *ctxp = NULL;
728
729 #if ISC_MEM_TRACKLINES
730 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
731 fprintf(stderr, "destroy mctx %p file %s line %u\n", ctx, file,
732 line);
733 }
734
735 if (isc_refcount_decrement(&ctx->references) > 1) {
736 print_active(ctx, stderr);
737 }
738 #else /* if ISC_MEM_TRACKLINES */
739 isc_refcount_decrementz(&ctx->references);
740 #endif /* if ISC_MEM_TRACKLINES */
741 isc_refcount_destroy(&ctx->references);
742 destroy(ctx);
743
744 *ctxp = NULL;
745 }
746
/*
 * Invoke the watermark callback when a threshold crossing is due;
 * hi_water()/lo_water() decide whether the call should actually happen.
 */
#define CALL_HI_WATER(ctx)                                             \
	{                                                              \
		if (ctx->water != NULL && hi_water(ctx)) {             \
			(ctx->water)(ctx->water_arg, ISC_MEM_HIWATER); \
		}                                                      \
	}

#define CALL_LO_WATER(ctx)                                             \
	{                                                              \
		if ((ctx->water != NULL) && lo_water(ctx)) {           \
			(ctx->water)(ctx->water_arg, ISC_MEM_LOWATER); \
		}                                                      \
	}
760
/*
 * Decide whether the high-watermark callback should fire: true when a
 * high watermark is configured, usage exceeds it, and the callback has
 * not already fired without being acknowledged.  Also maintains the
 * maxinuse statistic as a side effect.
 */
static bool
hi_water(isc_mem_t *ctx) {
	size_t inuse;
	size_t maxinuse;
	size_t hiwater = atomic_load_relaxed(&ctx->hi_water);

	if (hiwater == 0) {
		return (false);
	}

	inuse = atomic_load_acquire(&ctx->inuse);
	if (inuse <= hiwater) {
		return (false);
	}

	/* Track the maximum observed usage (best effort under races). */
	maxinuse = atomic_load_acquire(&ctx->maxinuse);
	if (inuse > maxinuse) {
		(void)atomic_compare_exchange_strong(&ctx->maxinuse, &maxinuse,
						     inuse);

		if ((isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0) {
			fprintf(stderr, "maxinuse = %lu\n",
				(unsigned long)inuse);
		}
	}

	/* Fire only once until isc_mem_waterack(ISC_MEM_LOWATER) resets. */
	if (atomic_load_acquire(&ctx->hi_called)) {
		return (false);
	}

	/* We are over water (for the first time) */
	atomic_store_release(&ctx->is_overmem, true);

	return (true);
}
796
/*
 * Decide whether the low-watermark callback should fire: true when a low
 * watermark is configured, usage has dropped below it, and a high-water
 * callback was previously delivered.
 */
static bool
lo_water(isc_mem_t *ctx) {
	size_t inuse;
	size_t lowater = atomic_load_relaxed(&ctx->lo_water);

	if (lowater == 0) {
		return (false);
	}

	inuse = atomic_load_acquire(&ctx->inuse);
	if (inuse >= lowater) {
		return (false);
	}

	/* Nothing to balance if no high-water call was made. */
	if (!atomic_load_acquire(&ctx->hi_called)) {
		return (false);
	}

	/* We are no longer overmem */
	atomic_store_release(&ctx->is_overmem, false);

	return (true);
}
820
/*
 * Get 'size' bytes, optionally aligned; the same size and alignment must
 * be supplied to isc__mem_put() when returning the memory.
 */
void *
isc__mem_get(isc_mem_t *ctx, size_t size, size_t alignment FLARG) {
	void *ptr = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	ptr = mem_get(ctx, size, MEM_ALIGN(alignment));

	mem_getstats(ctx, size);
	ADD_TRACE(ctx, ptr, size, file, line);

	CALL_HI_WATER(ctx);

	return (ptr);
}
836
/* Return memory obtained with isc__mem_get() (same size/alignment). */
void
isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size, size_t alignment FLARG) {
	REQUIRE(VALID_CONTEXT(ctx));

	DELETE_TRACE(ctx, ptr, size, file, line);

	mem_putstats(ctx, ptr, size);
	mem_put(ctx, ptr, size, MEM_ALIGN(alignment));

	CALL_LO_WATER(ctx);
}
848
849 void
850 isc_mem_waterack(isc_mem_t *ctx, int flag) {
851 REQUIRE(VALID_CONTEXT(ctx));
852
853 if (flag == ISC_MEM_LOWATER) {
854 atomic_store(&ctx->hi_called, false);
855 } else if (flag == ISC_MEM_HIWATER) {
856 atomic_store(&ctx->hi_called, true);
857 }
858 }
859
#if ISC_MEM_TRACKLINES
/*
 * Write every allocation still recorded in mctx->debuglist to 'out'.
 * NOTE(review): callers appear to either hold the context lock or have
 * exclusive access to the context — confirm before adding new callers.
 */
static void
print_active(isc_mem_t *mctx, FILE *out) {
	if (mctx->debuglist != NULL) {
		debuglink_t *dl;
		unsigned int i;
		bool found;

		fprintf(out, "Dump of all outstanding memory "
			     "allocations:\n");
		found = false;
		for (i = 0; i < DEBUG_TABLE_COUNT; i++) {
			dl = ISC_LIST_HEAD(mctx->debuglist[i]);

			if (dl != NULL) {
				found = true;
			}

			while (dl != NULL) {
				if (dl->ptr != NULL) {
					fprintf(out,
						"\tptr %p size %zu "
						"file %s "
						"line %u\n",
						dl->ptr, dl->size, dl->file,
						dl->line);
				}
				dl = ISC_LIST_NEXT(dl, link);
			}
		}

		if (!found) {
			fprintf(out, "\tNone.\n");
		}
	}
}
#endif /* if ISC_MEM_TRACKLINES */
897
/*
 * Print the stats[] on the stream "out" with suitable formatting.
 */
void
isc_mem_stats(isc_mem_t *ctx, FILE *out) {
	isc_mempool_t *pool = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	MCTXLOCK(ctx);

	/* One line per non-empty size bucket; last bucket is ">=". */
	for (size_t i = 0; i <= STATS_BUCKETS; i++) {
		size_t totalgets;
		size_t gets;
		struct stats *stats = &ctx->stats[i];

		totalgets = atomic_load_acquire(&stats->totalgets);
		gets = atomic_load_acquire(&stats->gets);

		if (totalgets != 0U && gets != 0U) {
			fprintf(out, "%s%5zu: %11zu gets, %11zu rem",
				(i == STATS_BUCKETS) ? ">=" : "  ", i,
				totalgets, gets);
			fputc('\n', out);
		}
	}

	/*
	 * Note that since a pool can be locked now, these stats might
	 * be somewhat off if the pool is in active use at the time the
	 * stats are dumped. The link fields are protected by the
	 * isc_mem_t's lock, however, so walking this list and
	 * extracting integers from stats fields is always safe.
	 */
	pool = ISC_LIST_HEAD(ctx->pools);
	if (pool != NULL) {
		fprintf(out, "[Pool statistics]\n");
		fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %1s\n", "name",
			"size", "allocated", "freecount", "freemax",
			"fillcount", "gets", "L");
	}
	while (pool != NULL) {
		fprintf(out,
			"%15s %10zu %10zu %10zu %10zu %10zu %10zu %10zu %s\n",
			pool->name, pool->size, (size_t)0, pool->allocated,
			pool->freecount, pool->freemax, pool->fillcount,
			pool->gets, "N");
		pool = ISC_LIST_NEXT(pool, link);
	}

#if ISC_MEM_TRACKLINES
	print_active(ctx, out);
#endif /* if ISC_MEM_TRACKLINES */

	MCTXUNLOCK(ctx);
}
954
/*
 * Allocate 'size' bytes.  Stats are recorded using the allocator's real
 * usable size so isc__mem_free() can recover it via sallocx() without
 * the caller passing the size back.
 */
void *
isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
	void *ptr = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	ptr = mem_get(ctx, size, 0);

	/* Recalculate the real allocated size */
	size = sallocx(ptr, ctx->jemalloc_flags);

	mem_getstats(ctx, size);
	ADD_TRACE(ctx, ptr, size, file, line);

	CALL_HI_WATER(ctx);

	return (ptr);
}
973
974 void *
975 isc__mem_reget(isc_mem_t *ctx, void *old_ptr, size_t old_size, size_t new_size,
976 size_t alignment FLARG) {
977 void *new_ptr = NULL;
978
979 if (old_ptr == NULL) {
980 REQUIRE(old_size == 0);
981 new_ptr = isc__mem_get(ctx, new_size, alignment FLARG_PASS);
982 } else if (new_size == 0) {
983 isc__mem_put(ctx, old_ptr, old_size, alignment FLARG_PASS);
984 } else {
985 DELETE_TRACE(ctx, old_ptr, old_size, file, line);
986 mem_putstats(ctx, old_ptr, old_size);
987
988 new_ptr = mem_realloc(ctx, old_ptr, old_size, new_size,
989 MEM_ALIGN(alignment));
990
991 mem_getstats(ctx, new_size);
992 ADD_TRACE(ctx, new_ptr, new_size, file, line);
993
994 /*
995 * We want to postpone the call to water in edge case
996 * where the realloc will exactly hit on the boundary of
997 * the water and we would call water twice.
998 */
999 CALL_LO_WATER(ctx);
1000 CALL_HI_WATER(ctx);
1001 }
1002
1003 return (new_ptr);
1004 }
1005
/*
 * Resize memory obtained with isc__mem_allocate().  NULL old pointer
 * acts as an allocate; zero new size acts as a free (returning NULL).
 * Sizes are tracked via sallocx(), never supplied by the caller.
 */
void *
isc__mem_reallocate(isc_mem_t *ctx, void *old_ptr, size_t new_size FLARG) {
	void *new_ptr = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	if (old_ptr == NULL) {
		new_ptr = isc__mem_allocate(ctx, new_size FLARG_PASS);
	} else if (new_size == 0) {
		isc__mem_free(ctx, old_ptr FLARG_PASS);
	} else {
		/* Recover the old allocation's real size for the stats. */
		size_t old_size = sallocx(old_ptr, ctx->jemalloc_flags);

		DELETE_TRACE(ctx, old_ptr, old_size, file, line);
		mem_putstats(ctx, old_ptr, old_size);

		new_ptr = mem_realloc(ctx, old_ptr, old_size, new_size, 0);

		/* Recalculate the real allocated size */
		new_size = sallocx(new_ptr, ctx->jemalloc_flags);

		mem_getstats(ctx, new_size);
		ADD_TRACE(ctx, new_ptr, new_size, file, line);

		/*
		 * We want to postpone the call to water in edge case
		 * where the realloc will exactly hit on the boundary of
		 * the water and we would call water twice.
		 */
		CALL_LO_WATER(ctx);
		CALL_HI_WATER(ctx);
	}

	return (new_ptr);
}
1041
1042 void
1043 isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
1044 size_t size = 0;
1045
1046 REQUIRE(VALID_CONTEXT(ctx));
1047
1048 size = sallocx(ptr, ctx->jemalloc_flags);
1049
1050 DELETE_TRACE(ctx, ptr, size, file, line);
1051
1052 mem_putstats(ctx, ptr, size);
1053 mem_put(ctx, ptr, size, 0);
1054
1055 CALL_LO_WATER(ctx);
1056 }
1057
1058 /*
1059 * Other useful things.
1060 */
1061
1062 char *
1063 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1064 size_t len;
1065 char *ns = NULL;
1066
1067 REQUIRE(VALID_CONTEXT(mctx));
1068 REQUIRE(s != NULL);
1069
1070 len = strlen(s) + 1;
1071
1072 ns = isc__mem_allocate(mctx, len FLARG_PASS);
1073
1074 strlcpy(ns, s, len);
1075
1076 return (ns);
1077 }
1078
1079 char *
1080 isc__mem_strndup(isc_mem_t *mctx, const char *s, size_t size FLARG) {
1081 size_t len;
1082 char *ns = NULL;
1083
1084 REQUIRE(VALID_CONTEXT(mctx));
1085 REQUIRE(s != NULL);
1086 REQUIRE(size != 0);
1087
1088 len = strlen(s) + 1;
1089 if (len > size) {
1090 len = size;
1091 }
1092
1093 ns = isc__mem_allocate(mctx, len FLARG_PASS);
1094
1095 strlcpy(ns, s, len);
1096
1097 return (ns);
1098 }
1099
1100 void
1101 isc_mem_setdestroycheck(isc_mem_t *ctx, bool flag) {
1102 REQUIRE(VALID_CONTEXT(ctx));
1103
1104 MCTXLOCK(ctx);
1105
1106 ctx->checkfree = flag;
1107
1108 MCTXUNLOCK(ctx);
1109 }
1110
/* Bytes currently allocated from 'ctx' and not yet returned. */
size_t
isc_mem_inuse(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->inuse));
}
1117
/* High-water mark of isc_mem_inuse() (updated by the watermark check). */
size_t
isc_mem_maxinuse(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->maxinuse));
}
1124
/* Lifetime total of bytes requested from 'ctx'. */
size_t
isc_mem_total(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->total));
}
1131
/* Bytes currently counted as malloced (including internal bookkeeping). */
size_t
isc_mem_malloced(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->malloced));
}
1138
/* High-water mark of isc_mem_malloced(). */
size_t
isc_mem_maxmalloced(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->maxmalloced));
}
1145
/* Convenience wrapper: remove any configured watermark callback. */
void
isc_mem_clearwater(isc_mem_t *mctx) {
	isc_mem_setwater(mctx, NULL, NULL, 0, 0);
}
1150
/*
 * Install, retune, or clear the watermark callback.  Once set, the
 * callback/argument pair may only be retuned (same pair, new thresholds)
 * or cleared entirely; switching to a different callback is rejected.
 */
void
isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
		 size_t hiwater, size_t lowater) {
	isc_mem_water_t oldwater;
	void *oldwater_arg;

	REQUIRE(VALID_CONTEXT(ctx));
	REQUIRE(hiwater >= lowater);

	oldwater = ctx->water;
	oldwater_arg = ctx->water_arg;

	/* No water was set and new water is also NULL */
	if (oldwater == NULL && water == NULL) {
		return;
	}

	/* The water function is being set for the first time */
	if (oldwater == NULL) {
		REQUIRE(water != NULL && lowater > 0);

		INSIST(atomic_load(&ctx->hi_water) == 0);
		INSIST(atomic_load(&ctx->lo_water) == 0);

		ctx->water = water;
		ctx->water_arg = water_arg;
		atomic_store(&ctx->hi_water, hiwater);
		atomic_store(&ctx->lo_water, lowater);

		return;
	}

	/* Either retune the same callback, or clear everything at once. */
	REQUIRE((water == oldwater && water_arg == oldwater_arg) ||
		(water == NULL && water_arg == NULL && hiwater == 0));

	atomic_store(&ctx->hi_water, hiwater);
	atomic_store(&ctx->lo_water, lowater);

	/*
	 * If a high-water call is still unacknowledged and usage is below
	 * the (new) low watermark, deliver the balancing low-water call.
	 */
	if (atomic_load_acquire(&ctx->hi_called) &&
	    (atomic_load_acquire(&ctx->inuse) < lowater || lowater == 0U))
	{
		(oldwater)(oldwater_arg, ISC_MEM_LOWATER);
	}
}
1195
/*
 * True between crossing the high watermark and dropping back below the
 * low watermark.
 */
bool
isc_mem_isovermem(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_relaxed(&ctx->is_overmem));
}
1202
1203 void
1204 isc_mem_setname(isc_mem_t *ctx, const char *name) {
1205 REQUIRE(VALID_CONTEXT(ctx));
1206
1207 LOCK(&ctx->lock);
1208 strlcpy(ctx->name, name, sizeof(ctx->name));
1209 UNLOCK(&ctx->lock);
1210 }
1211
1212 const char *
1213 isc_mem_getname(isc_mem_t *ctx) {
1214 REQUIRE(VALID_CONTEXT(ctx));
1215
1216 if (ctx->name[0] == 0) {
1217 return ("");
1218 }
1219
1220 return (ctx->name);
1221 }
1222
1223 /*
1224 * Memory pool stuff
1225 */
1226
/*
 * Create a fixed-size-item pool backed by 'mctx' and register it with
 * the context (for stats and the destroy-time empty check).
 */
void
isc__mempool_create(isc_mem_t *restrict mctx, const size_t element_size,
		    isc_mempool_t **restrict mpctxp FLARG) {
	isc_mempool_t *restrict mpctx = NULL;
	size_t size = element_size;

	REQUIRE(VALID_CONTEXT(mctx));
	REQUIRE(size > 0U);
	REQUIRE(mpctxp != NULL && *mpctxp == NULL);

	/*
	 * Mempools are stored as a linked list of element.
	 */
	if (size < sizeof(element)) {
		size = sizeof(element);
	}

	/*
	 * Allocate space for this pool, initialize values, and if all
	 * works well, attach to the memory context.
	 */
	mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));

	/* freemax/fillcount start at 1; callers may tune them later. */
	*mpctx = (isc_mempool_t){
		.size = size,
		.freemax = 1,
		.fillcount = 1,
	};

#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "create pool %p file %s line %u mctx %p\n",
			mpctx, file, line, mctx);
	}
#endif /* ISC_MEM_TRACKLINES */

	isc_mem_attach(mctx, &mpctx->mctx);
	mpctx->magic = MEMPOOL_MAGIC;

	*mpctxp = (isc_mempool_t *)mpctx;

	/* Register the pool with its context for stats reporting. */
	MCTXLOCK(mctx);
	ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
	mctx->poolcnt++;
	MCTXUNLOCK(mctx);
}
1273
1274 void
1275 isc_mempool_setname(isc_mempool_t *restrict mpctx, const char *name) {
1276 REQUIRE(VALID_MEMPOOL(mpctx));
1277 REQUIRE(name != NULL);
1278
1279 strlcpy(mpctx->name, name, sizeof(mpctx->name));
1280 }
1281
/*
 * Destroy the pool pointed to by '*mpctxp' and set '*mpctxp' to NULL.
 * Every item obtained from the pool must already have been returned;
 * a non-zero 'allocated' count is reported as a leak and then trips
 * the REQUIRE below.
 */
void
isc__mempool_destroy(isc_mempool_t **restrict mpctxp FLARG) {
	isc_mempool_t *restrict mpctx = NULL;
	isc_mem_t *mctx = NULL;
	element *restrict item = NULL;

	REQUIRE(mpctxp != NULL);
	REQUIRE(VALID_MEMPOOL(*mpctxp));

	mpctx = *mpctxp;
	*mpctxp = NULL;

	mctx = mpctx->mctx;

#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "destroy pool %p file %s line %u mctx %p\n",
			mpctx, file, line, mctx);
	}
#endif

	if (mpctx->allocated > 0) {
		UNEXPECTED_ERROR("mempool %s leaked memory", mpctx->name);
	}
	REQUIRE(mpctx->allocated == 0);

	/*
	 * Return any items on the free list
	 */
	while (mpctx->items != NULL) {
		INSIST(mpctx->freecount > 0);
		mpctx->freecount--;

		item = mpctx->items;
		mpctx->items = item->next;

		mem_putstats(mctx, item, mpctx->size);
		mem_put(mctx, item, mpctx->size, 0);
	}

	/*
	 * Remove our linked list entry from the memory context.
	 */
	MCTXLOCK(mctx);
	ISC_LIST_UNLINK(mctx->pools, mpctx, link);
	mctx->poolcnt--;
	MCTXUNLOCK(mctx);

	/* Clear the magic before freeing so stale pointers fail VALID_MEMPOOL. */
	mpctx->magic = 0;

	isc_mem_putanddetach(&mpctx->mctx, mpctx, sizeof(isc_mempool_t));
}
1334
/*
 * Get one element from the pool.  If the free list is empty, it is
 * refilled with 'fillcount' fresh allocations from the memory context
 * (only one at a time when built under AddressSanitizer).
 */
void *
isc__mempool_get(isc_mempool_t *restrict mpctx FLARG) {
	element *restrict item = NULL;

	REQUIRE(VALID_MEMPOOL(mpctx));

	mpctx->allocated++;

	if (mpctx->items == NULL) {
		isc_mem_t *mctx = mpctx->mctx;
#if !__SANITIZE_ADDRESS__
		const size_t fillcount = mpctx->fillcount;
#else
		const size_t fillcount = 1;
#endif
		/*
		 * We need to dip into the well. Fill up our free list.
		 */
		for (size_t i = 0; i < fillcount; i++) {
			item = mem_get(mctx, mpctx->size, 0);
			mem_getstats(mctx, mpctx->size);
			item->next = mpctx->items;
			mpctx->items = item;
			mpctx->freecount++;
		}
	}

	/* The refill above guarantees at least one item (fillcount >= 1). */
	item = mpctx->items;
	INSIST(item != NULL);

	mpctx->items = item->next;

	INSIST(mpctx->freecount > 0);
	mpctx->freecount--;
	mpctx->gets++;

	ADD_TRACE(mpctx->mctx, item, mpctx->size, file, line);

	return (item);
}
1375
/* coverity[+free : arg-1] */
/*
 * Return 'mem' to its pool.  The item is kept on the pool's free list
 * unless the list already holds 'freemax' items (treated as 0 under
 * AddressSanitizer, so every item goes straight back to the memory
 * context), in which case it is freed immediately.
 */
void
isc__mempool_put(isc_mempool_t *restrict mpctx, void *mem FLARG) {
	element *restrict item = NULL;

	REQUIRE(VALID_MEMPOOL(mpctx));
	REQUIRE(mem != NULL);

	isc_mem_t *mctx = mpctx->mctx;
	const size_t freecount = mpctx->freecount;
#if !__SANITIZE_ADDRESS__
	const size_t freemax = mpctx->freemax;
#else
	const size_t freemax = 0;
#endif

	INSIST(mpctx->allocated > 0);
	mpctx->allocated--;

	DELETE_TRACE(mctx, mem, mpctx->size, file, line);

	/*
	 * If our free list is full, return this to the mctx directly.
	 */
	if (freecount >= freemax) {
		mem_putstats(mctx, mem, mpctx->size);
		mem_put(mctx, mem, mpctx->size, 0);
		return;
	}

	/*
	 * Otherwise, attach it to our free list and bump the counter.
	 */
	item = (element *)mem;
	item->next = mpctx->items;
	mpctx->items = item;
	mpctx->freecount++;
}
1414
1415 /*
1416 * Quotas
1417 */
1418
1419 void
1420 isc_mempool_setfreemax(isc_mempool_t *restrict mpctx,
1421 const unsigned int limit) {
1422 REQUIRE(VALID_MEMPOOL(mpctx));
1423 mpctx->freemax = limit;
1424 }
1425
1426 unsigned int
1427 isc_mempool_getfreemax(isc_mempool_t *restrict mpctx) {
1428 REQUIRE(VALID_MEMPOOL(mpctx));
1429
1430 return (mpctx->freemax);
1431 }
1432
1433 unsigned int
1434 isc_mempool_getfreecount(isc_mempool_t *restrict mpctx) {
1435 REQUIRE(VALID_MEMPOOL(mpctx));
1436
1437 return (mpctx->freecount);
1438 }
1439
1440 unsigned int
1441 isc_mempool_getallocated(isc_mempool_t *restrict mpctx) {
1442 REQUIRE(VALID_MEMPOOL(mpctx));
1443
1444 return (mpctx->allocated);
1445 }
1446
1447 void
1448 isc_mempool_setfillcount(isc_mempool_t *restrict mpctx,
1449 unsigned int const limit) {
1450 REQUIRE(VALID_MEMPOOL(mpctx));
1451 REQUIRE(limit > 0);
1452
1453 mpctx->fillcount = limit;
1454 }
1455
1456 unsigned int
1457 isc_mempool_getfillcount(isc_mempool_t *restrict mpctx) {
1458 REQUIRE(VALID_MEMPOOL(mpctx));
1459
1460 return (mpctx->fillcount);
1461 }
1462
1463 /*
1464 * Requires contextslock to be held by caller.
1465 */
1466 #if ISC_MEM_TRACKLINES
1467 static void
1468 print_contexts(FILE *file) {
1469 isc_mem_t *ctx;
1470
1471 for (ctx = ISC_LIST_HEAD(contexts); ctx != NULL;
1472 ctx = ISC_LIST_NEXT(ctx, link))
1473 {
1474 fprintf(file, "context: %p (%s): %" PRIuFAST32 " references\n",
1475 ctx, ctx->name[0] == 0 ? "<unknown>" : ctx->name,
1476 isc_refcount_current(&ctx->references));
1477 print_active(ctx, file);
1478 }
1479 fflush(file);
1480 }
1481 #endif
1482
/* FILE* to report leaked contexts to at shutdown; 0/NULL disables the check. */
static atomic_uintptr_t checkdestroyed = 0;
1484
1485 void
1486 isc_mem_checkdestroyed(FILE *file) {
1487 atomic_store_release(&checkdestroyed, (uintptr_t)file);
1488 }
1489
/*
 * Shutdown hook: if isc_mem_checkdestroyed() registered a FILE, verify
 * that every memory context has been destroyed.  If any survive, dump
 * them (when trace/record debugging is enabled) and abort via
 * UNREACHABLE().  A NULL registration disables the check entirely.
 */
void
isc__mem_checkdestroyed(void) {
	FILE *file = (FILE *)atomic_load_acquire(&checkdestroyed);

	if (file == NULL) {
		return;
	}

	LOCK(&contextslock);
	if (!ISC_LIST_EMPTY(contexts)) {
#if ISC_MEM_TRACKLINES
		if ((isc_mem_debugging & TRACE_OR_RECORD) != 0) {
			print_contexts(file);
		}
#endif /* if ISC_MEM_TRACKLINES */
		UNREACHABLE();
	}
	UNLOCK(&contextslock);
}
1509
1510 unsigned int
1511 isc_mem_references(isc_mem_t *ctx) {
1512 return (isc_refcount_current(&ctx->references));
1513 }
1514
/* Totals accumulated across all contexts by the XML/JSON renderers below. */
typedef struct summarystat {
	uint64_t total;	      /* sum of isc_mem_total() over all contexts */
	uint64_t inuse;	      /* sum of isc_mem_inuse() */
	uint64_t malloced;    /* sum of isc_mem_malloced() */
	uint64_t contextsize; /* estimated bookkeeping overhead in bytes */
} summarystat_t;
1521
#ifdef HAVE_LIBXML2
/* Evaluate a libxml2 writer call; on a negative status jump to 'error'. */
#define TRY0(a)                     \
	do {                        \
		xmlrc = (a);        \
		if (xmlrc < 0)      \
			goto error; \
	} while (0)
/*
 * Render one memory context as a <context> XML element and fold its
 * statistics into '*summary'.  The context lock is held across all
 * writer calls; it is released at 'error' whether or not a call
 * failed.  Returns the last libxml2 status (negative on failure).
 */
static int
xml_renderctx(isc_mem_t *ctx, summarystat_t *summary, xmlTextWriterPtr writer) {
	REQUIRE(VALID_CONTEXT(ctx));

	int xmlrc;

	MCTXLOCK(ctx);

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "context"));

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%p", ctx));
	TRY0(xmlTextWriterEndElement(writer)); /* id */

	/* <name> is emitted only for named contexts. */
	if (ctx->name[0] != 0) {
		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "name"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%s", ctx->name));
		TRY0(xmlTextWriterEndElement(writer)); /* name */
	}

	summary->contextsize += sizeof(*ctx);
#if ISC_MEM_TRACKLINES
	/* Account for the allocation-tracking tables when enabled. */
	if (ctx->debuglist != NULL) {
		summary->contextsize += DEBUG_TABLE_COUNT *
						sizeof(debuglist_t) +
					ctx->debuglistcnt * sizeof(debuglink_t);
	}
#endif /* if ISC_MEM_TRACKLINES */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "references"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIuFAST32,
		isc_refcount_current(&ctx->references)));
	TRY0(xmlTextWriterEndElement(writer)); /* references */

	summary->total += isc_mem_total(ctx);
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "total"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_total(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* total */

	summary->inuse += isc_mem_inuse(ctx);
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "inuse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_inuse(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* inuse */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "maxinuse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_maxinuse(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* maxinuse */

	summary->malloced += isc_mem_malloced(ctx);
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "malloced"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_malloced(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* malloced */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "maxmalloced"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIu64 "", (uint64_t)isc_mem_maxmalloced(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* maxmalloced */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "pools"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%u", ctx->poolcnt));
	TRY0(xmlTextWriterEndElement(writer)); /* pools */
	summary->contextsize += ctx->poolcnt * sizeof(isc_mempool_t);

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "hiwater"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIu64 "",
		(uint64_t)atomic_load_relaxed(&ctx->hi_water)));
	TRY0(xmlTextWriterEndElement(writer)); /* hiwater */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "lowater"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIu64 "",
		(uint64_t)atomic_load_relaxed(&ctx->lo_water)));
	TRY0(xmlTextWriterEndElement(writer)); /* lowater */

	TRY0(xmlTextWriterEndElement(writer)); /* context */

error:
	MCTXUNLOCK(ctx);

	return (xmlrc);
}
1615
/*
 * Render all memory contexts plus a <summary> section into the libxml2
 * text writer passed as 'writer0'.  Returns the last libxml2 status
 * (negative on failure).  The contexts list lock is held only while
 * iterating the contexts, not while writing the summary.
 */
int
isc_mem_renderxml(void *writer0) {
	isc_mem_t *ctx;
	summarystat_t summary = { 0 };
	uint64_t lost;
	int xmlrc;
	xmlTextWriterPtr writer = (xmlTextWriterPtr)writer0;

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "contexts"));

	LOCK(&contextslock);
	lost = totallost;
	for (ctx = ISC_LIST_HEAD(contexts); ctx != NULL;
	     ctx = ISC_LIST_NEXT(ctx, link))
	{
		xmlrc = xml_renderctx(ctx, &summary, writer);
		if (xmlrc < 0) {
			/* Drop the lock before bailing out. */
			UNLOCK(&contextslock);
			goto error;
		}
	}
	UNLOCK(&contextslock);

	TRY0(xmlTextWriterEndElement(writer)); /* contexts */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "summary"));

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "TotalUse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.total));
	TRY0(xmlTextWriterEndElement(writer)); /* TotalUse */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "InUse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.inuse));
	TRY0(xmlTextWriterEndElement(writer)); /* InUse */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "Malloced"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.malloced));
	TRY0(xmlTextWriterEndElement(writer)); /* Malloced */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "ContextSize"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.contextsize));
	TRY0(xmlTextWriterEndElement(writer)); /* ContextSize */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "Lost"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "", lost));
	TRY0(xmlTextWriterEndElement(writer)); /* Lost */

	TRY0(xmlTextWriterEndElement(writer)); /* summary */
error:
	return (xmlrc);
}
1671
1672 #endif /* HAVE_LIBXML2 */
1673
1674 #ifdef HAVE_JSON_C
1675 #define CHECKMEM(m) RUNTIME_CHECK(m != NULL)
1676
/*
 * Render one memory context as a JSON object appended to 'array', and
 * fold its statistics into '*summary'.  The context lock is held while
 * the context fields are read and released before the object is added
 * to the array.  Ownership of the new object passes to 'array'.
 */
static isc_result_t
json_renderctx(isc_mem_t *ctx, summarystat_t *summary, json_object *array) {
	REQUIRE(VALID_CONTEXT(ctx));
	REQUIRE(summary != NULL);
	REQUIRE(array != NULL);

	json_object *ctxobj, *obj;
	char buf[1024];

	MCTXLOCK(ctx);

	summary->contextsize += sizeof(*ctx);
	summary->total += isc_mem_total(ctx);
	summary->inuse += isc_mem_inuse(ctx);
	summary->malloced += isc_mem_malloced(ctx);
#if ISC_MEM_TRACKLINES
	/* Account for the allocation-tracking tables when enabled. */
	if (ctx->debuglist != NULL) {
		summary->contextsize += DEBUG_TABLE_COUNT *
						sizeof(debuglist_t) +
					ctx->debuglistcnt * sizeof(debuglink_t);
	}
#endif /* if ISC_MEM_TRACKLINES */

	ctxobj = json_object_new_object();
	CHECKMEM(ctxobj);

	/* The context's address doubles as its identifier. */
	snprintf(buf, sizeof(buf), "%p", ctx);
	obj = json_object_new_string(buf);
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "id", obj);

	/* "name" is emitted only for named contexts. */
	if (ctx->name[0] != 0) {
		obj = json_object_new_string(ctx->name);
		CHECKMEM(obj);
		json_object_object_add(ctxobj, "name", obj);
	}

	obj = json_object_new_int64(isc_refcount_current(&ctx->references));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "references", obj);

	obj = json_object_new_int64(isc_mem_total(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "total", obj);

	obj = json_object_new_int64(isc_mem_inuse(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "inuse", obj);

	obj = json_object_new_int64(isc_mem_maxinuse(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "maxinuse", obj);

	obj = json_object_new_int64(isc_mem_malloced(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "malloced", obj);

	obj = json_object_new_int64(isc_mem_maxmalloced(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "maxmalloced", obj);

	obj = json_object_new_int64(ctx->poolcnt);
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "pools", obj);

	summary->contextsize += ctx->poolcnt * sizeof(isc_mempool_t);

	obj = json_object_new_int64(atomic_load_relaxed(&ctx->hi_water));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "hiwater", obj);

	obj = json_object_new_int64(atomic_load_relaxed(&ctx->lo_water));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "lowater", obj);

	MCTXUNLOCK(ctx);
	json_object_array_add(array, ctxobj);
	return (ISC_R_SUCCESS);
}
1756
/*
 * Render all memory contexts plus summary totals into the json-c
 * object passed as 'memobj0'.  On success ownership of the contexts
 * array passes to 'memobj'; on failure the array is released here.
 */
isc_result_t
isc_mem_renderjson(void *memobj0) {
	isc_result_t result = ISC_R_SUCCESS;
	isc_mem_t *ctx;
	summarystat_t summary = { 0 };
	uint64_t lost;
	json_object *ctxarray, *obj;
	json_object *memobj = (json_object *)memobj0;

	ctxarray = json_object_new_array();
	CHECKMEM(ctxarray);

	LOCK(&contextslock);
	lost = totallost;
	for (ctx = ISC_LIST_HEAD(contexts); ctx != NULL;
	     ctx = ISC_LIST_NEXT(ctx, link))
	{
		result = json_renderctx(ctx, &summary, ctxarray);
		if (result != ISC_R_SUCCESS) {
			/* Drop the lock before bailing out. */
			UNLOCK(&contextslock);
			goto error;
		}
	}
	UNLOCK(&contextslock);

	obj = json_object_new_int64(summary.total);
	CHECKMEM(obj);
	json_object_object_add(memobj, "TotalUse", obj);

	obj = json_object_new_int64(summary.inuse);
	CHECKMEM(obj);
	json_object_object_add(memobj, "InUse", obj);

	obj = json_object_new_int64(summary.malloced);
	CHECKMEM(obj);
	json_object_object_add(memobj, "Malloced", obj);

	obj = json_object_new_int64(summary.contextsize);
	CHECKMEM(obj);
	json_object_object_add(memobj, "ContextSize", obj);

	obj = json_object_new_int64(lost);
	CHECKMEM(obj);
	json_object_object_add(memobj, "Lost", obj);

	json_object_object_add(memobj, "contexts", ctxarray);
	return (ISC_R_SUCCESS);

error:
	/* Release the partially-built array; nothing was handed to memobj. */
	if (ctxarray != NULL) {
		json_object_put(ctxarray);
	}
	return (result);
}
1811 #endif /* HAVE_JSON_C */
1812
/*
 * Create a memory context with the default flags, tracing the caller's
 * file/line when ISC_MEM_DEBUGTRACE is enabled.
 */
void
isc__mem_create(isc_mem_t **mctxp FLARG) {
	mem_create(mctxp, isc_mem_defaultflags, 0);
#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "create mctx %p file %s line %u\n", *mctxp,
			file, line);
	}
#endif /* ISC_MEM_TRACKLINES */
}
1823
/*
 * Create a memory context bound to its own dedicated jemalloc arena.
 * If no arena could be assigned (arena_no stays ISC_MEM_ILLEGAL_ARENA,
 * e.g. with the jemalloc shim), fall back to default allocation flags.
 */
void
isc__mem_create_arena(isc_mem_t **mctxp FLARG) {
	unsigned int arena_no = ISC_MEM_ILLEGAL_ARENA;

	RUNTIME_CHECK(mem_jemalloc_arena_create(&arena_no));

	/*
	 * We use MALLOCX_TCACHE_NONE to bypass the tcache and route
	 * allocations directly to the arena. That is a recommendation
	 * from jemalloc developers:
	 *
	 * https://github.com/jemalloc/jemalloc/issues/2483#issuecomment-1698173849
	 */
	mem_create(mctxp, isc_mem_defaultflags,
		   arena_no == ISC_MEM_ILLEGAL_ARENA
			   ? 0
			   : MALLOCX_ARENA(arena_no) | MALLOCX_TCACHE_NONE);
	(*mctxp)->jemalloc_arena = arena_no;
#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr,
			"create mctx %p file %s line %u for jemalloc arena "
			"%u\n",
			*mctxp, file, line, arena_no);
	}
#endif /* ISC_MEM_TRACKLINES */
}
1851
#if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
/*
 * Write 'newval' to the jemalloc control knob named 'valname' via
 * mallctl(); returns true on success (mallctl status 0).
 */
static bool
jemalloc_set_ssize_value(const char *valname, ssize_t newval) {
	return (mallctl(valname, NULL, NULL, &newval, sizeof(newval)) == 0);
}
#endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
1861
1862 static isc_result_t
1863 mem_set_arena_ssize_value(isc_mem_t *mctx, const char *arena_valname,
1864 const ssize_t newval) {
1865 REQUIRE(VALID_CONTEXT(mctx));
1866 #if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
1867 bool ret;
1868 char buf[256] = { 0 };
1869
1870 if (mctx->jemalloc_arena == ISC_MEM_ILLEGAL_ARENA) {
1871 return (ISC_R_UNEXPECTED);
1872 }
1873
1874 (void)snprintf(buf, sizeof(buf), "arena.%u.%s", mctx->jemalloc_arena,
1875 arena_valname);
1876
1877 ret = jemalloc_set_ssize_value(buf, newval);
1878
1879 if (!ret) {
1880 return (ISC_R_FAILURE);
1881 }
1882
1883 return (ISC_R_SUCCESS);
1884 #else
1885 UNUSED(arena_valname);
1886 UNUSED(newval);
1887 return (ISC_R_NOTIMPLEMENTED);
1888 #endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
1889 }
1890
/*
 * Set the jemalloc "muzzy_decay_ms" knob for the context's arena.
 */
isc_result_t
isc_mem_arena_set_muzzy_decay_ms(isc_mem_t *mctx, const ssize_t decay_ms) {
	return (mem_set_arena_ssize_value(mctx, "muzzy_decay_ms", decay_ms));
}
1895
/*
 * Set the jemalloc "dirty_decay_ms" knob for the context's arena.
 */
isc_result_t
isc_mem_arena_set_dirty_decay_ms(isc_mem_t *mctx, const ssize_t decay_ms) {
	return (mem_set_arena_ssize_value(mctx, "dirty_decay_ms", decay_ms));
}
1900
/*
 * Print the context's active (still-allocated) blocks to 'file'.
 * A no-op unless the library was built with ISC_MEM_TRACKLINES.
 */
void
isc__mem_printactive(isc_mem_t *ctx, FILE *file) {
#if ISC_MEM_TRACKLINES
	REQUIRE(VALID_CONTEXT(ctx));
	REQUIRE(file != NULL);

	print_active(ctx, file);
#else /* if ISC_MEM_TRACKLINES */
	UNUSED(ctx);
	UNUSED(file);
#endif /* if ISC_MEM_TRACKLINES */
}
1913
/*
 * Allocate 'size' bytes without an associated memory context.
 * Normalize a zero size first: mallocx() has undefined behavior for a
 * size of 0, and isc__mem_free_noctx() applies the same adjustment, so
 * the sizes seen by mallocx()/sdallocx() stay consistent.
 */
void *
isc__mem_alloc_noctx(size_t size) {
	ADJUST_ZERO_ALLOCATION_SIZE(size);
	return (mallocx(size, 0));
}
1918
/*
 * Free memory obtained with isc__mem_alloc_noctx().  'size' must be
 * the originally requested size; a zero size is normalized by
 * ADJUST_ZERO_ALLOCATION_SIZE() before being passed to sdallocx().
 */
void
isc__mem_free_noctx(void *ptr, size_t size) {
	ADJUST_ZERO_ALLOCATION_SIZE(size);
	sdallocx(ptr, size, 0);
}
1924