mem.c revision 1.15 1 /* $NetBSD: mem.c,v 1.15 2024/02/21 22:52:28 christos Exp $ */
2
3 /*
4 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
5 *
6 * SPDX-License-Identifier: MPL-2.0
7 *
8 * This Source Code Form is subject to the terms of the Mozilla Public
9 * License, v. 2.0. If a copy of the MPL was not distributed with this
10 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
11 *
12 * See the COPYRIGHT file distributed with this work for additional
13 * information regarding copyright ownership.
14 */
15
16 /*! \file */
17
18 #include <errno.h>
19 #include <inttypes.h>
20 #include <limits.h>
21 #include <stdbool.h>
22 #include <stddef.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25
26 #include <isc/align.h>
27 #include <isc/hash.h>
28 #include <isc/magic.h>
29 #include <isc/mem.h>
30 #include <isc/mutex.h>
31 #include <isc/once.h>
32 #include <isc/os.h>
33 #include <isc/print.h>
34 #include <isc/refcount.h>
35 #include <isc/string.h>
36 #include <isc/types.h>
37 #include <isc/util.h>
38
39 #ifdef HAVE_LIBXML2
40 #include <libxml/xmlwriter.h>
41 #define ISC_XMLCHAR (const xmlChar *)
42 #endif /* HAVE_LIBXML2 */
43
44 #ifdef HAVE_JSON_C
45 #include <json_object.h>
46 #endif /* HAVE_JSON_C */
47
48 /* On DragonFly BSD the header does not provide jemalloc API */
49 #if defined(HAVE_MALLOC_NP_H) && !defined(__DragonFly__)
50 #include <malloc_np.h>
51 #define JEMALLOC_API_SUPPORTED 1
52 #elif defined(HAVE_JEMALLOC)
53 #include <jemalloc/jemalloc.h>
54 #define JEMALLOC_API_SUPPORTED 1
55
56 #if JEMALLOC_VERSION_MAJOR < 4
57 #define sdallocx(ptr, size, flags) dallocx(ptr, flags)
58 #define MALLOCX_TCACHE_NONE (0)
59 #endif /* JEMALLOC_VERSION_MAJOR < 4 */
60
61 #else
62 #include "jemalloc_shim.h"
63 #endif
64
65 #include "mem_p.h"
66
67 #define MCTXLOCK(m) LOCK(&m->lock)
68 #define MCTXUNLOCK(m) UNLOCK(&m->lock)
69
70 #ifndef ISC_MEM_DEBUGGING
71 #define ISC_MEM_DEBUGGING 0
72 #endif /* ifndef ISC_MEM_DEBUGGING */
73 unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
74 unsigned int isc_mem_defaultflags = ISC_MEMFLAG_DEFAULT;
75
76 #define ISC_MEM_ILLEGAL_ARENA (UINT_MAX)
77
78 /*
79 * Constants.
80 */
81
82 #define ZERO_ALLOCATION_SIZE sizeof(void *)
83 #define ALIGNMENT 8U /*%< must be a power of 2 */
84 #define ALIGNMENT_SIZE sizeof(size_info)
85 #define DEBUG_TABLE_COUNT 512U
86 #define STATS_BUCKETS 512U
87 #define STATS_BUCKET_SIZE 32U
88
89 /*
90 * Types.
91 */
92 #if ISC_MEM_TRACKLINES
93 typedef struct debuglink debuglink_t;
94 struct debuglink {
95 ISC_LINK(debuglink_t) link;
96 const void *ptr;
97 size_t size;
98 const char *file;
99 unsigned int line;
100 };
101
102 typedef ISC_LIST(debuglink_t) debuglist_t;
103
104 #define FLARG_PASS , file, line
105 #define FLARG , const char *file, unsigned int line
106 #else /* if ISC_MEM_TRACKLINES */
107 #define FLARG_PASS
108 #define FLARG
109 #endif /* if ISC_MEM_TRACKLINES */
110
111 typedef struct element element;
112 struct element {
113 element *next;
114 };
115
116 struct stats {
117 atomic_size_t gets;
118 atomic_size_t totalgets;
119 };
120
121 #define MEM_MAGIC ISC_MAGIC('M', 'e', 'm', 'C')
122 #define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)
123
124 /* List of all active memory contexts. */
125
126 static ISC_LIST(isc_mem_t) contexts;
127
128 static isc_once_t init_once = ISC_ONCE_INIT;
129 static isc_once_t shut_once = ISC_ONCE_INIT;
130 static isc_mutex_t contextslock;
131
132 /*%
133 * Total size of lost memory due to a bug of external library.
134 * Locked by the global lock.
135 */
136 static uint64_t totallost;
137
struct isc_mem {
	unsigned int magic;	     /* MEM_MAGIC while the context is valid */
	unsigned int flags;	     /* ISC_MEMFLAG_* (e.g. FILL) */
	unsigned int jemalloc_flags; /* extra flags ORed into mallocx() calls */
	unsigned int jemalloc_arena; /* private arena, or ISC_MEM_ILLEGAL_ARENA */
	isc_mutex_t lock;
	bool checkfree; /* assert on leaks when the context is destroyed */
	struct stats stats[STATS_BUCKETS + 1]; /* per-size-bucket counters;
						* last bucket is overflow */
	isc_refcount_t references;
	char name[16];		/* label shown in statistics output */
	atomic_size_t total;	/* cumulative bytes requested (never reduced) */
	atomic_size_t inuse;	/* bytes currently accounted as allocated */
	atomic_size_t maxinuse; /* high-water mark of 'inuse' */
	atomic_size_t malloced; /* bytes currently malloced, incl. overhead */
	atomic_size_t maxmalloced; /* high-water mark of 'malloced' */
	atomic_bool hi_called;	   /* HIWATER callback fired, not yet acked */
	atomic_bool is_overmem;	   /* true between hi- and lo-water crossings */
	isc_mem_water_t water;	   /* water-mark callback, or NULL */
	void *water_arg;	   /* opaque argument passed to 'water' */
	atomic_size_t hi_water;	   /* high-water threshold (0 = disabled) */
	atomic_size_t lo_water;	   /* low-water threshold (0 = disabled) */
	ISC_LIST(isc_mempool_t) pools; /* pools created from this context */
	unsigned int poolcnt;	       /* number of entries on 'pools' */

#if ISC_MEM_TRACKLINES
	debuglist_t *debuglist; /* hash table of live allocations */
	size_t debuglistcnt;	/* total entries in 'debuglist' */
#endif /* if ISC_MEM_TRACKLINES */

	ISC_LINK(isc_mem_t) link; /* entry on the global 'contexts' list */
};
169
170 #define MEMPOOL_MAGIC ISC_MAGIC('M', 'E', 'M', 'p')
171 #define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)
172
173 struct isc_mempool {
174 /* always unlocked */
175 unsigned int magic;
176 isc_mem_t *mctx; /*%< our memory context */
177 ISC_LINK(isc_mempool_t) link; /*%< next pool in this mem context */
178 element *items; /*%< low water item list */
179 size_t size; /*%< size of each item on this pool */
180 size_t allocated; /*%< # of items currently given out */
181 size_t freecount; /*%< # of items on reserved list */
182 size_t freemax; /*%< # of items allowed on free list */
183 size_t fillcount; /*%< # of items to fetch on each fill */
184 /*%< Stats only. */
185 size_t gets; /*%< # of requests to this pool */
186 /*%< Debugging only. */
187 char name[16]; /*%< printed name in stats reports */
188 };
189
190 /*
191 * Private Inline-able.
192 */
193
194 #if !ISC_MEM_TRACKLINES
195 #define ADD_TRACE(a, b, c, d, e)
196 #define DELETE_TRACE(a, b, c, d, e)
197 #define ISC_MEMFUNC_SCOPE
198 #else /* if !ISC_MEM_TRACKLINES */
199 #define TRACE_OR_RECORD (ISC_MEM_DEBUGTRACE | ISC_MEM_DEBUGRECORD)
200
201 #define SHOULD_TRACE_OR_RECORD(ptr) \
202 ((isc_mem_debugging & TRACE_OR_RECORD) != 0 && ptr != NULL)
203
204 #define ADD_TRACE(a, b, c, d, e) \
205 if (SHOULD_TRACE_OR_RECORD(b)) { \
206 add_trace_entry(a, b, c, d, e); \
207 }
208
209 #define DELETE_TRACE(a, b, c, d, e) \
210 if (SHOULD_TRACE_OR_RECORD(b)) { \
211 delete_trace_entry(a, b, c, d, e); \
212 }
213
214 static void
215 print_active(isc_mem_t *ctx, FILE *out);
216 #endif /* ISC_MEM_TRACKLINES */
217
218 static size_t
219 increment_malloced(isc_mem_t *ctx, size_t size) {
220 size_t malloced = atomic_fetch_add_relaxed(&ctx->malloced, size) + size;
221 size_t maxmalloced = atomic_load_relaxed(&ctx->maxmalloced);
222
223 if (malloced > maxmalloced) {
224 atomic_compare_exchange_strong(&ctx->maxmalloced, &maxmalloced,
225 malloced);
226 }
227
228 return (malloced);
229 }
230
231 static size_t
232 decrement_malloced(isc_mem_t *ctx, size_t size) {
233 size_t malloced = atomic_fetch_sub_relaxed(&ctx->malloced, size) - size;
234
235 return (malloced);
236 }
237
238 #if ISC_MEM_TRACKLINES
239 /*!
240 * mctx must not be locked.
241 */
/*
 * Record an allocation of 'size' bytes at 'ptr' in the per-context
 * debug hash table, and/or trace it to stderr, depending on
 * isc_mem_debugging.  mctx must not be locked on entry.
 */
static void
add_trace_entry(isc_mem_t *mctx, const void *ptr, size_t size FLARG) {
	debuglink_t *dl = NULL;
	uint32_t hash;
	uint32_t idx;

	MCTXLOCK(mctx);

	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "add %p size %zu file %s line %u mctx %p\n",
			ptr, size, file, line, mctx);
	}

	/* Record-keeping is active only when the debug table exists. */
	if (mctx->debuglist == NULL) {
		goto unlock;
	}

#ifdef __COVERITY__
	/*
	 * Use simple conversion from pointer to hash to avoid
	 * tainting 'ptr' due to byte swap in isc_hash_function.
	 */
	hash = (uintptr_t)ptr >> 3;
#else
	hash = isc_hash_function(&ptr, sizeof(ptr), true);
#endif
	idx = hash % DEBUG_TABLE_COUNT;

	/* The tracking node itself bypasses normal get/put stats. */
	dl = mallocx(sizeof(*dl), mctx->jemalloc_flags);
	INSIST(dl != NULL);
	increment_malloced(mctx, sizeof(*dl));

	ISC_LINK_INIT(dl, link);
	dl->ptr = ptr;
	dl->size = size;
	dl->file = file;
	dl->line = line;

	ISC_LIST_PREPEND(mctx->debuglist[idx], dl, link);
	mctx->debuglistcnt++;
unlock:
	MCTXUNLOCK(mctx);
}
285
/*
 * Remove the tracking entry for 'ptr' from the debug hash table.
 * mctx must not be locked on entry.  An untracked 'ptr' is a fatal
 * error (UNREACHABLE), since every traced allocation must have been
 * recorded by add_trace_entry().
 */
static void
delete_trace_entry(isc_mem_t *mctx, const void *ptr, size_t size,
		   const char *file, unsigned int line) {
	debuglink_t *dl = NULL;
	uint32_t hash;
	uint32_t idx;

	MCTXLOCK(mctx);

	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "del %p size %zu file %s line %u mctx %p\n",
			ptr, size, file, line, mctx);
	}

	if (mctx->debuglist == NULL) {
		goto unlock;
	}

#ifdef __COVERITY__
	/*
	 * Use simple conversion from pointer to hash to avoid
	 * tainting 'ptr' due to byte swap in isc_hash_function.
	 */
	hash = (uintptr_t)ptr >> 3;
#else
	hash = isc_hash_function(&ptr, sizeof(ptr), true);
#endif
	idx = hash % DEBUG_TABLE_COUNT;

	dl = ISC_LIST_HEAD(mctx->debuglist[idx]);
	while (dl != NULL) {
		if (dl->ptr == ptr) {
			ISC_LIST_UNLINK(mctx->debuglist[idx], dl, link);
			decrement_malloced(mctx, sizeof(*dl));
			sdallocx(dl, sizeof(*dl), mctx->jemalloc_flags);
			goto unlock;
		}
		dl = ISC_LIST_NEXT(dl, link);
	}

	/*
	 * If we get here, we didn't find the item on the list.  We're
	 * screwed.
	 */
	UNREACHABLE();
unlock:
	MCTXUNLOCK(mctx);
}
334 #endif /* ISC_MEM_TRACKLINES */
335
336 #define ADJUST_ZERO_ALLOCATION_SIZE(s) \
337 if (s == 0) { \
338 s = ZERO_ALLOCATION_SIZE; \
339 }
340
341 #define MEM_ALIGN(a) ((a) ? MALLOCX_ALIGN(a) : 0)
342
343 /*!
344 * Perform a malloc, doing memory filling and overrun detection as necessary.
345 */
346 static void *
347 mem_get(isc_mem_t *ctx, size_t size, int flags) {
348 char *ret = NULL;
349
350 ADJUST_ZERO_ALLOCATION_SIZE(size);
351
352 ret = mallocx(size, flags | ctx->jemalloc_flags);
353 INSIST(ret != NULL);
354
355 if ((ctx->flags & ISC_MEMFLAG_FILL) != 0) {
356 memset(ret, 0xbe, size); /* Mnemonic for "beef". */
357 }
358
359 return (ret);
360 }
361
362 /*!
363 * Perform a free, doing memory filling and overrun detection as necessary.
364 */
365 /* coverity[+free : arg-1] */
366 static void
367 mem_put(isc_mem_t *ctx, void *mem, size_t size, int flags) {
368 ADJUST_ZERO_ALLOCATION_SIZE(size);
369
370 if ((ctx->flags & ISC_MEMFLAG_FILL) != 0) {
371 memset(mem, 0xde, size); /* Mnemonic for "dead". */
372 }
373 sdallocx(mem, size, flags | ctx->jemalloc_flags);
374 }
375
376 static void *
377 mem_realloc(isc_mem_t *ctx, void *old_ptr, size_t old_size, size_t new_size,
378 int flags) {
379 void *new_ptr = NULL;
380
381 ADJUST_ZERO_ALLOCATION_SIZE(new_size);
382
383 new_ptr = rallocx(old_ptr, new_size, flags | ctx->jemalloc_flags);
384 INSIST(new_ptr != NULL);
385
386 if ((ctx->flags & ISC_MEMFLAG_FILL) != 0) {
387 ssize_t diff_size = new_size - old_size;
388 void *diff_ptr = (uint8_t *)new_ptr + old_size;
389 if (diff_size > 0) {
390 /* Mnemonic for "beef". */
391 memset(diff_ptr, 0xbe, diff_size);
392 }
393 }
394
395 return (new_ptr);
396 }
397
/*
 * Map an allocation size to its stats bucket; every size at or beyond
 * the last bucket shares the final "overflow" bucket.
 */
#define stats_bucket(ctx, size)                       \
	((size / STATS_BUCKET_SIZE) >= STATS_BUCKETS  \
		 ? &ctx->stats[STATS_BUCKETS]         \
		 : &ctx->stats[size / STATS_BUCKET_SIZE])
402
403 /*!
404 * Update internal counters after a memory get.
405 */
406 static void
407 mem_getstats(isc_mem_t *ctx, size_t size) {
408 struct stats *stats = stats_bucket(ctx, size);
409
410 atomic_fetch_add_relaxed(&ctx->total, size);
411 atomic_fetch_add_release(&ctx->inuse, size);
412
413 atomic_fetch_add_relaxed(&stats->gets, 1);
414 atomic_fetch_add_relaxed(&stats->totalgets, 1);
415
416 increment_malloced(ctx, size);
417 }
418
419 /*!
420 * Update internal counters after a memory put.
421 */
422 static void
423 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
424 struct stats *stats = stats_bucket(ctx, size);
425 atomic_size_t s, g;
426
427 UNUSED(ptr);
428
429 s = atomic_fetch_sub_release(&ctx->inuse, size);
430 INSIST(s >= size);
431
432 g = atomic_fetch_sub_release(&stats->gets, 1);
433 INSIST(g >= 1);
434
435 decrement_malloced(ctx, size);
436 }
437
438 /*
439 * Private.
440 */
441
/*
 * Create a fresh jemalloc arena and report its number via
 * '*pnew_arenano'.  On builds without the jemalloc >= 4 control API
 * this is a no-op that reports ISC_MEM_ILLEGAL_ARENA.  Returns false
 * only if mallctl("arenas.create") fails.
 */
static bool
mem_jemalloc_arena_create(unsigned int *pnew_arenano) {
	REQUIRE(pnew_arenano != NULL);

#if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
	unsigned int arenano = 0;
	size_t len = sizeof(arenano);
	int res = 0;

	res = mallctl("arenas.create", &arenano, &len, NULL, 0);
	if (res != 0) {
		return (false);
	}

	*pnew_arenano = arenano;

	return (true);
#else
	*pnew_arenano = ISC_MEM_ILLEGAL_ARENA;
	return (true);
#endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
}
464
/*
 * Destroy the jemalloc arena numbered 'arenano' via the
 * "arena.<i>.destroy" mallctl.  A no-op (returning true) on builds
 * without the jemalloc >= 4 control API.
 */
static bool
mem_jemalloc_arena_destroy(unsigned int arenano) {
#if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
	int res = 0;
	char buf[256] = { 0 };

	(void)snprintf(buf, sizeof(buf), "arena.%u.destroy", arenano);
	res = mallctl(buf, NULL, NULL, NULL, 0);
	if (res != 0) {
		return (false);
	}

	return (true);
#else
	UNUSED(arenano);
	return (true);
#endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
}
483
484 static void
485 mem_initialize(void) {
486 isc_mutex_init(&contextslock);
487 ISC_LIST_INIT(contexts);
488 totallost = 0;
489 }
490
/* Public initializer; idempotent thanks to isc_once_do(). */
void
isc__mem_initialize(void) {
	RUNTIME_CHECK(isc_once_do(&init_once, mem_initialize) == ISC_R_SUCCESS);
}
495
/*
 * Global teardown: verify every context has been destroyed
 * (isc__mem_checkdestroyed() reports leftovers), then release the
 * global contexts lock.
 */
static void
mem_shutdown(void) {
	isc__mem_checkdestroyed();

	isc_mutex_destroy(&contextslock);
}
502
/* Public shutdown; idempotent thanks to isc_once_do(). */
void
isc__mem_shutdown(void) {
	RUNTIME_CHECK(isc_once_do(&shut_once, mem_shutdown) == ISC_R_SUCCESS);
}
507
/*
 * Allocate and initialize a new memory context.  The context structure
 * itself comes straight from mallocx() (cacheline-aligned), since no
 * context exists yet to allocate from.
 */
static void
mem_create(isc_mem_t **ctxp, unsigned int flags, unsigned int jemalloc_flags) {
	isc_mem_t *ctx = NULL;

	REQUIRE(ctxp != NULL && *ctxp == NULL);

	ctx = mallocx(sizeof(*ctx),
		      MALLOCX_ALIGN(isc_os_cacheline()) | jemalloc_flags);
	INSIST(ctx != NULL);

	*ctx = (isc_mem_t){
		.magic = MEM_MAGIC,
		.flags = flags,
		.jemalloc_flags = jemalloc_flags,
		.jemalloc_arena = ISC_MEM_ILLEGAL_ARENA,
		.checkfree = true,
	};

	isc_mutex_init(&ctx->lock);
	isc_refcount_init(&ctx->references, 1);

	/* The context's own allocation is charged to itself. */
	atomic_init(&ctx->total, 0);
	atomic_init(&ctx->inuse, 0);
	atomic_init(&ctx->maxinuse, 0);
	atomic_init(&ctx->malloced, sizeof(*ctx));
	atomic_init(&ctx->maxmalloced, sizeof(*ctx));
	atomic_init(&ctx->hi_water, 0);
	atomic_init(&ctx->lo_water, 0);
	atomic_init(&ctx->hi_called, false);
	atomic_init(&ctx->is_overmem, false);

	for (size_t i = 0; i < STATS_BUCKETS + 1; i++) {
		atomic_init(&ctx->stats[i].gets, 0);
		atomic_init(&ctx->stats[i].totalgets, 0);
	}
	ISC_LIST_INIT(ctx->pools);

#if ISC_MEM_TRACKLINES
	/* Hash table of live allocations for ISC_MEM_DEBUGRECORD mode. */
	if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
		unsigned int i;

		ctx->debuglist =
			mallocx((DEBUG_TABLE_COUNT * sizeof(debuglist_t)),
				ctx->jemalloc_flags);
		INSIST(ctx->debuglist != NULL);

		for (i = 0; i < DEBUG_TABLE_COUNT; i++) {
			ISC_LIST_INIT(ctx->debuglist[i]);
		}
		increment_malloced(ctx,
				   DEBUG_TABLE_COUNT * sizeof(debuglist_t));
	}
#endif /* if ISC_MEM_TRACKLINES */

	/* Register on the global list so stats/teardown can find it. */
	LOCK(&contextslock);
	ISC_LIST_INITANDAPPEND(contexts, ctx, link);
	UNLOCK(&contextslock);

	*ctxp = ctx;
}
568
569 /*
570 * Public.
571 */
572
/*
 * Tear down a context whose last reference is gone: unlink it from the
 * global list, drain the debug table, verify (when checkfree is set)
 * that no accounted memory remains, then free the context itself and
 * finally its private jemalloc arena, if any.
 */
static void
destroy(isc_mem_t *ctx) {
	unsigned int i;
	size_t malloced;
	unsigned int arena_no;

	LOCK(&contextslock);
	ISC_LIST_UNLINK(contexts, ctx, link);
	totallost += isc_mem_inuse(ctx);
	UNLOCK(&contextslock);

	ctx->magic = 0;

	/* Saved now: ctx is freed before the arena is destroyed. */
	arena_no = ctx->jemalloc_arena;

	INSIST(ISC_LIST_EMPTY(ctx->pools));

#if ISC_MEM_TRACKLINES
	if (ctx->debuglist != NULL) {
		debuglink_t *dl;
		for (i = 0; i < DEBUG_TABLE_COUNT; i++) {
			for (dl = ISC_LIST_HEAD(ctx->debuglist[i]); dl != NULL;
			     dl = ISC_LIST_HEAD(ctx->debuglist[i]))
			{
				/* Dump leaked entries before asserting. */
				if (ctx->checkfree && dl->ptr != NULL) {
					print_active(ctx, stderr);
				}
				INSIST(!ctx->checkfree || dl->ptr == NULL);

				ISC_LIST_UNLINK(ctx->debuglist[i], dl, link);
				sdallocx(dl, sizeof(*dl), ctx->jemalloc_flags);
				decrement_malloced(ctx, sizeof(*dl));
			}
		}

		sdallocx(ctx->debuglist,
			 (DEBUG_TABLE_COUNT * sizeof(debuglist_t)),
			 ctx->jemalloc_flags);
		decrement_malloced(ctx,
				   DEBUG_TABLE_COUNT * sizeof(debuglist_t));
	}
#endif /* if ISC_MEM_TRACKLINES */

	/* Any bucket with outstanding gets indicates a leak. */
	if (ctx->checkfree) {
		for (i = 0; i <= STATS_BUCKETS; i++) {
			struct stats *stats = &ctx->stats[i];
			size_t gets = atomic_load_acquire(&stats->gets);
			if (gets != 0U) {
				fprintf(stderr,
					"Failing assertion due to probable "
					"leaked memory in context %p (\"%s\") "
					"(stats[%u].gets == %zu).\n",
					ctx, ctx->name, i, gets);
#if ISC_MEM_TRACKLINES
				print_active(ctx, stderr);
#endif /* if ISC_MEM_TRACKLINES */
				INSIST(gets == 0U);
			}
		}
	}

	isc_mutex_destroy(&ctx->lock);

	malloced = decrement_malloced(ctx, sizeof(*ctx));

	if (ctx->checkfree) {
		/* Everything ever accounted must have been returned. */
		INSIST(malloced == 0);
	}
	sdallocx(ctx, sizeof(*ctx),
		 MALLOCX_ALIGN(isc_os_cacheline()) | ctx->jemalloc_flags);

	if (arena_no != ISC_MEM_ILLEGAL_ARENA) {
		RUNTIME_CHECK(mem_jemalloc_arena_destroy(arena_no) == true);
	}
}
648
649 void
650 isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
651 REQUIRE(VALID_CONTEXT(source));
652 REQUIRE(targetp != NULL && *targetp == NULL);
653
654 isc_refcount_increment(&source->references);
655
656 *targetp = source;
657 }
658
/*
 * Drop one reference to '*ctxp', destroying the context when the last
 * reference goes away.  '*ctxp' is always NULLed out.
 */
void
isc__mem_detach(isc_mem_t **ctxp FLARG) {
	isc_mem_t *ctx = NULL;

	REQUIRE(ctxp != NULL && VALID_CONTEXT(*ctxp));

	ctx = *ctxp;
	*ctxp = NULL;

	if (isc_refcount_decrement(&ctx->references) == 1) {
		isc_refcount_destroy(&ctx->references);
#if ISC_MEM_TRACKLINES
		if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
			fprintf(stderr, "destroy mctx %p file %s line %u\n",
				ctx, file, line);
		}
#endif
		destroy(ctx);
	}
}
679
680 /*
681 * isc_mem_putanddetach() is the equivalent of:
682 *
683 * mctx = NULL;
684 * isc_mem_attach(ptr->mctx, &mctx);
685 * isc_mem_detach(&ptr->mctx);
686 * isc_mem_put(mctx, ptr, sizeof(*ptr);
687 * isc_mem_detach(&mctx);
688 */
689
/*
 * Return 'ptr' to '*ctxp' and drop one reference in a single call;
 * destroys the context if that was the last reference.
 */
void
isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size,
		      size_t alignment FLARG) {
	isc_mem_t *ctx = NULL;

	REQUIRE(ctxp != NULL && VALID_CONTEXT(*ctxp));
	REQUIRE(ptr != NULL);
	REQUIRE(size != 0);

	ctx = *ctxp;
	*ctxp = NULL;

	DELETE_TRACE(ctx, ptr, size, file, line);

	/*
	 * NOTE(review): unlike isc__mem_put() this path does not invoke
	 * CALL_LO_WATER -- presumably because the context may be about
	 * to be destroyed; confirm this is intentional.
	 */
	mem_putstats(ctx, ptr, size);
	mem_put(ctx, ptr, size, MEM_ALIGN(alignment));

	if (isc_refcount_decrement(&ctx->references) == 1) {
		isc_refcount_destroy(&ctx->references);
		destroy(ctx);
	}
}
712
713 void
714 isc__mem_destroy(isc_mem_t **ctxp FLARG) {
715 isc_mem_t *ctx = NULL;
716
717 /*
718 * This routine provides legacy support for callers who use mctxs
719 * without attaching/detaching.
720 */
721
722 REQUIRE(ctxp != NULL && VALID_CONTEXT(*ctxp));
723
724 ctx = *ctxp;
725 *ctxp = NULL;
726
727 #if ISC_MEM_TRACKLINES
728 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
729 fprintf(stderr, "destroy mctx %p file %s line %u\n", ctx, file,
730 line);
731 }
732
733 if (isc_refcount_decrement(&ctx->references) > 1) {
734 print_active(ctx, stderr);
735 }
736 #else /* if ISC_MEM_TRACKLINES */
737 isc_refcount_decrementz(&ctx->references);
738 #endif /* if ISC_MEM_TRACKLINES */
739 isc_refcount_destroy(&ctx->references);
740 destroy(ctx);
741
742 *ctxp = NULL;
743 }
744
/*
 * Invoke the registered water callback on high/low-water transitions;
 * hi_water()/lo_water() decide whether a crossing actually occurred.
 */
#define CALL_HI_WATER(ctx)                                             \
	{                                                              \
		if (ctx->water != NULL && hi_water(ctx)) {             \
			(ctx->water)(ctx->water_arg, ISC_MEM_HIWATER); \
		}                                                      \
	}

#define CALL_LO_WATER(ctx)                                             \
	{                                                              \
		if ((ctx->water != NULL) && lo_water(ctx)) {           \
			(ctx->water)(ctx->water_arg, ISC_MEM_LOWATER); \
		}                                                      \
	}
758
/*
 * Return true when 'ctx' has just crossed above its high-water mark.
 * Once hi_called is set (via isc_mem_waterack(ISC_MEM_HIWATER)) this
 * returns false until a LOWATER acknowledgement resets it, so the
 * callback fires at most once per excursion.  Also maintains maxinuse.
 */
static bool
hi_water(isc_mem_t *ctx) {
	size_t inuse;
	size_t maxinuse;
	size_t hiwater = atomic_load_relaxed(&ctx->hi_water);

	/* A zero mark disables high-water checking entirely. */
	if (hiwater == 0) {
		return (false);
	}

	inuse = atomic_load_acquire(&ctx->inuse);
	if (inuse <= hiwater) {
		return (false);
	}

	maxinuse = atomic_load_acquire(&ctx->maxinuse);
	if (inuse > maxinuse) {
		(void)atomic_compare_exchange_strong(&ctx->maxinuse, &maxinuse,
						     inuse);

		if ((isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0) {
			fprintf(stderr, "maxinuse = %lu\n",
				(unsigned long)inuse);
		}
	}

	/* Callback already fired and not yet acknowledged. */
	if (atomic_load_acquire(&ctx->hi_called)) {
		return (false);
	}

	/* We are over water (for the first time) */
	atomic_store_release(&ctx->is_overmem, true);

	return (true);
}
794
/*
 * Return true when 'ctx' has just dropped below its low-water mark
 * after a high-water callback fired (hi_called set).  Clears the
 * overmem flag on that transition.
 */
static bool
lo_water(isc_mem_t *ctx) {
	size_t inuse;
	size_t lowater = atomic_load_relaxed(&ctx->lo_water);

	/* A zero mark disables low-water checking entirely. */
	if (lowater == 0) {
		return (false);
	}

	inuse = atomic_load_acquire(&ctx->inuse);
	if (inuse >= lowater) {
		return (false);
	}

	/* Only meaningful after an unacknowledged HIWATER callback. */
	if (!atomic_load_acquire(&ctx->hi_called)) {
		return (false);
	}

	/* We are no longer overmem */
	atomic_store_release(&ctx->is_overmem, false);

	return (true);
}
818
/*
 * Allocate 'size' bytes (optionally aligned) from 'ctx', updating the
 * statistics and, in debug builds, recording the caller's file/line.
 */
void *
isc__mem_get(isc_mem_t *ctx, size_t size, size_t alignment FLARG) {
	void *ptr = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	ptr = mem_get(ctx, size, MEM_ALIGN(alignment));

	mem_getstats(ctx, size);
	ADD_TRACE(ctx, ptr, size, file, line);

	/* May invoke the water callback if we crossed the high mark. */
	CALL_HI_WATER(ctx);

	return (ptr);
}
834
/*
 * Return memory obtained from isc__mem_get(); 'size' and 'alignment'
 * must match the original request.
 */
void
isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size, size_t alignment FLARG) {
	REQUIRE(VALID_CONTEXT(ctx));

	DELETE_TRACE(ctx, ptr, size, file, line);

	mem_putstats(ctx, ptr, size);
	mem_put(ctx, ptr, size, MEM_ALIGN(alignment));

	/* May invoke the water callback if we fell below the low mark. */
	CALL_LO_WATER(ctx);
}
846
847 void
848 isc_mem_waterack(isc_mem_t *ctx, int flag) {
849 REQUIRE(VALID_CONTEXT(ctx));
850
851 if (flag == ISC_MEM_LOWATER) {
852 atomic_store(&ctx->hi_called, false);
853 } else if (flag == ISC_MEM_HIWATER) {
854 atomic_store(&ctx->hi_called, true);
855 }
856 }
857
858 #if ISC_MEM_TRACKLINES
859 static void
860 print_active(isc_mem_t *mctx, FILE *out) {
861 if (mctx->debuglist != NULL) {
862 debuglink_t *dl;
863 unsigned int i;
864 bool found;
865
866 fprintf(out, "Dump of all outstanding memory "
867 "allocations:\n");
868 found = false;
869 for (i = 0; i < DEBUG_TABLE_COUNT; i++) {
870 dl = ISC_LIST_HEAD(mctx->debuglist[i]);
871
872 if (dl != NULL) {
873 found = true;
874 }
875
876 while (dl != NULL) {
877 if (dl->ptr != NULL) {
878 fprintf(out,
879 "\tptr %p size %zu "
880 "file %s "
881 "line %u\n",
882 dl->ptr, dl->size, dl->file,
883 dl->line);
884 }
885 dl = ISC_LIST_NEXT(dl, link);
886 }
887 }
888
889 if (!found) {
890 fprintf(out, "\tNone.\n");
891 }
892 }
893 }
894 #endif /* if ISC_MEM_TRACKLINES */
895
/*
 * Print the stats[] on the stream "out" with suitable formatting.
 */
void
isc_mem_stats(isc_mem_t *ctx, FILE *out) {
	isc_mempool_t *pool = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	MCTXLOCK(ctx);

	/* One line per non-empty bucket; the final bucket prints ">=". */
	for (size_t i = 0; i <= STATS_BUCKETS; i++) {
		size_t totalgets;
		size_t gets;
		struct stats *stats = &ctx->stats[i];

		totalgets = atomic_load_acquire(&stats->totalgets);
		gets = atomic_load_acquire(&stats->gets);

		if (totalgets != 0U && gets != 0U) {
			fprintf(out, "%s%5zu: %11zu gets, %11zu rem",
				(i == STATS_BUCKETS) ? ">=" : " ", i,
				totalgets, gets);
			fputc('\n', out);
		}
	}

	/*
	 * Note that since a pool can be locked now, these stats might
	 * be somewhat off if the pool is in active use at the time the
	 * stats are dumped. The link fields are protected by the
	 * isc_mem_t's lock, however, so walking this list and
	 * extracting integers from stats fields is always safe.
	 */
	pool = ISC_LIST_HEAD(ctx->pools);
	if (pool != NULL) {
		fprintf(out, "[Pool statistics]\n");
		fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %1s\n", "name",
			"size", "allocated", "freecount", "freemax",
			"fillcount", "gets", "L");
	}
	while (pool != NULL) {
		fprintf(out,
			"%15s %10zu %10zu %10zu %10zu %10zu %10zu %10zu %s\n",
			pool->name, pool->size, (size_t)0, pool->allocated,
			pool->freecount, pool->freemax, pool->fillcount,
			pool->gets, "N");
		pool = ISC_LIST_NEXT(pool, link);
	}

#if ISC_MEM_TRACKLINES
	print_active(ctx, out);
#endif /* if ISC_MEM_TRACKLINES */

	MCTXUNLOCK(ctx);
}
952
/*
 * Allocate 'size' bytes without the caller having to remember the
 * size: accounting uses the allocator's real (usable) size, which
 * isc__mem_free() later recovers with sallocx().
 */
void *
isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
	void *ptr = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	ptr = mem_get(ctx, size, 0);

	/* Recalculate the real allocated size */
	size = sallocx(ptr, ctx->jemalloc_flags);

	mem_getstats(ctx, size);
	ADD_TRACE(ctx, ptr, size, file, line);

	CALL_HI_WATER(ctx);

	return (ptr);
}
971
/*
 * Resize an isc__mem_get() allocation.  NULL old pointer degenerates
 * to a get; zero new size degenerates to a put (returning NULL).
 */
void *
isc__mem_reget(isc_mem_t *ctx, void *old_ptr, size_t old_size, size_t new_size,
	       size_t alignment FLARG) {
	void *new_ptr = NULL;

	if (old_ptr == NULL) {
		/* A NULL old pointer must carry a zero old size. */
		REQUIRE(old_size == 0);
		new_ptr = isc__mem_get(ctx, new_size, alignment FLARG_PASS);
	} else if (new_size == 0) {
		isc__mem_put(ctx, old_ptr, old_size, alignment FLARG_PASS);
	} else {
		DELETE_TRACE(ctx, old_ptr, old_size, file, line);
		mem_putstats(ctx, old_ptr, old_size);

		new_ptr = mem_realloc(ctx, old_ptr, old_size, new_size,
				      MEM_ALIGN(alignment));

		mem_getstats(ctx, new_size);
		ADD_TRACE(ctx, new_ptr, new_size, file, line);

		/*
		 * We want to postpone the call to water in edge case
		 * where the realloc will exactly hit on the boundary of
		 * the water and we would call water twice.
		 */
		CALL_LO_WATER(ctx);
		CALL_HI_WATER(ctx);
	}

	return (new_ptr);
}
1003
/*
 * Resize an isc__mem_allocate() allocation.  Sizes are tracked through
 * sallocx(), so neither the old nor the final size need be supplied by
 * the caller.  NULL old pointer degenerates to allocate; zero new size
 * degenerates to free (returning NULL).
 */
void *
isc__mem_reallocate(isc_mem_t *ctx, void *old_ptr, size_t new_size FLARG) {
	void *new_ptr = NULL;

	REQUIRE(VALID_CONTEXT(ctx));

	if (old_ptr == NULL) {
		new_ptr = isc__mem_allocate(ctx, new_size FLARG_PASS);
	} else if (new_size == 0) {
		isc__mem_free(ctx, old_ptr FLARG_PASS);
	} else {
		/* The accounted size is the allocator's usable size. */
		size_t old_size = sallocx(old_ptr, ctx->jemalloc_flags);

		DELETE_TRACE(ctx, old_ptr, old_size, file, line);
		mem_putstats(ctx, old_ptr, old_size);

		new_ptr = mem_realloc(ctx, old_ptr, old_size, new_size, 0);

		/* Recalculate the real allocated size */
		new_size = sallocx(new_ptr, ctx->jemalloc_flags);

		mem_getstats(ctx, new_size);
		ADD_TRACE(ctx, new_ptr, new_size, file, line);

		/*
		 * We want to postpone the call to water in edge case
		 * where the realloc will exactly hit on the boundary of
		 * the water and we would call water twice.
		 */
		CALL_LO_WATER(ctx);
		CALL_HI_WATER(ctx);
	}

	return (new_ptr);
}
1039
/*
 * Free memory obtained via isc__mem_allocate()/isc__mem_reallocate().
 * The accounted size is recovered with sallocx(), so no size argument
 * is needed.
 */
void
isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
	size_t size = 0;

	REQUIRE(VALID_CONTEXT(ctx));

	size = sallocx(ptr, ctx->jemalloc_flags);

	DELETE_TRACE(ctx, ptr, size, file, line);

	mem_putstats(ctx, ptr, size);
	mem_put(ctx, ptr, size, 0);

	CALL_LO_WATER(ctx);
}
1055
1056 /*
1057 * Other useful things.
1058 */
1059
1060 char *
1061 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1062 size_t len;
1063 char *ns = NULL;
1064
1065 REQUIRE(VALID_CONTEXT(mctx));
1066 REQUIRE(s != NULL);
1067
1068 len = strlen(s) + 1;
1069
1070 ns = isc__mem_allocate(mctx, len FLARG_PASS);
1071
1072 strlcpy(ns, s, len);
1073
1074 return (ns);
1075 }
1076
1077 char *
1078 isc__mem_strndup(isc_mem_t *mctx, const char *s, size_t size FLARG) {
1079 size_t len;
1080 char *ns = NULL;
1081
1082 REQUIRE(VALID_CONTEXT(mctx));
1083 REQUIRE(s != NULL);
1084 REQUIRE(size != 0);
1085
1086 len = strlen(s) + 1;
1087 if (len > size) {
1088 len = size;
1089 }
1090
1091 ns = isc__mem_allocate(mctx, len FLARG_PASS);
1092
1093 strlcpy(ns, s, len);
1094
1095 return (ns);
1096 }
1097
1098 void
1099 isc_mem_setdestroycheck(isc_mem_t *ctx, bool flag) {
1100 REQUIRE(VALID_CONTEXT(ctx));
1101
1102 MCTXLOCK(ctx);
1103
1104 ctx->checkfree = flag;
1105
1106 MCTXUNLOCK(ctx);
1107 }
1108
/* Bytes currently accounted as allocated from 'ctx'. */
size_t
isc_mem_inuse(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->inuse));
}
1115
/* High-water mark of isc_mem_inuse() for 'ctx'. */
size_t
isc_mem_maxinuse(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->maxinuse));
}
1122
/* Cumulative bytes ever requested from 'ctx' (never decreases). */
size_t
isc_mem_total(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->total));
}
1129
/* Bytes currently malloced on behalf of 'ctx', including overhead. */
size_t
isc_mem_malloced(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->malloced));
}
1136
/* High-water mark of isc_mem_malloced() for 'ctx'. */
size_t
isc_mem_maxmalloced(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_acquire(&ctx->maxmalloced));
}
1143
/* Remove any configured water callback and zero both marks. */
void
isc_mem_clearwater(isc_mem_t *mctx) {
	isc_mem_setwater(mctx, NULL, NULL, 0, 0);
}
1148
/*
 * Install or adjust the water callback and its hi/lo thresholds.
 * Only three transitions are legal: first-time installation, adjusting
 * the marks of the existing callback, and clearing everything
 * (water == NULL, hiwater == 0).  Clearing while above the low mark
 * delivers a final LOWATER notification to the old callback.
 */
void
isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
		 size_t hiwater, size_t lowater) {
	isc_mem_water_t oldwater;
	void *oldwater_arg;

	REQUIRE(VALID_CONTEXT(ctx));
	REQUIRE(hiwater >= lowater);

	oldwater = ctx->water;
	oldwater_arg = ctx->water_arg;

	/* No water was set and new water is also NULL */
	if (oldwater == NULL && water == NULL) {
		return;
	}

	/* The water function is being set for the first time */
	if (oldwater == NULL) {
		REQUIRE(water != NULL && lowater > 0);

		INSIST(atomic_load(&ctx->hi_water) == 0);
		INSIST(atomic_load(&ctx->lo_water) == 0);

		ctx->water = water;
		ctx->water_arg = water_arg;
		atomic_store(&ctx->hi_water, hiwater);
		atomic_store(&ctx->lo_water, lowater);

		return;
	}

	/* Either keep the same callback/arg, or clear everything. */
	REQUIRE((water == oldwater && water_arg == oldwater_arg) ||
		(water == NULL && water_arg == NULL && hiwater == 0));

	atomic_store(&ctx->hi_water, hiwater);
	atomic_store(&ctx->lo_water, lowater);

	/* Release a pending HIWATER state under the new thresholds. */
	if (atomic_load_acquire(&ctx->hi_called) &&
	    (atomic_load_acquire(&ctx->inuse) < lowater || lowater == 0U))
	{
		(oldwater)(oldwater_arg, ISC_MEM_LOWATER);
	}
}
1193
/* True while 'ctx' is between a hi-water and lo-water crossing. */
bool
isc_mem_isovermem(isc_mem_t *ctx) {
	REQUIRE(VALID_CONTEXT(ctx));

	return (atomic_load_relaxed(&ctx->is_overmem));
}
1200
1201 void
1202 isc_mem_setname(isc_mem_t *ctx, const char *name) {
1203 REQUIRE(VALID_CONTEXT(ctx));
1204
1205 LOCK(&ctx->lock);
1206 strlcpy(ctx->name, name, sizeof(ctx->name));
1207 UNLOCK(&ctx->lock);
1208 }
1209
1210 const char *
1211 isc_mem_getname(isc_mem_t *ctx) {
1212 REQUIRE(VALID_CONTEXT(ctx));
1213
1214 if (ctx->name[0] == 0) {
1215 return ("");
1216 }
1217
1218 return (ctx->name);
1219 }
1220
1221 /*
1222 * Memory pool stuff
1223 */
1224
/*
 * Create a fixed-size-item pool on top of 'mctx'.  The pool holds a
 * reference on the context and is linked onto its pool list so
 * isc_mem_stats() can report it.
 */
void
isc__mempool_create(isc_mem_t *restrict mctx, const size_t element_size,
		    isc_mempool_t **restrict mpctxp FLARG) {
	isc_mempool_t *restrict mpctx = NULL;
	size_t size = element_size;

	REQUIRE(VALID_CONTEXT(mctx));
	REQUIRE(size > 0U);
	REQUIRE(mpctxp != NULL && *mpctxp == NULL);

	/*
	 * Mempools are stored as a linked list of element, so each item
	 * must be at least large enough to hold the 'next' pointer.
	 */
	if (size < sizeof(element)) {
		size = sizeof(element);
	}

	/*
	 * Allocate space for this pool, initialize values, and if all
	 * works well, attach to the memory context.
	 */
	mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));

	*mpctx = (isc_mempool_t){
		.size = size,
		.freemax = 1,
		.fillcount = 1,
	};

#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "create pool %p file %s line %u mctx %p\n",
			mpctx, file, line, mctx);
	}
#endif /* ISC_MEM_TRACKLINES */

	isc_mem_attach(mctx, &mpctx->mctx);
	mpctx->magic = MEMPOOL_MAGIC;

	*mpctxp = (isc_mempool_t *)mpctx;

	MCTXLOCK(mctx);
	ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
	mctx->poolcnt++;
	MCTXUNLOCK(mctx);
}
1271
/*
 * Set the pool's diagnostic name (shown in leak reports and stats).
 * strlcpy() truncates to the fixed buffer and always NUL-terminates.
 */
void
isc_mempool_setname(isc_mempool_t *restrict mpctx, const char *name) {
	REQUIRE(VALID_MEMPOOL(mpctx));
	REQUIRE(name != NULL);

	strlcpy(mpctx->name, name, sizeof(mpctx->name));
}
1279
/*
 * Destroy a memory pool: drain its free list back to the owning
 * context, unlink it from that context's pool list, and release the
 * pool structure itself.  All outstanding items must have been put
 * back before this is called.
 */
void
isc__mempool_destroy(isc_mempool_t **restrict mpctxp FLARG) {
	isc_mempool_t *restrict mpctx = NULL;
	isc_mem_t *mctx = NULL;
	element *restrict item = NULL;

	REQUIRE(mpctxp != NULL);
	REQUIRE(VALID_MEMPOOL(*mpctxp));

	mpctx = *mpctxp;
	*mpctxp = NULL;

	mctx = mpctx->mctx;

#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "destroy pool %p file %s line %u mctx %p\n",
			mpctx, file, line, mctx);
	}
#endif

	/* Live allocations at destroy time are a caller bug: report, then
	 * assert. */
	if (mpctx->allocated > 0) {
		UNEXPECTED_ERROR("mempool %s leaked memory", mpctx->name);
	}
	REQUIRE(mpctx->allocated == 0);

	/*
	 * Return any items on the free list
	 */
	while (mpctx->items != NULL) {
		INSIST(mpctx->freecount > 0);
		mpctx->freecount--;

		item = mpctx->items;
		mpctx->items = item->next;

		mem_putstats(mctx, item, mpctx->size);
		mem_put(mctx, item, mpctx->size, 0);
	}

	/*
	 * Remove our linked list entry from the memory context.
	 */
	MCTXLOCK(mctx);
	ISC_LIST_UNLINK(mctx->pools, mpctx, link);
	mctx->poolcnt--;
	MCTXUNLOCK(mctx);

	/* Invalidate the magic before freeing so stale pointers fail
	 * VALID_MEMPOOL(). */
	mpctx->magic = 0;

	/* Frees the pool and drops its reference on the context. */
	isc_mem_putanddetach(&mpctx->mctx, mpctx, sizeof(isc_mempool_t));
}
1332
/*
 * Allocate one element from the pool.  When the free list is empty it
 * is refilled from the memory context in a batch of 'fillcount' items.
 * Returns a pointer to an element of at least 'size' bytes.
 */
void *
isc__mempool_get(isc_mempool_t *restrict mpctx FLARG) {
	element *restrict item = NULL;

	REQUIRE(VALID_MEMPOOL(mpctx));

	mpctx->allocated++;

	if (mpctx->items == NULL) {
		isc_mem_t *mctx = mpctx->mctx;
#if !__SANITIZE_ADDRESS__
		const size_t fillcount = mpctx->fillcount;
#else
		/* Under ASan the batch refill is disabled: items are
		 * fetched one at a time from the allocator. */
		const size_t fillcount = 1;
#endif
		/*
		 * We need to dip into the well. Fill up our free list.
		 */
		for (size_t i = 0; i < fillcount; i++) {
			item = mem_get(mctx, mpctx->size, 0);
			mem_getstats(mctx, mpctx->size);
			item->next = mpctx->items;
			mpctx->items = item;
			mpctx->freecount++;
		}
	}

	/* The refill above guarantees at least one item is available. */
	item = mpctx->items;
	INSIST(item != NULL);

	mpctx->items = item->next;

	INSIST(mpctx->freecount > 0);
	mpctx->freecount--;
	mpctx->gets++;

	ADD_TRACE(mpctx->mctx, item, mpctx->size, file, line);

	return (item);
}
1373
/* coverity[+free : arg-1] */
/*
 * Return an element to the pool.  If the free list already holds
 * 'freemax' items, the element is handed straight back to the memory
 * context instead of being cached.
 */
void
isc__mempool_put(isc_mempool_t *restrict mpctx, void *mem FLARG) {
	element *restrict item = NULL;

	REQUIRE(VALID_MEMPOOL(mpctx));
	REQUIRE(mem != NULL);

	isc_mem_t *mctx = mpctx->mctx;
	const size_t freecount = mpctx->freecount;
#if !__SANITIZE_ADDRESS__
	const size_t freemax = mpctx->freemax;
#else
	/* Under ASan, freemax 0 disables caching entirely: every put
	 * goes back to the allocator immediately. */
	const size_t freemax = 0;
#endif

	INSIST(mpctx->allocated > 0);
	mpctx->allocated--;

	DELETE_TRACE(mctx, mem, mpctx->size, file, line);

	/*
	 * If our free list is full, return this to the mctx directly.
	 */
	if (freecount >= freemax) {
		mem_putstats(mctx, mem, mpctx->size);
		mem_put(mctx, mem, mpctx->size, 0);
		return;
	}

	/*
	 * Otherwise, attach it to our free list and bump the counter.
	 */
	item = (element *)mem;
	item->next = mpctx->items;
	mpctx->items = item;
	mpctx->freecount++;
}
1412
1413 /*
1414 * Quotas
1415 */
1416
1417 void
1418 isc_mempool_setfreemax(isc_mempool_t *restrict mpctx,
1419 const unsigned int limit) {
1420 REQUIRE(VALID_MEMPOOL(mpctx));
1421 mpctx->freemax = limit;
1422 }
1423
1424 unsigned int
1425 isc_mempool_getfreemax(isc_mempool_t *restrict mpctx) {
1426 REQUIRE(VALID_MEMPOOL(mpctx));
1427
1428 return (mpctx->freemax);
1429 }
1430
1431 unsigned int
1432 isc_mempool_getfreecount(isc_mempool_t *restrict mpctx) {
1433 REQUIRE(VALID_MEMPOOL(mpctx));
1434
1435 return (mpctx->freecount);
1436 }
1437
1438 unsigned int
1439 isc_mempool_getallocated(isc_mempool_t *restrict mpctx) {
1440 REQUIRE(VALID_MEMPOOL(mpctx));
1441
1442 return (mpctx->allocated);
1443 }
1444
1445 void
1446 isc_mempool_setfillcount(isc_mempool_t *restrict mpctx,
1447 unsigned int const limit) {
1448 REQUIRE(VALID_MEMPOOL(mpctx));
1449 REQUIRE(limit > 0);
1450
1451 mpctx->fillcount = limit;
1452 }
1453
1454 unsigned int
1455 isc_mempool_getfillcount(isc_mempool_t *restrict mpctx) {
1456 REQUIRE(VALID_MEMPOOL(mpctx));
1457
1458 return (mpctx->fillcount);
1459 }
1460
1461 /*
1462 * Requires contextslock to be held by caller.
1463 */
#if ISC_MEM_TRACKLINES
static void
print_contexts(FILE *file) {
	/* Walk every live context and dump it with its active records. */
	for (isc_mem_t *ctx = ISC_LIST_HEAD(contexts); ctx != NULL;
	     ctx = ISC_LIST_NEXT(ctx, link))
	{
		const char *name = (ctx->name[0] != 0) ? ctx->name
						       : "<unknown>";

		fprintf(file, "context: %p (%s): %" PRIuFAST32 " references\n",
			ctx, name, isc_refcount_current(&ctx->references));
		print_active(ctx, file);
	}
	fflush(file);
}
#endif
1480
/*
 * FILE pointer (stored as uintptr_t) registered by
 * isc_mem_checkdestroyed() and consumed by isc__mem_checkdestroyed()
 * at shutdown; 0/NULL means the destroy check is disabled.
 */
static atomic_uintptr_t checkdestroyed = 0;
1482
1483 void
1484 isc_mem_checkdestroyed(FILE *file) {
1485 atomic_store_release(&checkdestroyed, (uintptr_t)file);
1486 }
1487
1488 void
1489 isc__mem_checkdestroyed(void) {
1490 FILE *file = (FILE *)atomic_load_acquire(&checkdestroyed);
1491
1492 if (file == NULL) {
1493 return;
1494 }
1495
1496 LOCK(&contextslock);
1497 if (!ISC_LIST_EMPTY(contexts)) {
1498 #if ISC_MEM_TRACKLINES
1499 if ((isc_mem_debugging & TRACE_OR_RECORD) != 0) {
1500 print_contexts(file);
1501 }
1502 #endif /* if ISC_MEM_TRACKLINES */
1503 UNREACHABLE();
1504 }
1505 UNLOCK(&contextslock);
1506 }
1507
1508 unsigned int
1509 isc_mem_references(isc_mem_t *ctx) {
1510 return (isc_refcount_current(&ctx->references));
1511 }
1512
/*
 * Accumulator used by the XML/JSON renderers to total statistics
 * across all memory contexts.
 */
typedef struct summarystat {
	uint64_t total;	      /* sum of isc_mem_total() */
	uint64_t inuse;	      /* sum of isc_mem_inuse() */
	uint64_t malloced;    /* sum of isc_mem_malloced() */
	uint64_t contextsize; /* estimated per-context bookkeeping bytes */
} summarystat_t;
1519
#ifdef HAVE_LIBXML2
/*
 * Evaluate a libxml2 writer call; on a negative status save it in the
 * local 'xmlrc' and jump to the enclosing function's error label.
 */
#define TRY0(a) \
	do { \
		xmlrc = (a); \
		if (xmlrc < 0) \
			goto error; \
	} while (0)
/*
 * Render one memory context as a <context> XML element and fold its
 * statistics into *summary.  Returns the status of the last libxml2
 * writer call (negative on failure).  Caller holds contextslock.
 */
static int
xml_renderctx(isc_mem_t *ctx, summarystat_t *summary, xmlTextWriterPtr writer) {
	REQUIRE(VALID_CONTEXT(ctx));

	int xmlrc;

	MCTXLOCK(ctx);

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "context"));

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%p", ctx));
	TRY0(xmlTextWriterEndElement(writer)); /* id */

	if (ctx->name[0] != 0) {
		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "name"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%s", ctx->name));
		TRY0(xmlTextWriterEndElement(writer)); /* name */
	}

	/* Estimate this context's bookkeeping overhead. */
	summary->contextsize += sizeof(*ctx);
#if ISC_MEM_TRACKLINES
	if (ctx->debuglist != NULL) {
		summary->contextsize += DEBUG_TABLE_COUNT *
						sizeof(debuglist_t) +
					ctx->debuglistcnt * sizeof(debuglink_t);
	}
#endif /* if ISC_MEM_TRACKLINES */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "references"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIuFAST32,
		isc_refcount_current(&ctx->references)));
	TRY0(xmlTextWriterEndElement(writer)); /* references */

	summary->total += isc_mem_total(ctx);
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "total"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_total(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* total */

	summary->inuse += isc_mem_inuse(ctx);
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "inuse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_inuse(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* inuse */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "maxinuse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_maxinuse(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* maxinuse */

	summary->malloced += isc_mem_malloced(ctx);
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "malloced"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    (uint64_t)isc_mem_malloced(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* malloced */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "maxmalloced"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIu64 "", (uint64_t)isc_mem_maxmalloced(ctx)));
	TRY0(xmlTextWriterEndElement(writer)); /* maxmalloced */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "pools"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%u", ctx->poolcnt));
	TRY0(xmlTextWriterEndElement(writer)); /* pools */
	summary->contextsize += ctx->poolcnt * sizeof(isc_mempool_t);

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "hiwater"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIu64 "",
		(uint64_t)atomic_load_relaxed(&ctx->hi_water)));
	TRY0(xmlTextWriterEndElement(writer)); /* hiwater */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "lowater"));
	TRY0(xmlTextWriterWriteFormatString(
		writer, "%" PRIu64 "",
		(uint64_t)atomic_load_relaxed(&ctx->lo_water)));
	TRY0(xmlTextWriterEndElement(writer)); /* lowater */

	TRY0(xmlTextWriterEndElement(writer)); /* context */

/* TRY0() jumps here on the first writer failure; the lock is always
 * released. */
error:
	MCTXUNLOCK(ctx);

	return (xmlrc);
}
1613
/*
 * Render all memory contexts plus a <summary> element into the libxml2
 * writer passed (opaquely) as 'writer0'.  Returns the status of the
 * last writer call (negative on failure).
 */
int
isc_mem_renderxml(void *writer0) {
	isc_mem_t *ctx;
	summarystat_t summary = { 0 };
	uint64_t lost;
	int xmlrc;
	xmlTextWriterPtr writer = (xmlTextWriterPtr)writer0;

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "contexts"));

	/* Snapshot totallost and walk the context list under the lock. */
	LOCK(&contextslock);
	lost = totallost;
	for (ctx = ISC_LIST_HEAD(contexts); ctx != NULL;
	     ctx = ISC_LIST_NEXT(ctx, link))
	{
		xmlrc = xml_renderctx(ctx, &summary, writer);
		if (xmlrc < 0) {
			UNLOCK(&contextslock);
			goto error;
		}
	}
	UNLOCK(&contextslock);

	TRY0(xmlTextWriterEndElement(writer)); /* contexts */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "summary"));

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "TotalUse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.total));
	TRY0(xmlTextWriterEndElement(writer)); /* TotalUse */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "InUse"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.inuse));
	TRY0(xmlTextWriterEndElement(writer)); /* InUse */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "Malloced"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.malloced));
	TRY0(xmlTextWriterEndElement(writer)); /* Malloced */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "ContextSize"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "",
					    summary.contextsize));
	TRY0(xmlTextWriterEndElement(writer)); /* ContextSize */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "Lost"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIu64 "", lost));
	TRY0(xmlTextWriterEndElement(writer)); /* Lost */

	TRY0(xmlTextWriterEndElement(writer)); /* summary */
error:
	return (xmlrc);
}
1669
1670 #endif /* HAVE_LIBXML2 */
1671
1672 #ifdef HAVE_JSON_C
/*
 * Abort (via RUNTIME_CHECK) if a json-c allocation failed.  The
 * parameter is parenthesized so expression arguments expand safely.
 */
#define CHECKMEM(m) RUNTIME_CHECK((m) != NULL)
1674
/*
 * Render one memory context as a JSON object appended to 'array' and
 * fold its statistics into *summary.  Always returns ISC_R_SUCCESS;
 * allocation failures abort via CHECKMEM.  Caller holds contextslock.
 */
static isc_result_t
json_renderctx(isc_mem_t *ctx, summarystat_t *summary, json_object *array) {
	REQUIRE(VALID_CONTEXT(ctx));
	REQUIRE(summary != NULL);
	REQUIRE(array != NULL);

	json_object *ctxobj, *obj;
	char buf[1024];

	MCTXLOCK(ctx);

	/* Accumulate the summary totals for this context. */
	summary->contextsize += sizeof(*ctx);
	summary->total += isc_mem_total(ctx);
	summary->inuse += isc_mem_inuse(ctx);
	summary->malloced += isc_mem_malloced(ctx);
#if ISC_MEM_TRACKLINES
	if (ctx->debuglist != NULL) {
		summary->contextsize += DEBUG_TABLE_COUNT *
						sizeof(debuglist_t) +
					ctx->debuglistcnt * sizeof(debuglink_t);
	}
#endif /* if ISC_MEM_TRACKLINES */

	ctxobj = json_object_new_object();
	CHECKMEM(ctxobj);

	/* The context's address doubles as its identifier. */
	snprintf(buf, sizeof(buf), "%p", ctx);
	obj = json_object_new_string(buf);
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "id", obj);

	if (ctx->name[0] != 0) {
		obj = json_object_new_string(ctx->name);
		CHECKMEM(obj);
		json_object_object_add(ctxobj, "name", obj);
	}

	obj = json_object_new_int64(isc_refcount_current(&ctx->references));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "references", obj);

	obj = json_object_new_int64(isc_mem_total(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "total", obj);

	obj = json_object_new_int64(isc_mem_inuse(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "inuse", obj);

	obj = json_object_new_int64(isc_mem_maxinuse(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "maxinuse", obj);

	obj = json_object_new_int64(isc_mem_malloced(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "malloced", obj);

	obj = json_object_new_int64(isc_mem_maxmalloced(ctx));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "maxmalloced", obj);

	obj = json_object_new_int64(ctx->poolcnt);
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "pools", obj);

	summary->contextsize += ctx->poolcnt * sizeof(isc_mempool_t);

	obj = json_object_new_int64(atomic_load_relaxed(&ctx->hi_water));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "hiwater", obj);

	obj = json_object_new_int64(atomic_load_relaxed(&ctx->lo_water));
	CHECKMEM(obj);
	json_object_object_add(ctxobj, "lowater", obj);

	MCTXUNLOCK(ctx);
	/* Ownership of ctxobj passes to the array. */
	json_object_array_add(array, ctxobj);
	return (ISC_R_SUCCESS);
}
1754
/*
 * Render all memory contexts plus summary totals into the json-c
 * object passed (opaquely) as 'memobj0'.  On failure the partially
 * built context array is released.
 */
isc_result_t
isc_mem_renderjson(void *memobj0) {
	isc_result_t result = ISC_R_SUCCESS;
	isc_mem_t *ctx;
	summarystat_t summary = { 0 };
	uint64_t lost;
	json_object *ctxarray, *obj;
	json_object *memobj = (json_object *)memobj0;

	ctxarray = json_object_new_array();
	CHECKMEM(ctxarray);

	/* Snapshot totallost and walk the context list under the lock. */
	LOCK(&contextslock);
	lost = totallost;
	for (ctx = ISC_LIST_HEAD(contexts); ctx != NULL;
	     ctx = ISC_LIST_NEXT(ctx, link))
	{
		result = json_renderctx(ctx, &summary, ctxarray);
		if (result != ISC_R_SUCCESS) {
			UNLOCK(&contextslock);
			goto error;
		}
	}
	UNLOCK(&contextslock);

	obj = json_object_new_int64(summary.total);
	CHECKMEM(obj);
	json_object_object_add(memobj, "TotalUse", obj);

	obj = json_object_new_int64(summary.inuse);
	CHECKMEM(obj);
	json_object_object_add(memobj, "InUse", obj);

	obj = json_object_new_int64(summary.malloced);
	CHECKMEM(obj);
	json_object_object_add(memobj, "Malloced", obj);

	obj = json_object_new_int64(summary.contextsize);
	CHECKMEM(obj);
	json_object_object_add(memobj, "ContextSize", obj);

	obj = json_object_new_int64(lost);
	CHECKMEM(obj);
	json_object_object_add(memobj, "Lost", obj);

	/* Ownership of ctxarray passes to memobj. */
	json_object_object_add(memobj, "contexts", ctxarray);
	return (ISC_R_SUCCESS);

error:
	/* Drop our reference to the partially built array. */
	if (ctxarray != NULL) {
		json_object_put(ctxarray);
	}
	return (result);
}
1809 #endif /* HAVE_JSON_C */
1810
/*
 * Create a memory context with the default flags and no dedicated
 * jemalloc arena (arena flags 0).
 */
void
isc__mem_create(isc_mem_t **mctxp FLARG) {
	mem_create(mctxp, isc_mem_defaultflags, 0);
#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr, "create mctx %p file %s line %u\n", *mctxp,
			file, line);
	}
#endif /* ISC_MEM_TRACKLINES */
}
1821
/*
 * Create a memory context backed by its own jemalloc arena.  If arena
 * creation fails (arena_no stays ISC_MEM_ILLEGAL_ARENA... note:
 * RUNTIME_CHECK aborts on a false return, so in practice the arena is
 * valid), fall back to default allocation flags.
 */
void
isc__mem_create_arena(isc_mem_t **mctxp FLARG) {
	unsigned int arena_no = ISC_MEM_ILLEGAL_ARENA;

	RUNTIME_CHECK(mem_jemalloc_arena_create(&arena_no));

	/*
	 * We use MALLOCX_TCACHE_NONE to bypass the tcache and route
	 * allocations directly to the arena. That is a recommendation
	 * from jemalloc developers:
	 *
	 * https://github.com/jemalloc/jemalloc/issues/2483#issuecomment-1698173849
	 */
	mem_create(mctxp, isc_mem_defaultflags,
		   arena_no == ISC_MEM_ILLEGAL_ARENA
			   ? 0
			   : MALLOCX_ARENA(arena_no) | MALLOCX_TCACHE_NONE);
	/* Remember the arena so per-arena mallctl tuning can find it. */
	(*mctxp)->jemalloc_arena = arena_no;
#if ISC_MEM_TRACKLINES
	if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0) {
		fprintf(stderr,
			"create mctx %p file %s line %u for jemalloc arena "
			"%u\n",
			*mctxp, file, line, arena_no);
	}
#endif /* ISC_MEM_TRACKLINES */
}
1849
#if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
/*
 * Write an ssize_t control value through jemalloc's mallctl();
 * returns true on success (mallctl() returned 0).
 */
static bool
jemalloc_set_ssize_value(const char *valname, ssize_t newval) {
	return (mallctl(valname, NULL, NULL, &newval, sizeof(newval)) == 0);
}
#endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
1859
1860 static isc_result_t
1861 mem_set_arena_ssize_value(isc_mem_t *mctx, const char *arena_valname,
1862 const ssize_t newval) {
1863 REQUIRE(VALID_CONTEXT(mctx));
1864 #if defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4
1865 bool ret;
1866 char buf[256] = { 0 };
1867
1868 if (mctx->jemalloc_arena == ISC_MEM_ILLEGAL_ARENA) {
1869 return (ISC_R_UNEXPECTED);
1870 }
1871
1872 (void)snprintf(buf, sizeof(buf), "arena.%u.%s", mctx->jemalloc_arena,
1873 arena_valname);
1874
1875 ret = jemalloc_set_ssize_value(buf, newval);
1876
1877 if (!ret) {
1878 return (ISC_R_FAILURE);
1879 }
1880
1881 return (ISC_R_SUCCESS);
1882 #else
1883 UNUSED(arena_valname);
1884 UNUSED(newval);
1885 return (ISC_R_NOTIMPLEMENTED);
1886 #endif /* defined(JEMALLOC_API_SUPPORTED) && JEMALLOC_VERSION_MAJOR >= 4 */
1887 }
1888
/*
 * Set the jemalloc "muzzy_decay_ms" tunable on this context's arena.
 */
isc_result_t
isc_mem_arena_set_muzzy_decay_ms(isc_mem_t *mctx, const ssize_t decay_ms) {
	return (mem_set_arena_ssize_value(mctx, "muzzy_decay_ms", decay_ms));
}
1893
/*
 * Set the jemalloc "dirty_decay_ms" tunable on this context's arena.
 */
isc_result_t
isc_mem_arena_set_dirty_decay_ms(isc_mem_t *mctx, const ssize_t decay_ms) {
	return (mem_set_arena_ssize_value(mctx, "dirty_decay_ms", decay_ms));
}
1898
/*
 * Print the context's active (not yet freed) allocations to 'file'.
 * A no-op unless the library was built with ISC_MEM_TRACKLINES.
 */
void
isc__mem_printactive(isc_mem_t *ctx, FILE *file) {
#if ISC_MEM_TRACKLINES
	REQUIRE(VALID_CONTEXT(ctx));
	REQUIRE(file != NULL);

	print_active(ctx, file);
#else /* if ISC_MEM_TRACKLINES */
	UNUSED(ctx);
	UNUSED(file);
#endif /* if ISC_MEM_TRACKLINES */
}
1911
/*
 * Allocate 'size' bytes outside any memory context; pair with
 * isc__mem_free_noctx() passing the same size.
 */
void *
isc__mem_alloc_noctx(size_t size) {
	/*
	 * jemalloc's mallocx() has undefined behavior for a size of 0;
	 * apply the same zero-size adjustment that isc__mem_free_noctx()
	 * applies so the mallocx()/sdallocx() sizes stay paired.
	 */
	ADJUST_ZERO_ALLOCATION_SIZE(size);
	return (mallocx(size, 0));
}
1916
/*
 * Free memory obtained with isc__mem_alloc_noctx().  'size' must be
 * the size originally requested so jemalloc's sized deallocation
 * (sdallocx) can be used; zero-size requests are adjusted to match
 * the allocator's minimum.
 */
void
isc__mem_free_noctx(void *ptr, size_t size) {
	ADJUST_ZERO_ALLOCATION_SIZE(size);
	sdallocx(ptr, size, 0);
}
1922