Home | History | Annotate | Line # | Download | only in isc
stats.c revision 1.2.2.2
      1 /*	$NetBSD: stats.c,v 1.2.2.2 2018/09/06 06:55:05 pgoyette Exp $	*/
      2 
      3 /*
      4  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
      5  *
      6  * This Source Code Form is subject to the terms of the Mozilla Public
      7  * License, v. 2.0. If a copy of the MPL was not distributed with this
      8  * file, You can obtain one at http://mozilla.org/MPL/2.0/.
      9  *
     10  * See the COPYRIGHT file distributed with this work for additional
     11  * information regarding copyright ownership.
     12  */
     13 
     14 
     15 /*! \file */
     16 
     17 #include <config.h>
     18 
     19 #include <string.h>
     20 
     21 #include <isc/atomic.h>
     22 #include <isc/buffer.h>
     23 #include <isc/magic.h>
     24 #include <isc/mem.h>
     25 #include <isc/platform.h>
     26 #include <isc/print.h>
     27 #include <isc/rwlock.h>
     28 #include <isc/stats.h>
     29 #include <isc/util.h>
     30 
     31 #if defined(ISC_PLATFORM_HAVESTDATOMIC)
     32 #include <stdatomic.h>
     33 #endif
     34 
#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

/*%
 * Local macro confirming presence of 64-bit
 * increment and store operations, just to make
 * the later macros simpler
 */
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE)) || \
	(defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ))
#define ISC_STATS_HAVEATOMICQ 1
/* Prefer C11 <stdatomic.h> over the ISC-provided atomic primitives. */
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMICQ 1
#endif
#else
#define ISC_STATS_HAVEATOMICQ 0
#endif

/*%
 * Only lock the counters if 64-bit atomic operations are
 * not available but cheap atomic lock operations are.
 * On a modern 64-bit system this should never be the case.
 *
 * Normal locks are too expensive to be used whenever a counter
 * is updated.
 */
#if !ISC_STATS_HAVEATOMICQ && defined(ISC_RWLOCK_HAVEATOMIC)
#define ISC_STATS_LOCKCOUNTERS 1
#else
#define ISC_STATS_LOCKCOUNTERS 0
#endif

/*%
 * If 64-bit atomic operations are not available but
 * 32-bit operations are then split the counter into two,
 * using the atomic operations to try to ensure that any carry
 * from the low word is correctly carried into the high word.
 *
 * Otherwise, just rely on standard 64-bit data types
 * and operations
 */
#if !ISC_STATS_HAVEATOMICQ && ((defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD))
#define ISC_STATS_USEMULTIFIELDS 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMIC 1
#endif
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
     84 
#if ISC_STATS_USEMULTIFIELDS
/*%
 * No 64-bit atomics: each counter is a pair of 32-bit halves.  The
 * low word is updated atomically and a carry/borrow is propagated to
 * the high word by the increment/decrement routines below.
 */
typedef struct {
#if defined(ISC_STATS_HAVESTDATOMIC)
	atomic_int_fast32_t hi;
	atomic_int_fast32_t lo;
#else
	isc_uint32_t hi;
	isc_uint32_t lo;
#endif
} isc_stat_t;
#else
/*% A counter is a single 64-bit integer, atomic where available. */
#if defined(ISC_STATS_HAVESTDATOMICQ)
typedef atomic_int_fast64_t isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif
#endif
    102 
/*% A reference-counted set of statistics counters. */
struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;		/*%< ISC_STATS_MAGIC when valid */
	isc_mem_t	*mctx;		/*%< memory context owning this object */
	int		ncounters;	/*%< number of elements in 'counters' */

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Locked by counterlock or unlocked if efficient rwlock is not
	 * available.
	 */
#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;

	/*%
	 * We don't want to lock the counters while we are dumping, so we first
	 * copy the current counter values into a local array.  This buffer
	 * will be used as the copy destination.  It's allocated on creation
	 * of the stats structure so that the dump operation won't fail due
	 * to memory allocation failure.
	 * XXX: this approach is weird for non-threaded build because the
	 * additional memory and the copy overhead could be avoided.  We prefer
	 * simplicity here, however, under the assumption that this function
	 * should be only rarely called.
	 */
	isc_uint64_t	*copiedcounters;
};
    134 
    135 static isc_result_t
    136 create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
    137 	isc_stats_t *stats;
    138 	isc_result_t result = ISC_R_SUCCESS;
    139 
    140 	REQUIRE(statsp != NULL && *statsp == NULL);
    141 
    142 	stats = isc_mem_get(mctx, sizeof(*stats));
    143 	if (stats == NULL)
    144 		return (ISC_R_NOMEMORY);
    145 
    146 	result = isc_mutex_init(&stats->lock);
    147 	if (result != ISC_R_SUCCESS)
    148 		goto clean_stats;
    149 
    150 	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
    151 	if (stats->counters == NULL) {
    152 		result = ISC_R_NOMEMORY;
    153 		goto clean_mutex;
    154 	}
    155 	stats->copiedcounters = isc_mem_get(mctx,
    156 					    sizeof(isc_uint64_t) * ncounters);
    157 	if (stats->copiedcounters == NULL) {
    158 		result = ISC_R_NOMEMORY;
    159 		goto clean_counters;
    160 	}
    161 
    162 #if ISC_STATS_LOCKCOUNTERS
    163 	result = isc_rwlock_init(&stats->counterlock, 0, 0);
    164 	if (result != ISC_R_SUCCESS)
    165 		goto clean_copiedcounters;
    166 #endif
    167 
    168 	stats->references = 1;
    169 	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
    170 	stats->mctx = NULL;
    171 	isc_mem_attach(mctx, &stats->mctx);
    172 	stats->ncounters = ncounters;
    173 	stats->magic = ISC_STATS_MAGIC;
    174 
    175 	*statsp = stats;
    176 
    177 	return (result);
    178 
    179 clean_counters:
    180 	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);
    181 
    182 #if ISC_STATS_LOCKCOUNTERS
    183 clean_copiedcounters:
    184 	isc_mem_put(mctx, stats->copiedcounters,
    185 		    sizeof(isc_stat_t) * ncounters);
    186 #endif
    187 
    188 clean_mutex:
    189 	DESTROYLOCK(&stats->lock);
    190 
    191 clean_stats:
    192 	isc_mem_put(mctx, stats, sizeof(*stats));
    193 
    194 	return (result);
    195 }
    196 
/*%
 * Attach an additional reference to 'stats' and store the pointer in
 * '*statsp'.  The reference count is protected by stats->lock.
 */
void
isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(statsp != NULL && *statsp == NULL);

	LOCK(&stats->lock);
	stats->references++;
	UNLOCK(&stats->lock);

	*statsp = stats;
}
    208 
    209 void
    210 isc_stats_detach(isc_stats_t **statsp) {
    211 	isc_stats_t *stats;
    212 
    213 	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));
    214 
    215 	stats = *statsp;
    216 	*statsp = NULL;
    217 
    218 	LOCK(&stats->lock);
    219 	stats->references--;
    220 
    221 	if (stats->references == 0) {
    222 		isc_mem_put(stats->mctx, stats->copiedcounters,
    223 			    sizeof(isc_stat_t) * stats->ncounters);
    224 		isc_mem_put(stats->mctx, stats->counters,
    225 			    sizeof(isc_stat_t) * stats->ncounters);
    226 		UNLOCK(&stats->lock);
    227 		DESTROYLOCK(&stats->lock);
    228 #if ISC_STATS_LOCKCOUNTERS
    229 		isc_rwlock_destroy(&stats->counterlock);
    230 #endif
    231 		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
    232 		return;
    233 	}
    234 
    235 	UNLOCK(&stats->lock);
    236 }
    237 
/*% Return the number of counters in 'stats' (fixed at creation time). */
int
isc_stats_ncounters(isc_stats_t *stats) {
	REQUIRE(ISC_STATS_VALID(stats));

	return (stats->ncounters);
}
    244 
/*%
 * Add 1 to counters[counter] using the cheapest mechanism the build
 * configuration provides (see the ISC_STATS_* macros above).
 */
static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#if ISC_STATS_LOCKCOUNTERS
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we "writing" a counter field.  The write access itself
	 * is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	/* Atomically bump the low 32-bit word, remembering its old value. */
#if defined(ISC_STATS_HAVESTDATOMIC)
	prev = atomic_fetch_add_explicit(&stats->counters[counter].lo, 1,
					 memory_order_relaxed);
#else
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
#endif
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overlaps again before the higher field is incremented.  It doesn't
	 * matter, however, because we don't read the value until
	 * isc_stats_copy() is called where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff) {
#if defined(ISC_STATS_HAVESTDATOMIC)
		atomic_fetch_add_explicit(&stats->counters[counter].hi, 1,
					  memory_order_relaxed);
#else
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
#endif
	}
#elif ISC_STATS_HAVEATOMICQ
	/* 'prev' is only needed for the multifield carry handling above. */
	UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
	atomic_fetch_add_explicit(&stats->counters[counter], 1,
				  memory_order_relaxed);
#else
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#endif
#else
	/* Last resort: plain increment with no atomic support available. */
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
    298 
/*%
 * Subtract 1 from counters[counter]; the mirror image of
 * incrementcounter(), including borrow propagation to the high word
 * in the multifield configuration.
 */
static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#if ISC_STATS_LOCKCOUNTERS
	/* "read" lock: see the comment in incrementcounter(). */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)
	prev = atomic_fetch_sub_explicit(&stats->counters[counter].lo, 1,
					 memory_order_relaxed);
#else
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
#endif
	/* Low word wrapped below zero: borrow from the high word. */
	if (prev == 0) {
#if defined(ISC_STATS_HAVESTDATOMIC)
		atomic_fetch_sub_explicit(&stats->counters[counter].hi, 1,
					  memory_order_relaxed);
#else
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#endif
	}
#elif ISC_STATS_HAVEATOMICQ
	/* 'prev' is only needed for the multifield borrow handling above. */
	UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
	atomic_fetch_sub_explicit(&stats->counters[counter], 1,
				  memory_order_relaxed);
#else
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#endif
#else
	/* Last resort: plain decrement with no atomic support available. */
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
    340 
/*%
 * Snapshot every counter into stats->copiedcounters as a plain
 * isc_uint64_t value, so callers can read a consistent copy without
 * holding any lock afterwards.
 */
static void
copy_counters(isc_stats_t *stats) {
	int i;

#if ISC_STATS_LOCKCOUNTERS
	/*
	 * We use a "write" lock before "reading" the statistics counters as
	 * an exclusive lock.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

	for (i = 0; i < stats->ncounters; i++) {
#if ISC_STATS_USEMULTIFIELDS
		/* Recombine the two 32-bit halves into one 64-bit value. */
		stats->copiedcounters[i] =
			(isc_uint64_t)(stats->counters[i].hi) << 32 |
			stats->counters[i].lo;
#elif ISC_STATS_HAVEATOMICQ
#if defined(ISC_STATS_HAVESTDATOMICQ)
		stats->copiedcounters[i] =
			atomic_load_explicit(&stats->counters[i],
					     memory_order_relaxed);
#else
		/* use xaddq(..., 0) as an atomic load */
		stats->copiedcounters[i] =
			(isc_uint64_t)isc_atomic_xaddq((isc_int64_t *)&stats->counters[i], 0);
#endif
#else
		stats->copiedcounters[i] = stats->counters[i];
#endif
	}

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}
    377 
/*%
 * Public constructor: create a stats object with 'ncounters' counters.
 * Thin wrapper around create_stats().
 */
isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);

	return (create_stats(mctx, ncounters, statsp));
}
    384 
/*% Increment counter 'counter' of 'stats' by one. */
void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
}
    392 
/*% Decrement counter 'counter' of 'stats' by one. */
void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
}
    400 
    401 void
    402 isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
    403 	       void *arg, unsigned int options)
    404 {
    405 	int i;
    406 
    407 	REQUIRE(ISC_STATS_VALID(stats));
    408 
    409 	copy_counters(stats);
    410 
    411 	for (i = 0; i < stats->ncounters; i++) {
    412 		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
    413 		    stats->copiedcounters[i] == 0)
    414 				continue;
    415 		dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);
    416 	}
    417 }
    418 
/*%
 * Overwrite counter 'counter' with the absolute value 'val'.
 * Unlike increment/decrement this is a plain store; when counters are
 * rwlock-protected the store is made exclusive with the write lock.
 */
void
isc_stats_set(isc_stats_t *stats, isc_uint64_t val,
	      isc_statscounter_t counter)
{
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

#if ISC_STATS_LOCKCOUNTERS
	/*
	 * We use a "write" lock before "reading" the statistics counters as
	 * an exclusive lock.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

#if ISC_STATS_USEMULTIFIELDS
	/* Split the 64-bit value across the two 32-bit halves. */
	stats->counters[counter].hi = (isc_uint32_t)((val >> 32) & 0xffffffff);
	stats->counters[counter].lo = (isc_uint32_t)(val & 0xffffffff);
#elif ISC_STATS_HAVEATOMICQ
#if defined(ISC_STATS_HAVESTDATOMICQ)
	atomic_store_explicit(&stats->counters[counter], val,
			      memory_order_relaxed);
#else
	isc_atomic_storeq((isc_int64_t *)&stats->counters[counter], val);
#endif
#else
	stats->counters[counter] = val;
#endif

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}
    452