1 1.8 riastrad /* $NetBSD: subr_localcount.c,v 1.8 2024/08/16 21:54:17 riastradh Exp $ */ 2 1.2 pgoyette 3 1.2 pgoyette /*- 4 1.2 pgoyette * Copyright (c) 2016 The NetBSD Foundation, Inc. 5 1.2 pgoyette * All rights reserved. 6 1.2 pgoyette * 7 1.2 pgoyette * This code is derived from software contributed to The NetBSD Foundation 8 1.2 pgoyette * by Taylor R. Campbell. 9 1.2 pgoyette * 10 1.2 pgoyette * Redistribution and use in source and binary forms, with or without 11 1.2 pgoyette * modification, are permitted provided that the following conditions 12 1.2 pgoyette * are met: 13 1.2 pgoyette * 1. Redistributions of source code must retain the above copyright 14 1.2 pgoyette * notice, this list of conditions and the following disclaimer. 15 1.2 pgoyette * 2. Redistributions in binary form must reproduce the above copyright 16 1.2 pgoyette * notice, this list of conditions and the following disclaimer in the 17 1.2 pgoyette * documentation and/or other materials provided with the distribution. 18 1.2 pgoyette * 19 1.2 pgoyette * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 1.2 pgoyette * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 1.2 pgoyette * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 1.2 pgoyette * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 1.2 pgoyette * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 1.2 pgoyette * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 1.2 pgoyette * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 1.2 pgoyette * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 1.2 pgoyette * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 1.2 pgoyette * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 1.2 pgoyette * POSSIBILITY OF SUCH DAMAGE. 
30 1.2 pgoyette */ 31 1.2 pgoyette 32 1.2 pgoyette /* 33 1.2 pgoyette * CPU-local reference counts 34 1.2 pgoyette * 35 1.2 pgoyette * localcount(9) is a reference-counting scheme that involves no 36 1.2 pgoyette * interprocessor synchronization most of the time, at the cost of 37 1.2 pgoyette * eight bytes of memory per CPU per object and at the cost of 38 1.2 pgoyette * expensive interprocessor synchronization to drain references. 39 1.2 pgoyette * 40 1.2 pgoyette * localcount(9) references may be held across sleeps, may be 41 1.2 pgoyette * transferred from CPU to CPU or thread to thread: they behave 42 1.2 pgoyette * semantically like typical reference counts, with different 43 1.2 pgoyette * pragmatic performance characteristics. 44 1.2 pgoyette */ 45 1.2 pgoyette 46 1.2 pgoyette #include <sys/cdefs.h> 47 1.8 riastrad __KERNEL_RCSID(0, "$NetBSD: subr_localcount.c,v 1.8 2024/08/16 21:54:17 riastradh Exp $"); 48 1.2 pgoyette 49 1.2 pgoyette #include <sys/param.h> 50 1.2 pgoyette #include <sys/localcount.h> 51 1.2 pgoyette #include <sys/types.h> 52 1.2 pgoyette #include <sys/condvar.h> 53 1.2 pgoyette #include <sys/errno.h> 54 1.2 pgoyette #include <sys/mutex.h> 55 1.2 pgoyette #include <sys/percpu.h> 56 1.2 pgoyette #include <sys/xcall.h> 57 1.7 ozaki #if defined(DEBUG) && defined(LOCKDEBUG) 58 1.7 ozaki #include <sys/atomic.h> 59 1.7 ozaki #endif 60 1.2 pgoyette 61 1.5 riastrad static void localcount_xc(void *, void *); 62 1.5 riastrad 63 1.2 pgoyette /* 64 1.2 pgoyette * localcount_init(lc) 65 1.2 pgoyette * 66 1.2 pgoyette * Initialize a localcount object. Returns 0 on success, error 67 1.2 pgoyette * code on failure. May fail to allocate memory for percpu(9). 68 1.2 pgoyette * 69 1.2 pgoyette * The caller must call localcount_drain and then localcount_fini 70 1.2 pgoyette * when done with lc. 
71 1.2 pgoyette */ 72 1.4 chs void 73 1.2 pgoyette localcount_init(struct localcount *lc) 74 1.2 pgoyette { 75 1.2 pgoyette 76 1.2 pgoyette lc->lc_totalp = NULL; 77 1.2 pgoyette lc->lc_percpu = percpu_alloc(sizeof(int64_t)); 78 1.2 pgoyette } 79 1.2 pgoyette 80 1.2 pgoyette /* 81 1.2 pgoyette * localcount_drain(lc, cv, interlock) 82 1.2 pgoyette * 83 1.2 pgoyette * Wait for all acquired references to lc to drain. Caller must 84 1.2 pgoyette * hold interlock; localcount_drain releases it during cross-calls 85 1.2 pgoyette * and waits on cv. The cv and interlock passed here must be the 86 1.2 pgoyette * same as are passed to localcount_release for this lc. 87 1.2 pgoyette * 88 1.2 pgoyette * Caller must guarantee that no new references can be acquired 89 1.2 pgoyette * with localcount_acquire before calling localcount_drain. For 90 1.2 pgoyette * example, any object that may be found in a list and acquired 91 1.2 pgoyette * must be removed from the list before localcount_drain. 92 1.2 pgoyette * 93 1.2 pgoyette * The localcount object lc may be used only with localcount_fini 94 1.2 pgoyette * after this, unless reinitialized after localcount_fini with 95 1.2 pgoyette * localcount_init. 96 1.2 pgoyette */ 97 1.2 pgoyette void 98 1.2 pgoyette localcount_drain(struct localcount *lc, kcondvar_t *cv, kmutex_t *interlock) 99 1.2 pgoyette { 100 1.2 pgoyette int64_t total = 0; 101 1.2 pgoyette 102 1.2 pgoyette KASSERT(mutex_owned(interlock)); 103 1.2 pgoyette KASSERT(lc->lc_totalp == NULL); 104 1.2 pgoyette 105 1.2 pgoyette /* Mark it draining. */ 106 1.2 pgoyette lc->lc_totalp = &total; 107 1.2 pgoyette 108 1.2 pgoyette /* 109 1.2 pgoyette * Count up all references on all CPUs. 110 1.2 pgoyette * 111 1.2 pgoyette * This serves as a global memory barrier: after xc_wait, all 112 1.2 pgoyette * CPUs will have witnessed the nonnull value of lc->lc_totalp, 113 1.2 pgoyette * so that it is safe to wait on the cv for them. 
114 1.2 pgoyette */ 115 1.2 pgoyette mutex_exit(interlock); 116 1.2 pgoyette xc_wait(xc_broadcast(0, &localcount_xc, lc, interlock)); 117 1.2 pgoyette mutex_enter(interlock); 118 1.2 pgoyette 119 1.2 pgoyette /* Wait for remaining references to drain. */ 120 1.2 pgoyette while (total != 0) { 121 1.2 pgoyette /* 122 1.2 pgoyette * At this point, now that we have added up all 123 1.2 pgoyette * references on all CPUs, the total had better be 124 1.2 pgoyette * nonnegative. 125 1.2 pgoyette */ 126 1.2 pgoyette KASSERTMSG((0 < total), 127 1.2 pgoyette "negatively referenced localcount: %p, %"PRId64, 128 1.2 pgoyette lc, total); 129 1.2 pgoyette cv_wait(cv, interlock); 130 1.2 pgoyette } 131 1.2 pgoyette 132 1.2 pgoyette /* Paranoia: Cause any further use of lc->lc_totalp to crash. */ 133 1.2 pgoyette lc->lc_totalp = (void *)(uintptr_t)1; 134 1.2 pgoyette } 135 1.2 pgoyette 136 1.2 pgoyette /* 137 1.2 pgoyette * localcount_fini(lc) 138 1.2 pgoyette * 139 1.2 pgoyette * Finalize a localcount object, releasing any memory allocated 140 1.3 pgoyette * for it. The localcount object must already have been drained. 141 1.2 pgoyette */ 142 1.2 pgoyette void 143 1.2 pgoyette localcount_fini(struct localcount *lc) 144 1.2 pgoyette { 145 1.2 pgoyette 146 1.2 pgoyette KASSERT(lc->lc_totalp == (void *)(uintptr_t)1); 147 1.2 pgoyette percpu_free(lc->lc_percpu, sizeof(uint64_t)); 148 1.2 pgoyette } 149 1.2 pgoyette 150 1.2 pgoyette /* 151 1.2 pgoyette * localcount_xc(cookie0, cookie1) 152 1.2 pgoyette * 153 1.2 pgoyette * Accumulate and transfer the per-CPU reference counts to a 154 1.2 pgoyette * global total, resetting the per-CPU counter to zero. Once 155 1.2 pgoyette * localcount_drain() has started, we only maintain the total 156 1.2 pgoyette * count in localcount_release(). 
 */
static void
localcount_xc(void *cookie0, void *cookie1)
{
	struct localcount *lc = cookie0;
	kmutex_t *interlock = cookie1;
	int64_t *localp;
	int s;

	/* Interlock serializes the update of *lc->lc_totalp against
	 * the other CPUs' instances of this cross-call and against
	 * localcount_release's slow path. */
	mutex_enter(interlock);
	localp = percpu_getref(lc->lc_percpu);
	/* Block interrupts: acquire/release may run in hard interrupt
	 * context on this CPU (see localcount_acquire's contract), so
	 * the read-and-zero of the local counter must not be torn. */
	s = splhigh();
	*lc->lc_totalp += *localp;
	*localp -= *localp;	/* ie, *localp = 0; */
	splx(s);
	percpu_putref(lc->lc_percpu);
	mutex_exit(interlock);
}

/*
 * localcount_adjust(lc, delta)
 *
 *	Add delta -- positive or negative -- to the local CPU's count
 *	for lc.
 */
static void
localcount_adjust(struct localcount *lc, int delta)
{
	int64_t *localp;
	int s;

	localp = percpu_getref(lc->lc_percpu);
	/* splhigh so the CPU-local read-modify-write is atomic with
	 * respect to interrupt-context acquire/release on this CPU. */
	s = splhigh();
	*localp += delta;
	splx(s);
	percpu_putref(lc->lc_percpu);
}

/*
 * localcount_acquire(lc)
 *
 *	Acquire a reference to lc.
 *
 *	The reference may be held across sleeps and may be migrated
 *	from CPU to CPU, or even thread to thread -- it is only
 *	counted, not associated with a particular concrete owner.
 *
 *	Involves no interprocessor synchronization.
May be used in any
 *	context: while a lock is held, within a pserialize(9) read
 *	section, in hard interrupt context (provided other users block
 *	hard interrupts), in soft interrupt context, in thread context,
 *	&c.
 *
 *	Caller must guarantee that there is no concurrent
 *	localcount_drain.  For example, any object that may be found in
 *	a list and acquired must be removed from the list before
 *	localcount_drain.
 */
void
localcount_acquire(struct localcount *lc)
{

	/* Nonnull lc_totalp means a drain started -- acquiring now
	 * would violate the caller's contract above. */
	KASSERT(lc->lc_totalp == NULL);
	localcount_adjust(lc, +1);
#if defined(DEBUG) && defined(LOCKDEBUG)
	/* Debug-only global count; wrapping back to 0 means 2^32
	 * simultaneous references -- treat as fatal. */
	if (atomic_inc_32_nv(&lc->lc_refcnt) == 0)
		panic("counter overflow");
#endif
}

/*
 * localcount_release(lc, cv, interlock)
 *
 *	Release a reference to lc.  If there is a concurrent
 *	localcount_drain and this may be the last reference, notify
 *	localcount_drain by acquiring interlock, waking cv, and
 *	releasing interlock.  The cv and interlock passed here must be
 *	the same as are passed to localcount_drain for this lc.
 *
 *	Involves no interprocessor synchronization unless there is a
 *	concurrent localcount_drain in progress.
 */
void
localcount_release(struct localcount *lc, kcondvar_t *cv, kmutex_t *interlock)
{

	/*
	 * Block xcall so that if someone begins draining after we see
	 * lc->lc_totalp as null, then they won't start cv_wait until
	 * after they have counted this CPU's contributions.
	 *
	 * Otherwise, localcount_drain may notice an extant reference
	 * from this CPU and cv_wait for it, but having seen
	 * lc->lc_totalp as null, this CPU will not wake
	 * localcount_drain.
	 */
	kpreempt_disable();

	KDASSERT(mutex_ownable(interlock));
	if (__predict_false(lc->lc_totalp != NULL)) {
		/*
		 * Slow path -- a drain is in progress, so the per-CPU
		 * counters have been (or are being) folded into the
		 * drainer's total; decrement that total under the
		 * interlock and wake localcount_drain in case this is
		 * the last reference.
		 */
		mutex_enter(interlock);
		if (--*lc->lc_totalp == 0)
			cv_broadcast(cv);
		mutex_exit(interlock);
		goto out;
	}

	/* Fast path -- purely CPU-local decrement; a later drain's
	 * cross-call will collect it. */
	localcount_adjust(lc, -1);
#if defined(DEBUG) && defined(LOCKDEBUG)
	/* Wrap to UINT_MAX on decrement means more releases than
	 * acquires -- treat as fatal. */
	if (atomic_dec_32_nv(&lc->lc_refcnt) == UINT_MAX)
		panic("counter underflow");
#endif
out:	kpreempt_enable();
}

/*
 * localcount_debug_refcnt(lc)
 *
 *	Return the total reference count of lc.  It returns a correct
 *	value only if DEBUG and LOCKDEBUG are enabled; otherwise it
 *	always returns 0.
 */
uint32_t
localcount_debug_refcnt(const struct localcount *lc)
{

#if defined(DEBUG) && defined(LOCKDEBUG)
	/* lc_refcnt is maintained with atomic_inc/dec in
	 * localcount_acquire/localcount_release under these same
	 * options; a plain read here is a snapshot, not a barrier. */
	return lc->lc_refcnt;
#else
	return 0;
#endif
}