/*	$NetBSD: subr_psref.c,v 1.18 2022/02/12 16:31:06 macallan Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
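
/*
 *	Typical usage, as a sketch only -- the "frotz" object, its
 *	lookup routine, and frotz_class are hypothetical, but
 *	curlwp_bind(9) is the usual way to keep the LWP bound to a CPU
 *	while the reference is held:
 *
 *		struct psref psref;
 *		struct frotz *f;
 *		int bound;
 *
 *		bound = curlwp_bind();
 *		f = frotz_lookup(key);
 *		psref_acquire(&psref, &f->f_target, frotz_class);
 *		frotz_use(f);		// may sleep, but only briefly
 *		psref_release(&psref, &f->f_target, frotz_class);
 *		curlwp_bindx(bound);
 */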

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.18 2022/02/12 16:31:06 macallan Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

SLIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
	unsigned int		prc_xc_flags;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * Data structures and functions for debugging.
 */
#ifndef PSREF_DEBUG_NITEMS
#define PSREF_DEBUG_NITEMS 16
#endif

struct psref_debug_item {
	void			*prdi_caller;
	struct psref		*prdi_psref;
};

struct psref_debug {
	int			prd_refs_peek;
	struct psref_debug_item prd_items[PSREF_DEBUG_NITEMS];
};

#ifdef PSREF_DEBUG
static void psref_debug_acquire(struct psref *);
static void psref_debug_release(struct psref *);

static void psref_debug_lwp_free(void *);

static specificdata_key_t psref_debug_lwp_key;
#endif


/*
 * psref_init()
 *
 *	Initialize the passive reference subsystem.  Called once during
 *	kernel startup.
 */
void
psref_init(void)
{

#ifdef PSREF_DEBUG
	lwp_specific_key_create(&psref_debug_lwp_key, psref_debug_lwp_free);
#endif
}

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);
	class->prc_xc_flags = XC_HIGHPRI_IPL(ipl);

	return class;
}
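
/*
 *	For illustration, a class is usually created once at subsystem
 *	initialization; the name and IPL here are hypothetical:
 *
 *		static struct psref_class *frotz_class __read_mostly;
 *
 *		frotz_class = psref_class_create("frotz", IPL_SOFTNET);
 */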

static void __diagused
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!SLIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool __diagused
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
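
/*
 *	A publication sketch, assuming a hypothetical global list of
 *	frotz objects protected by frotz_lock:
 *
 *		psref_target_init(&f->f_target, frotz_class);
 *		membar_producer();
 *		mutex_enter(&frotz_lock);
 *		LIST_INSERT_HEAD(&frotz_list, f, f_entry);
 *		mutex_exit(&frotz_lock);
 */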

#ifdef DEBUG
static bool
psref_exist(struct psref_cpu *pcpu, struct psref *psref)
{
	struct psref *_psref;

	SLIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref)
			return true;
	}
	return false;
}

static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;

	found = psref_exist(pcpu, psref);
	if (found) {
		panic("The psref is already in the list (acquiring twice?): "
		    "psref=%p target=%p", psref, target);
	}
}

static void
psref_check_existence(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;

	found = psref_exist(pcpu, psref);
	if (!found) {
		panic("The psref isn't in the list (releasing unused psref?): "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity-check that the target is not already held by this psref.  */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference.  */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	curlwp->l_psrefs++;
#endif
#ifdef PSREF_DEBUG
	psref_debug_acquire(psref);
#endif
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible.  */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  The caller guarantees that we are bound to a CPU (as
	 * does blocking interrupts), so the reference can only be on
	 * this CPU's list.
	 */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);
#ifdef DEBUG
	/* Sanity-check that the psref was in fact acquired earlier.  */
	psref_check_existence(pcpu, psref, target);
#endif
	SLIST_REMOVE(&pcpu->pcpu_head, psref, psref, psref_entry);
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	KASSERT(curlwp->l_psrefs > 0);
	curlwp->l_psrefs--;
#endif
#ifdef PSREF_DEBUG
	psref_debug_release(psref);
#endif

	/* If someone is waiting for users to drain, notify 'em.  */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible.  */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference.  */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	curlwp->l_psrefs++;
#endif
}
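
/*
 *	A sketch of lending a second reference to a callee on the same
 *	CPU and LWP (frotz_foo is hypothetical and releases pto2 before
 *	returning):
 *
 *		struct psref pto2;
 *
 *		psref_copy(&pto2, &psref, frotz_class);
 *		frotz_foo(f, &pto2);
 */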

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	if (__predict_true(mp_online)) {
		/*
		 * Ask all CPUs to say whether they hold a psref to the
		 * target.
		 */
		xc_wait(xc_broadcast(class->prc_xc_flags, &psreffed_p_xc, &P,
		    NULL));
	} else
		psreffed_p_xc(&P, NULL);

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done.  */
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU.  */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert.  */
	target->prt_class = NULL;
}
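
/*
 *	A teardown sketch, continuing the hypothetical frotz example:
 *	unpublish the object first so no new references can be
 *	acquired, wait out pserialize(9) readers (frotz_psz is a
 *	hypothetical pserialize instance), then drain and destroy:
 *
 *		mutex_enter(&frotz_lock);
 *		LIST_REMOVE(f, f_entry);
 *		pserialize_perform(frotz_psz);
 *		mutex_exit(&frotz_lock);
 *		psref_target_destroy(&f->f_target, frotz_class);
 *		kmem_free(f, sizeof(*f));
 */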

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU.  */
	SLIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU.  */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on.  */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it.  */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
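
/*
 *	For example, a hypothetical routine that requires its caller to
 *	hold a reference might assert:
 *
 *		KASSERT(psref_held(&f->f_target, frotz_class));
 */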

#ifdef PSREF_DEBUG
/*
 * psref_debug_init_lwp(l)
 *
 *	Allocate the per-LWP bookkeeping used to track which passive
 *	references l currently holds.
 */
void
psref_debug_init_lwp(struct lwp *l)
{
	struct psref_debug *prd;

	prd = kmem_zalloc(sizeof(*prd), KM_SLEEP);
	lwp_setspecific_by_lwp(l, psref_debug_lwp_key, prd);
}

static void
psref_debug_lwp_free(void *arg)
{
	struct psref_debug *prd = arg;

	kmem_free(prd, sizeof(*prd));
}

/*
 * psref_debug_acquire(psref)
 *
 *	Record psref, and the caller address stashed in
 *	psref->psref_debug, in the current LWP's table of held
 *	references.  Panic if the peak number of simultaneously held
 *	references exceeds PSREF_DEBUG_NITEMS.
 */
static void
psref_debug_acquire(struct psref *psref)
{
	struct psref_debug *prd;
	struct lwp *l = curlwp;
	int s, i;

	prd = lwp_getspecific(psref_debug_lwp_key);
	if (__predict_false(prd == NULL)) {
		psref->psref_debug = NULL;
		return;
	}

	s = splserial();
	if (l->l_psrefs > prd->prd_refs_peek) {
		prd->prd_refs_peek = l->l_psrefs;
		if (__predict_false(prd->prd_refs_peek > PSREF_DEBUG_NITEMS))
			panic("exceeded PSREF_DEBUG_NITEMS");
	}
	for (i = 0; i < prd->prd_refs_peek; i++) {
		struct psref_debug_item *prdi = &prd->prd_items[i];
		if (prdi->prdi_psref != NULL)
			continue;
		prdi->prdi_caller = psref->psref_debug;
		prdi->prdi_psref = psref;
		psref->psref_debug = prdi;
		break;
	}
	if (__predict_false(i == prd->prd_refs_peek))
		panic("out of range: %d", i);
	splx(s);
}

/*
 * psref_debug_release(psref)
 *
 *	Remove psref from the current LWP's table of held references.
 */
static void
psref_debug_release(struct psref *psref)
{
	int s;

	s = splserial();
	if (__predict_true(psref->psref_debug != NULL)) {
		struct psref_debug_item *prdi = psref->psref_debug;
		prdi->prdi_psref = NULL;
	}
	splx(s);
}

/*
 * psref_debug_barrier()
 *
 *	Panic if the current LWP still holds any passive references;
 *	otherwise reset its bookkeeping.
 */
void
psref_debug_barrier(void)
{
	struct psref_debug *prd;
	struct lwp *l = curlwp;
	int s, i;

	prd = lwp_getspecific(psref_debug_lwp_key);
	if (__predict_false(prd == NULL))
		return;

	s = splserial();
	for (i = 0; i < prd->prd_refs_peek; i++) {
		struct psref_debug_item *prdi = &prd->prd_items[i];
		if (__predict_true(prdi->prdi_psref == NULL))
			continue;
		panic("psref leaked: lwp(%p) acquired at %p", l,
		    prdi->prdi_caller);
	}
	prd->prd_refs_peek = 0;	/* Reset the counter */
	splx(s);
}
#endif /* PSREF_DEBUG */
    699