/*	$NetBSD: subr_psref.c,v 1.5 2016/10/28 07:27:52 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */

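/*
 * Illustrative usage sketch (not part of the original source): a
 * typical read-side pattern for a hypothetical "foo" object that is
 * published under pserialize(9) and protected by passive references.
 * The names foo, foo_lookup, fo_target, and foo_psref_class are
 * assumptions for illustration only; the caller is assumed to be in a
 * softint or in a CPU-bound LWP, as psref_acquire requires.
 *
 *	struct psref psref;
 *	struct foo *foo;
 *	int s;
 *
 *	s = pserialize_read_enter();
 *	foo = foo_lookup(key);
 *	if (foo != NULL)
 *		psref_acquire(&psref, &foo->fo_target, foo_psref_class);
 *	pserialize_read_exit(s);
 *
 *	if (foo != NULL) {
 *		...use foo; brief sleeps are allowed while bound...
 *		psref_release(&psref, &foo->fo_target, foo_psref_class);
 *	}
 */
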
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.5 2016/10/28 07:27:52 ozaki-r Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

LIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	if (class == NULL)
		goto fail0;

	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	if (class->prc_percpu == NULL)
		goto fail1;

	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);

	return class;

fail1:	kmem_free(class, sizeof(*class));
fail0:	return NULL;
}

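/*
 * Illustrative sketch (not part of the original source): a class is
 * typically created once at subsystem initialization and destroyed at
 * detach time.  The name foo_psref_class and the choice of IPL_SOFTNET
 * are assumptions for illustration only.
 *
 *	foo_psref_class = psref_class_create("foopsref", IPL_SOFTNET);
 *	...
 *	psref_class_destroy(foo_psref_class);
 */
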
#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!LIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}

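/*
 * Illustrative sketch (not part of the original source): initializing
 * and publishing a target for the hypothetical "foo" object, following
 * the membar_producer requirement described above.  The names foo,
 * fo_target, and foo_psref_class are assumptions for illustration
 * only.
 *
 *	psref_target_init(&foo->fo_target, foo_psref_class);
 *	membar_producer();
 *	...now insert foo into the pserialize-protected list readers use...
 */
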
/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record our reference.  */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible.  */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  No need to percpu_getref or get the head of the list,
	 * and the caller guarantees that we are bound to a CPU anyway
	 * (as does blocking interrupts).
	 */
	s = splraiseipl(class->prc_iplcookie);
	LIST_REMOVE(psref, psref_entry);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em.  */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible.  */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference.  */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	/* Ask all CPUs to say whether they hold a psref to the target.  */
	xc_wait(xc_broadcast(0, &psreffed_p_xc, &P, NULL));

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done.  */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU.  */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert.  */
	target->prt_class = NULL;
}

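/*
 * Illustrative sketch (not part of the original source): tearing down
 * the hypothetical "foo" object.  The target is unpublished first so
 * that no new references can be acquired, readers are waited for, and
 * only then is the target destroyed and the memory freed.  The names
 * foo, fo_target, foo_psref_class, and foo_psz (a pserialize(9)
 * instance) are assumptions for illustration only.
 *
 *	...remove foo from the pserialize-protected list readers use...
 *	pserialize_perform(foo_psz);
 *	psref_target_destroy(&foo->fo_target, foo_psref_class);
 *	kmem_free(foo, sizeof(*foo));
 */
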
static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list.  */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU.  */
	LIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU.  */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on.  */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it.  */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts.  */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}

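/*
 * Illustrative sketch (not part of the original source): psref_held is
 * meant for assertions, e.g. in a routine that requires its caller to
 * already hold a passive reference to the hypothetical foo:
 *
 *	KASSERT(psref_held(&foo->fo_target, foo_psref_class));
 */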