/*	$NetBSD: subr_psref.c,v 1.6 2016/11/09 09:00:46 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
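
/*
 * A minimal usage sketch, assuming a softint or CPU-bound LWP as the
 * rules above require.  The names frob, frob_lookup, frob_class, and
 * the f_psref member are hypothetical, not part of this API:
 *
 *	struct psref psref;
 *	struct frob *f;
 *	int s;
 *
 *	s = pserialize_read_enter();
 *	f = frob_lookup(key);
 *	if (f != NULL)
 *		psref_acquire(&psref, &f->f_psref, frob_class);
 *	pserialize_read_exit(s);
 *	if (f == NULL)
 *		return ENOENT;
 *
 *	(use f; brief sleeps, e.g. on an adaptive lock, are allowed)
 *
 *	psref_release(&psref, &f->f_psref, frob_class);
 */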

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.6 2016/11/09 09:00:46 ozaki-r Exp $");

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

LIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	if (class == NULL)
		goto fail0;

	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	if (class->prc_percpu == NULL)
		goto fail1;

	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);

	return class;

fail1:	kmem_free(class, sizeof(*class));
fail0:	return NULL;
}
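
/*
 * Illustrative sketch: a subsystem would typically create one class at
 * initialization time and reuse it for all of its targets.  The name
 * frob_class is hypothetical, and IPL_SOFTNET is just one plausible
 * ipl choice:
 *
 *	frob_class = psref_class_create("frobref", IPL_SOFTNET);
 *	...
 *	psref_class_destroy(frob_class);
 */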

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!LIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
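
/*
 * Illustrative sketch of the initialize-then-publish pattern required
 * by the comment above, with hypothetical names (frob, frob_class,
 * frobtab, frobtab_lock, f_psref, f_entry):
 *
 *	f = kmem_alloc(sizeof(*f), KM_SLEEP);
 *	psref_target_init(&f->f_psref, frob_class);
 *	membar_producer();
 *	mutex_enter(&frobtab_lock);
 *	LIST_INSERT_HEAD(&frobtab, f, f_entry);	(publish to readers)
 *	mutex_exit(&frobtab_lock);
 */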

#ifdef DEBUG
static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;
	struct psref *_psref;

	LIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref &&
		    _psref->psref_target == target) {
			found = true;
			break;
		}
	}
	if (found) {
		panic("trying to acquire a target twice with the same psref: "
		    "psref=%p target=%p", psref, target);
	}
}
#endif	/* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity check: the target must not already be held by this psref. */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference. */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible. */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  No need to percpu_getref or get the head of the list,
	 * and the caller guarantees that we are bound to a CPU anyway
	 * (as does blocking interrupts).
	 */
	s = splraiseipl(class->prc_iplcookie);
	LIST_REMOVE(psref, psref_entry);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em. */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible. */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference. */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	/* Ask all CPUs to say whether they hold a psref to the target. */
	xc_wait(xc_broadcast(0, &psreffed_p_xc, &P, NULL));

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done. */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU. */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert. */
	target->prt_class = NULL;
}
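
/*
 * Illustrative teardown sketch, mirroring the publish example near
 * psref_target_init (hypothetical names; frobtab_psz would be the
 * pserialize(9) instance guarding readers of frobtab):
 *
 *	mutex_enter(&frobtab_lock);
 *	LIST_REMOVE(f, f_entry);		(no new readers can find f)
 *	pserialize_perform(frobtab_psz);	(wait out read sections)
 *	mutex_exit(&frobtab_lock);
 *	psref_target_destroy(&f->f_psref, frob_class);	(may sleep)
 *	kmem_free(f, sizeof(*f));
 */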

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU. */
	LIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU. */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on. */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it. */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
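
/*
 * Illustrative sketch: since psref_held is for assertions only, a
 * caller requiring a passive reference might write (hypothetical
 * names as above):
 *
 *	KASSERT(psref_held(&f->f_psref, frob_class));
 */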