/*	$NetBSD: subr_psref.c,v 1.17 2022/02/08 12:59:16 riastradh Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
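
/*
 * Illustrative usage sketch.  The object "frotz", its class, and the
 * pserialize-protected lookup are hypothetical stand-ins, not part of
 * this API:
 *
 *	struct frotz {
 *		struct psref_target	f_target;
 *		...
 *	};
 *
 *	A reader, bound to a CPU for the duration, might do:
 *
 *		struct psref ref;
 *		int s;
 *
 *		s = pserialize_read_enter();
 *		f = ...lookup in a pserialize-protected list...;
 *		psref_acquire(&ref, &f->f_target, frotz_class);
 *		pserialize_read_exit(s);
 *
 *		...use f; brief sleeps are OK while bound...
 *
 *		psref_release(&ref, &f->f_target, frotz_class);
 */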

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.17 2022/02/08 12:59:16 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

SLIST_HEAD(psref_head, psref);

static bool	_psref_held(const struct psref_target *, struct psref_class *,
		    bool);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
	unsigned int		prc_xc_flags;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * Data structures and functions for debugging.
 */
#ifndef PSREF_DEBUG_NITEMS
#define PSREF_DEBUG_NITEMS 16
#endif

struct psref_debug_item {
	void			*prdi_caller;
	struct psref		*prdi_psref;
};

struct psref_debug {
	int			prd_refs_peek;
	struct psref_debug_item	prd_items[PSREF_DEBUG_NITEMS];
};

#ifdef PSREF_DEBUG
static void psref_debug_acquire(struct psref *);
static void psref_debug_release(struct psref *);

static void psref_debug_lwp_free(void *);

static specificdata_key_t psref_debug_lwp_key;
#endif

/*
 * psref_init()
 *
 *	Initialize the passive reference subsystem; with PSREF_DEBUG it
 *	sets up the per-LWP leak-debugging state.
 */
void
psref_init(void)
{

#ifdef PSREF_DEBUG
	lwp_specific_key_create(&psref_debug_lwp_key, psref_debug_lwp_free);
#endif
}

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);
	class->prc_xc_flags = XC_HIGHPRI_IPL(ipl);

	return class;
}
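
/*
 * For example, a subsystem might create a class at initialization time
 * (illustrative only; the name and IPL choice are hypothetical):
 *
 *	frotz_class = psref_class_create("frotz", IPL_SOFTNET);
 */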

static void __diagused
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!SLIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
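
/*
 * A publisher might therefore do (sketch; the object and list are
 * hypothetical, as above):
 *
 *	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
 *	psref_target_init(&f->f_target, frotz_class);
 *	membar_producer();
 *	...insert f into a pserialize-protected list...
 */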

#ifdef DEBUG
static bool
psref_exist(struct psref_cpu *pcpu, struct psref *psref)
{
	struct psref *_psref;

	SLIST_FOREACH(_psref, &pcpu->pcpu_head, psref_entry) {
		if (_psref == psref)
			return true;
	}
	return false;
}

static void
psref_check_duplication(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;

	found = psref_exist(pcpu, psref);
	if (found) {
		panic("The psref is already in the list (acquiring twice?): "
		    "psref=%p target=%p", psref, target);
	}
}

static void
psref_check_existence(struct psref_cpu *pcpu, struct psref *psref,
    const struct psref_target *target)
{
	bool found = false;

	found = psref_exist(pcpu, psref);
	if (!found) {
		panic("The psref isn't in the list (releasing unused psref?): "
		    "psref=%p target=%p", psref, target);
	}
}
#endif /* DEBUG */

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

#ifdef DEBUG
	/* Sanity-check if the target is already acquired with the same psref. */
	psref_check_duplication(pcpu, psref, target);
#endif

	/* Record our reference. */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	curlwp->l_psrefs++;
#endif
#ifdef PSREF_DEBUG
	psref_debug_acquire(psref);
#endif
}
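
/*
 * From ordinary thread context, one way to satisfy the CPU-binding
 * requirement is to bind the LWP for the duration (sketch; assumes the
 * curlwp_bind()/curlwp_bindx() interface used elsewhere in the tree,
 * with hypothetical target names):
 *
 *	bound = curlwp_bind();
 *	psref_acquire(&ref, &f->f_target, frotz_class);
 *	...
 *	psref_release(&ref, &f->f_target, frotz_class);
 *	curlwp_bindx(bound);
 */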

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible. */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  The caller guarantees that we are bound to a CPU (as
	 * does blocking interrupts), so the list cannot change under
	 * us.
	 */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);
#ifdef DEBUG
	/* Sanity-check that this reference was really acquired earlier. */
	psref_check_existence(pcpu, psref, target);
#endif
	SLIST_REMOVE(&pcpu->pcpu_head, psref, psref, psref_entry);
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	KASSERT(curlwp->l_psrefs > 0);
	curlwp->l_psrefs--;
#endif
#ifdef PSREF_DEBUG
	psref_debug_release(psref);
#endif

	/* If someone is waiting for users to drain, notify 'em. */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible. */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference. */
	SLIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

#if defined(DIAGNOSTIC) || defined(PSREF_DEBUG)
	curlwp->l_psrefs++;
#endif
}
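
/*
 * E.g., duplicating a reference so that two independent release paths
 * on the same CPU and LWP can each release one (sketch; names are
 * hypothetical):
 *
 *	psref_acquire(&ref1, &f->f_target, frotz_class);
 *	psref_copy(&ref2, &ref1, frotz_class);
 *	...
 *	psref_release(&ref1, &f->f_target, frotz_class);
 *	psref_release(&ref2, &f->f_target, frotz_class);
 */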

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (_psref_held(P->target, P->class, true))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	if (__predict_true(mp_online)) {
		/*
		 * Ask all CPUs to say whether they hold a psref to the
		 * target.
		 */
		xc_wait(xc_broadcast(class->prc_xc_flags, &psreffed_p_xc, &P,
		    NULL));
	} else
		psreffed_p_xc(&P, NULL);

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done. */
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU. */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs an xcall
		 * and hence cannot run while a mutex is locked, and
		 * OK, because the wait is timed -- explicit wakeups
		 * are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert. */
	target->prt_class = NULL;
}
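
/*
 * A typical unpublish-then-destroy sequence might therefore look like
 * (sketch; the lock, list, and pserialize handle are hypothetical):
 *
 *	mutex_enter(&frotz_lock);
 *	...remove f from the pserialize-protected list...
 *	pserialize_perform(frotz_psz);	(wait for in-flight lookups)
 *	mutex_exit(&frotz_lock);
 *
 *	psref_target_destroy(&f->f_target, frotz_class);	(may sleep)
 *	kmem_free(f, sizeof(*f));
 */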

static bool
_psref_held(const struct psref_target *target, struct psref_class *class,
    bool lwp_mismatch_ok)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU. */
	SLIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference's CPU. */
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it doesn't match, skip it and move on. */
		if (psref->psref_target != target)
			continue;

		/*
		 * Sanity-check the reference's LWP if we are asserting
		 * via psref_held that this LWP holds it, but not if we
		 * are testing in psref_target_destroy whether any LWP
		 * still holds it.
		 */
		KASSERTMSG((lwp_mismatch_ok || psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);

		/* Stop here and report that we found it. */
		held = true;
		break;
	}

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{

	return _psref_held(target, class, false);
}
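
/*
 * E.g., a routine whose contract requires the caller to hold a passive
 * reference might assert (hypothetical names):
 *
 *	KASSERT(psref_held(&f->f_target, frotz_class));
 */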

#ifdef PSREF_DEBUG
void
psref_debug_init_lwp(struct lwp *l)
{
	struct psref_debug *prd;

	prd = kmem_zalloc(sizeof(*prd), KM_SLEEP);
	lwp_setspecific_by_lwp(l, psref_debug_lwp_key, prd);
}

static void
psref_debug_lwp_free(void *arg)
{
	struct psref_debug *prd = arg;

	kmem_free(prd, sizeof(*prd));
}

static void
psref_debug_acquire(struct psref *psref)
{
	struct psref_debug *prd;
	struct lwp *l = curlwp;
	int s, i;

	prd = lwp_getspecific(psref_debug_lwp_key);
	if (__predict_false(prd == NULL)) {
		psref->psref_debug = NULL;
		return;
	}

	s = splserial();
	if (l->l_psrefs > prd->prd_refs_peek) {
		prd->prd_refs_peek = l->l_psrefs;
		if (__predict_false(prd->prd_refs_peek > PSREF_DEBUG_NITEMS))
			panic("exceeded PSREF_DEBUG_NITEMS");
	}
	for (i = 0; i < prd->prd_refs_peek; i++) {
		struct psref_debug_item *prdi = &prd->prd_items[i];
		if (prdi->prdi_psref != NULL)
			continue;
		prdi->prdi_caller = psref->psref_debug;
		prdi->prdi_psref = psref;
		psref->psref_debug = prdi;
		break;
	}
	if (__predict_false(i == prd->prd_refs_peek))
		panic("out of range: %d", i);
	splx(s);
}

static void
psref_debug_release(struct psref *psref)
{
	int s;

	s = splserial();
	if (__predict_true(psref->psref_debug != NULL)) {
		struct psref_debug_item *prdi = psref->psref_debug;
		prdi->prdi_psref = NULL;
	}
	splx(s);
}

void
psref_debug_barrier(void)
{
	struct psref_debug *prd;
	struct lwp *l = curlwp;
	int s, i;

	prd = lwp_getspecific(psref_debug_lwp_key);
	if (__predict_false(prd == NULL))
		return;

	s = splserial();
	for (i = 0; i < prd->prd_refs_peek; i++) {
		struct psref_debug_item *prdi = &prd->prd_items[i];
		if (__predict_true(prdi->prdi_psref == NULL))
			continue;
		panic("psref leaked: lwp(%p) acquired at %p", l,
		    prdi->prdi_caller);
	}
	prd->prd_refs_peek = 0;	/* Reset the counter */
	splx(s);
}
#endif /* PSREF_DEBUG */