/*	$NetBSD: linux_tasklet.c,v 1.1 2021/12/19 01:17:14 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.1 2021/12/19 01:17:14 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_alloc cannot fail.  */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;
		unsigned state;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		do {
			state = tasklet->tl_state;
			__insn_barrier();
			/* It had better be scheduled.  */
			KASSERT(state & TASKLET_SCHEDULED);
			if (state & TASKLET_RUNNING)
				break;
		} while (atomic_cas_uint(&tasklet->tl_state, state,
			state | TASKLET_RUNNING) != state);

		if (state & TASKLET_RUNNING) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Wait for last runner's side effects.  */
		membar_enter();

		/* Check whether it's currently disabled.  */
		if (tasklet->tl_disablecount) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			KASSERT(tasklet->tl_state & TASKLET_RUNNING);
			atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/*
		 * Guarantee all caller-relevant reads or writes in
		 * func have completed before clearing RUNNING bit.
		 */
		membar_exit();

		/* Clear RUNNING to notify tasklet_disable.  */
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = tasklet->tl_state;
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	tasklet->tl_state = 0;
	tasklet->tl_disablecount = 0;
	tasklet->func = func;
	tasklet->data = data;
}

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}
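
/*
 * Illustrative sketch (not part of the implementation): a hypothetical
 * driver might pair tasklet_init with tasklet_schedule to defer work
 * from its hard interrupt handler to softint context.  The names
 * foo_softc, sc_rxtask, foo_rxtask, and foo_intr below are assumptions
 * for illustration only.
 *
 *	static void
 *	foo_rxtask(unsigned long arg)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)arg;
 *
 *		(process completed receive work for sc)
 *	}
 *
 *	(once, at attach time, before the interrupt can fire)
 *	tasklet_init(&sc->sc_rxtask, &foo_rxtask, (unsigned long)sc);
 *
 *	(from foo_intr, the hard interrupt handler)
 *	tasklet_schedule(&sc->sc_rxtask);
 */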

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/* Wait for it to finish running, if it was running.  */
	while (tasklet->tl_state & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * Guarantee any side effects of running are visible to us
	 * before we return.
	 *
	 * XXX membar_sync is overkill here.  It is tempting to issue
	 * membar_enter, but it only orders stores | loads, stores;
	 * what we really want here is load_acquire(&tasklet->tl_state)
	 * above, i.e. to witness all side effects preceding the store
	 * whose value we loaded.  Absent that, membar_sync is the best
	 * we can do.
	 */
	membar_sync();
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 */
	membar_exit();

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);
}
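
/*
 * Illustrative sketch (not part of the implementation): as documented
 * above, tasklet_disable/tasklet_enable bracket a short section during
 * which the tasklet function must not run, e.g. while the caller
 * briefly updates state that the tasklet function reads.  The softc
 * and field names are assumptions for illustration only; keep the
 * disabled window short, since a scheduled tasklet busy-waits in the
 * softint until re-enabled.
 *
 *	tasklet_disable(&sc->sc_rxtask);  (also waits if currently running)
 *	(briefly update state shared with foo_rxtask)
 *	tasklet_enable(&sc->sc_rxtask);   (it may now run again)
 */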

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (tasklet->tl_state & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	while (tasklet->tl_state & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * Wait for any side effects of the last run to become visible
	 * to us.  Again, membar_sync is overkill; we really want
	 * load_acquire(&tasklet->tl_state) here.
	 */
	membar_sync();
}
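
/*
 * Illustrative sketch (not part of the implementation): a typical
 * detach path, per the constraints documented above, first guarantees
 * that nothing will schedule the tasklet again (e.g. by disestablishing
 * the hard interrupt handler that called tasklet_schedule), then uses
 * tasklet_kill to wait out any pending or running instance before
 * freeing the memory containing the tasklet_struct.  The names below
 * are assumptions for illustration only.
 *
 *	(disestablish foo_intr so tasklet_schedule is no longer called)
 *	tasklet_kill(&sc->sc_rxtask);
 *	(now safe to free sc, including sc->sc_rxtask)
 */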