/*	$NetBSD: linux_tasklet.c,v 1.7 2021/12/19 11:49:11 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.7 2021/12/19 11:49:11 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem. Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success! */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem. All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory. percpu_alloc cannot fail. */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint. softint_establish may fail. */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success! */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed. */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue. */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled. Clear SCHEDULED and call func. */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable. */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq. If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit. If already set, we're done. */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now. Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run. tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it. No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
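
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically embeds a tasklet in its softc, initializes it once before
 * its interrupt can fire, and schedules it from the hard interrupt
 * handler to defer work to softint context. mydrv_softc,
 * mydrv_deferred, mydrv_process, and sc_tasklet are hypothetical
 * names.
 *
 *	struct mydrv_softc {
 *		struct tasklet_struct	sc_tasklet;
 *	};
 *
 *	static void
 *	mydrv_deferred(unsigned long data)
 *	{
 *		struct mydrv_softc *sc = (struct mydrv_softc *)data;
 *
 *		mydrv_process(sc);
 *	}
 *
 *	At attach time, before the interrupt is enabled:
 *
 *		tasklet_init(&sc->sc_tasklet, &mydrv_deferred,
 *		    (unsigned long)sc);
 *
 *	In the hard interrupt handler:
 *
 *		tasklet_schedule(&sc->sc_tasklet);
 */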

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority. If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority. If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count. */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count. */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running. */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count. If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
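
/*
 * Usage sketch (illustrative only): tasklet_disable and tasklet_enable
 * bracket a short section during which the tasklet's func must not
 * run; sc_tasklet is a hypothetical field name.
 *
 *	tasklet_disable(&sc->sc_tasklet);
 *	(briefly touch state shared with the tasklet's func)
 *	tasklet_enable(&sc->sc_tasklet);
 *
 * While the tasklet is disabled, a scheduled instance keeps the
 * softint re-triggering itself, so keep the bracketed section short.
 */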

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue. */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0 -> 0
	 *	SCHEDULED -> 0
	 *	SCHEDULED -> RUNNING
	 *	RUNNING -> 0
	 *	RUNNING -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running. */
	tasklet_unlock_wait(tasklet);
}
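
/*
 * Teardown sketch (illustrative only): a typical caller first makes
 * sure nothing can schedule the tasklet again (e.g. by disestablishing
 * or masking its interrupt source), then drains it with tasklet_kill
 * before freeing the memory containing it; sc_tasklet is hypothetical.
 *
 *	(prevent further calls to tasklet_schedule on sc_tasklet)
 *	tasklet_kill(&sc->sc_tasklet);
 *	(now safe to free the structure containing sc_tasklet)
 */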

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked. Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}
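
/*
 * For example (sketch, hypothetical names), a tasklet's func can
 * assert that it is being run as a tasklet:
 *
 *	KASSERT(tasklet_is_locked(&sc->sc_tasklet));
 *
 * The negative answer is unstable, so !tasklet_is_locked(...) must not
 * be asserted.
 */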

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING. Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
	    state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock. */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}
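
/*
 * Protocol sketch (illustrative only): TASKLET_RUNNING serves as a
 * lock around execution of the tasklet's func. An executor brackets
 * the call with tasklet_trylock and tasklet_unlock, as
 * tasklet_softintr above does; t is a hypothetical pointer to a
 * published tasklet.
 *
 *	if (tasklet_trylock(t)) {
 *		(*t->func)(t->data);
 *		tasklet_unlock(t);
 *	}
 */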

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver severely abuses the tasklet abstraction.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count. */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running. If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count. */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet. If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
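
/*
 * Pairing sketch (illustrative only): the _sync_once variants act as a
 * reference-counted on/off switch -- the first disable waits for the
 * func to finish, and the last enable kills the tasklet; t is a
 * hypothetical tasklet.
 *
 *	__tasklet_disable_sync_once(&t);	(first call waits for t)
 *	(t's func will not run here)
 *	__tasklet_enable_sync_once(&t);		(last call kills t)
 */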

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled. Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled. Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count. If it was previously
 *	scheduled to run, it may now run. Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count. */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}