/*	$NetBSD: linux_tasklet.c,v 1.8 2021/12/19 11:57:34 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.8 2021/12/19 11:57:34 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu * */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem. Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success! */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}
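
/*
 * Example (editorial sketch, not part of the upstream file): a module
 * init/fini pair that depends on tasklets would bracket its lifetime
 * with these calls.  The surrounding function names are hypothetical.
 *
 *	int
 *	mydrm_init(void)
 *	{
 *		int error;
 *
 *		error = linux_tasklets_init();
 *		if (error)
 *			return error;
 *		...
 *		return 0;
 *	}
 *
 *	void
 *	mydrm_fini(void)
 *	{
 *		...
 *		linux_tasklets_fini();
 *	}
 */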

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem. All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

static void
tasklet_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc;

	*tcp = tc = kmem_zalloc(sizeof(*tc), KM_SLEEP);
	SIMPLEQ_INIT(&tc->tc_head);
}

static void
tasklet_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc = *tcp;

	KASSERT(SIMPLEQ_EMPTY(&tc->tc_head));
	kmem_free(tc, sizeof(*tc));
	*tcp = NULL;		/* paranoia */
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory. percpu_create cannot fail. */
	tq->tq_percpu = percpu_create(sizeof(struct tasklet_cpu *),
	    tasklet_cpu_init, tasklet_cpu_fini, NULL);
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint. softint_establish may fail. */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success! */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu *));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu *));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu **tcp, *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed. */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue. */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled. Clear SCHEDULED and call func. */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable. */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq. If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit. If already set, we're done. */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now. Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run. tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu **tcp, *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it. No
	 * memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
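
/*
 * Example (editorial sketch, not part of the upstream file): a driver
 * would embed a tasklet in its softc, point it at a handler that takes
 * the softc as the unsigned long argument, and schedule it from its
 * hard interrupt handler.  The names mydrv_softc, mydrv_handler, and
 * sc_tasklet here are hypothetical.
 *
 *	struct mydrv_softc {
 *		struct tasklet_struct	sc_tasklet;
 *		...
 *	};
 *
 *	static void
 *	mydrv_handler(unsigned long data)
 *	{
 *		struct mydrv_softc *sc = (struct mydrv_softc *)data;
 *
 *		... deferred work runs here at softint priority ...
 *	}
 *
 *	At attach time:
 *		tasklet_init(&sc->sc_tasklet, mydrv_handler,
 *		    (unsigned long)sc);
 *
 *	From the hard interrupt handler:
 *		tasklet_schedule(&sc->sc_tasklet);
 */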

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority. If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority. If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count. */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count. */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running. */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count. If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
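
/*
 * Example (editorial sketch, not part of the upstream file): bracketing
 * an update of state shared with the tasklet function.  While the
 * disable count is nonzero a scheduled tasklet busy-waits in the
 * softint, so the bracketed section should be short.  sc_tasklet is
 * the hypothetical field from the sketch after tasklet_init above.
 *
 *	tasklet_disable(&sc->sc_tasklet);
 *	... update state read by the tasklet function ...
 *	tasklet_enable(&sc->sc_tasklet);
 */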

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue. */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0 -> 0
	 *	SCHEDULED -> 0
	 *	SCHEDULED -> RUNNING
	 *	RUNNING -> 0
	 *	RUNNING -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running. */
	tasklet_unlock_wait(tasklet);
}
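
/*
 * Example (editorial sketch, not part of the upstream file): typical
 * teardown order at detach time -- first make sure nothing can
 * schedule the tasklet again, e.g. by disestablishing the hard
 * interrupt, then kill it, then free the structure embedding it.
 * sc, sc_ih, and sc_tasklet are hypothetical.
 *
 *	intr_disestablish(sc->sc_ih);
 *	tasklet_kill(&sc->sc_tasklet);
 *	kmem_free(sc, sizeof(*sc));
 */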

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked. Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING. Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
	    state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock. */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}
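
/*
 * Example (editorial sketch, not part of the upstream file): the
 * RUNNING bit is a try-lock around the tasklet function; this is the
 * protocol tasklet_softintr follows above.  External code that must
 * exclude the tasklet function for a moment could use the same
 * pattern.  sc_tasklet is hypothetical.
 *
 *	if (tasklet_trylock(&sc->sc_tasklet)) {
 *		... the tasklet function cannot run here ...
 *		tasklet_unlock(&sc->sc_tasklet);
 *	} else {
 *		... it is running on some CPU; back off and retry ...
 *	}
 */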

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver abuses the tasklet abstraction like a cop abuses his
 * wife.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count. */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running. If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count. */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet. If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
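
/*
 * Example (editorial sketch, not part of the upstream file): the
 * suspend/resume-style pairing the i915 driver relies on.  Only the
 * first disable waits for the tasklet to finish; later disables just
 * bump the count, and the final matching enable kills the tasklet.
 * sc_tasklet is hypothetical.
 *
 *	On each suspend-like path:
 *		__tasklet_disable_sync_once(&sc->sc_tasklet);
 *
 *	On each matching resume-like path:
 *		__tasklet_enable_sync_once(&sc->sc_tasklet);
 */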

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled. Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled. Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count. If it was previously
 *	scheduled to run, it may now run. Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count. */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}
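
/*
 * Example (editorial sketch, not part of the upstream file): the
 * return value lets a caller kick the tasklet only on the enable that
 * actually made it runnable again.  sc_tasklet is hypothetical.
 *
 *	if (__tasklet_enable(&sc->sc_tasklet))
 *		tasklet_hi_schedule(&sc->sc_tasklet);
 */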