/*	$NetBSD: linux_tasklet.c,v 1.5 2021/12/19 11:03:18 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.5 2021/12/19 11:03:18 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}
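
/*
 * Example (illustrative sketch only, not part of this file): the
 * init/fini pair is expected to bracket all tasklet use, e.g. from a
 * hypothetical module load/unload path:
 *
 *	static int
 *	example_load(void)
 *	{
 *		int error;
 *
 *		error = linux_tasklets_init();
 *		if (error)
 *			return error;
 *		... attach drivers that use tasklets ...
 *		return 0;
 *	}
 *
 *	static void
 *	example_unload(void)
 *	{
 *
 *		... detach drivers, kill all tasklets ...
 *		linux_tasklets_fini();
 *	}
 */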

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_alloc cannot fail.  */
	tq->tq_percpu = percpu_alloc(sizeof(struct tasklet_cpu));
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tc = percpu_getref(tq->tq_percpu);
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}
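
/*
 * Example usage (illustrative sketch only; the "foo" driver, its softc,
 * and its interrupt handler are hypothetical and not part of this file):
 * a driver initializes a tasklet once at attach time and schedules it
 * from its hard interrupt handler to defer work to softint context.
 *
 *	struct foo_softc {
 *		struct tasklet_struct	sc_tasklet;
 *	};
 *
 *	static void
 *	foo_do_work(unsigned long arg)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)arg;
 *
 *		... deferred work, runs at softint priority ...
 *	}
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *
 *		tasklet_init(&sc->sc_tasklet, &foo_do_work,
 *		    (unsigned long)sc);
 *	}
 *
 *	static int
 *	foo_intr(void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *
 *		... acknowledge the hardware ...
 *		tasklet_schedule(&sc->sc_tasklet);  (no effect if already
 *		    scheduled and not yet run)
 *		return 1;
 *	}
 */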

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
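
/*
 * Example usage (illustrative sketch only; the "sc" softc name is
 * hypothetical): tasklet_disable/tasklet_enable bracket a short
 * critical section during which the tasklet function must not run,
 * e.g. while reconfiguring state the tasklet reads.  The disable
 * count nests, so pairs may be nested.
 *
 *	tasklet_disable(&sc->sc_tasklet);   (waits if currently running)
 *	... briefly update state shared with the tasklet ...
 *	tasklet_enable(&sc->sc_tasklet);    (may now run if scheduled)
 */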

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0 -> 0
	 *	SCHEDULED -> 0
	 *	SCHEDULED -> RUNNING
	 *	RUNNING -> 0
	 *	RUNNING -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}
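
/*
 * Example teardown sequence (illustrative sketch only; the "foo" names
 * are hypothetical): stop the source of scheduling first, then kill the
 * tasklet, so it cannot be rescheduled behind our back.
 *
 *	foo_disable_interrupts(sc);         (no more tasklet_schedule)
 *	tasklet_kill(&sc->sc_tasklet);      (wait for pending/running)
 *	... now safe to free resources the tasklet used ...
 */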

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		/* Pairs with membar_exit in tasklet_unlock.  */
		state = atomic_load_acquire(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
	    state | TASKLET_RUNNING) != state);

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with atomic_load_acquire in tasklet_trylock and
	 * tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}
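
/*
 * Example (illustrative sketch only; the "sc" softc name is
 * hypothetical): tasklet_trylock/tasklet_unlock can be used by a
 * caller, as the softint handler above does, to claim the RUNNING bit
 * before touching state the tasklet function also touches:
 *
 *	if (tasklet_trylock(&sc->sc_tasklet)) {
 *		... tasklet function is not running concurrently ...
 *		tasklet_unlock(&sc->sc_tasklet);
 *	} else {
 *		... it is running; back off or retry ...
 *	}
 */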

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver abuses the tasklet abstraction, so the following
 * routines exist only to humour it.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
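
/*
 * Example (illustrative sketch only; "engine_tasklet" is a hypothetical
 * caller's tasklet, not part of this file): the once-only pair is meant
 * for paths that may be entered repeatedly but only need to synchronize
 * with the tasklet on the first disable (0 -> 1) and on the final enable
 * (1 -> 0):
 *
 *	__tasklet_disable_sync_once(&engine_tasklet);
 *	    (busy-waits for the tasklet only on the 0 -> 1 transition)
 *	... tasklet function will not run while the count is nonzero ...
 *	__tasklet_enable_sync_once(&engine_tasklet);
 *	    (kills the tasklet only on the 1 -> 0 transition)
 */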

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire(&tasklet->tl_disablecount) in
	 * tasklet_softintr.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}