
Lines Matching defs:tasklet

51 #include <linux/tasklet.h>
142 * Initialize the tasklet queue tq for running tasklets at softint
179 * Finalize the tasklet queue tq: free all resources associated
195 * Soft interrupt handler: Process queued tasklets on the tasklet
221 struct tasklet_struct *tasklet;
223 /* Remove the first tasklet from the queue. */
224 tasklet = SIMPLEQ_FIRST(&th);
227 KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
235 if (!tasklet_trylock(tasklet)) {
241 tasklet_queue_enqueue(tq, tasklet);
250 if (atomic_load_acquire(&tasklet->tl_disablecount)) {
255 tasklet_unlock(tasklet);
256 tasklet_queue_enqueue(tq, tasklet);
261 KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
263 atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);
265 (*tasklet->func)(tasklet->data);
268 tasklet_unlock(tasklet);
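The handler excerpts above dequeue one tasklet at a time, try to mark it running, requeue it while it cannot run, and otherwise clear its SCHEDULED bit and invoke the callback. A minimal user-space sketch of that per-tasklet step follows, using C11 atomics in place of the kernel's atomic_* operations; the tasklet_example type and the requeue() callback are assumptions for illustration, not the driver code itself.

/*
 * Illustrative sketch only: C11 atomics stand in for atomic_cas_uint,
 * atomic_and_uint, and atomic_load_acquire; requeue() is hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define	TASKLET_SCHEDULED	1u
#define	TASKLET_RUNNING		2u

struct tasklet_example {
	atomic_uint	state;		/* TASKLET_{SCHEDULED,RUNNING} */
	atomic_uint	disablecount;
	void		(*func)(unsigned long);
	unsigned long	data;
};

static bool
example_trylock(struct tasklet_example *t)
{
	unsigned s = atomic_load_explicit(&t->state, memory_order_relaxed);

	do {
		if (s & TASKLET_RUNNING)
			return false;	/* someone else is running it */
	} while (!atomic_compare_exchange_weak_explicit(&t->state, &s,
	    s | TASKLET_RUNNING, memory_order_acquire, memory_order_relaxed));
	return true;
}

static void
example_run_one(struct tasklet_example *t,
    void (*requeue)(struct tasklet_example *))
{

	/* Could not claim it: leave it for a later softint pass. */
	if (!example_trylock(t)) {
		requeue(t);
		return;
	}

	/* Disabled: keep it scheduled, but do not run it now. */
	if (atomic_load_explicit(&t->disablecount, memory_order_acquire)) {
		atomic_fetch_and_explicit(&t->state,
		    ~TASKLET_RUNNING, memory_order_release);
		requeue(t);
		return;
	}

	/* Clear SCHEDULED so it may be rescheduled while it runs. */
	atomic_fetch_and_explicit(&t->state,
	    ~TASKLET_SCHEDULED, memory_order_relaxed);
	(*t->func)(t->data);
	atomic_fetch_and_explicit(&t->state,
	    ~TASKLET_RUNNING, memory_order_release);
}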
273 * tasklet_queue_schedule(tq, tasklet)
275 * Schedule tasklet to run on tq. If it was already scheduled and
280 struct tasklet_struct *tasklet)
286 ostate = atomic_load_relaxed(&tasklet->tl_state);
290 } while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
297 tasklet_queue_enqueue(tq, tasklet);
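The excerpts above suggest a schedule-once CAS loop: only the caller that makes the 0-to-1 transition on TASKLET_SCHEDULED enqueues the tasklet. A sketch of that loop, reusing the tasklet_example type and flags from the earlier sketch; the enqueue() callback stands in for tasklet_queue_enqueue() and is hypothetical.

/*
 * Illustrative sketch only: the CAS loop sets SCHEDULED at most once
 * per pending run; losers of the race simply return.
 */
static void
example_schedule(struct tasklet_example *t,
    void (*enqueue)(struct tasklet_example *))
{
	unsigned ostate, nstate;

	do {
		ostate = atomic_load_explicit(&t->state,
		    memory_order_relaxed);
		if (ostate & TASKLET_SCHEDULED)
			return;		/* already queued: nothing to do */
		nstate = ostate | TASKLET_SCHEDULED;
	} while (!atomic_compare_exchange_weak_explicit(&t->state,
	    &ostate, nstate, memory_order_relaxed, memory_order_relaxed));

	/* We made the 0->1 transition, so we own the enqueue. */
	enqueue(t);
}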
301 * tasklet_queue_enqueue(tq, tasklet)
303 * Put tasklet on the queue tq and ensure it will run. tasklet
307 tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
312 KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);
322 SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
329 * tasklet_init(tasklet, func, data)
331 * Initialize tasklet to call func(data) when scheduled.
334 * barriers or store releases to publish the tasklet to other CPUs
338 tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
342 atomic_store_relaxed(&tasklet->tl_state, 0);
343 atomic_store_relaxed(&tasklet->tl_disablecount, 0);
344 tasklet->func = func;
345 tasklet->data = data;
349 * tasklet_schedule(tasklet)
351 * Schedule tasklet to run at regular priority. If it was already
355 tasklet_schedule(struct tasklet_struct *tasklet)
358 tasklet_queue_schedule(&tasklet_queue, tasklet);
362 * tasklet_hi_schedule(tasklet)
364 * Schedule tasklet to run at high priority. If it was already
368 tasklet_hi_schedule(struct tasklet_struct *tasklet)
371 tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
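A hypothetical driver usage of the interface documented above: set up the tasklet once with tasklet_init(), then schedule it from an interrupt handler so the bulk of the work runs later at softint priority. The mydev type and the mydev_* functions are illustrative only.

#include <linux/tasklet.h>

struct mydev {
	struct tasklet_struct	sc_tasklet;
	/* ... device state ... */
};

static void
mydev_softwork(unsigned long arg)
{
	struct mydev *sc = (struct mydev *)arg;

	/* Deferred work runs here, outside the hard interrupt path. */
	(void)sc;
}

static void
mydev_attach(struct mydev *sc)
{

	/* Publish func/data before the tasklet can first be scheduled. */
	tasklet_init(&sc->sc_tasklet, mydev_softwork, (unsigned long)sc);
}

static void
mydev_intr(struct mydev *sc)
{

	/* Scheduling an already-scheduled tasklet changes nothing. */
	tasklet_schedule(&sc->sc_tasklet);
}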
375 * tasklet_disable_nosync(tasklet)
377 * Increment the disable count of tasklet, but don't wait for it
380 * As long as the disable count is nonzero, the tasklet's function
381 * will not run, but if already scheduled, the tasklet will remain
388 tasklet_disable_nosync(struct tasklet_struct *tasklet)
393 disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
402 * tasklet_disable(tasklet)
404 * Increment the disable count of tasklet, and if it was already
407 * As long as the disable count is nonzero, the tasklet's function
408 * will not run, but if already scheduled, the tasklet will remain
412 * If tasklet is guaranteed not to be scheduled, e.g. if you have
419 tasklet_disable(struct tasklet_struct *tasklet)
423 tasklet_disable_nosync(tasklet);
426 tasklet_unlock_wait(tasklet);
430 * tasklet_enable(tasklet)
432 * Decrement tasklet's disable count. If it was previously
438 tasklet_enable(struct tasklet_struct *tasklet)
441 (void)__tasklet_enable(tasklet);
445 * tasklet_kill(tasklet)
447 * Busy-wait for tasklet to run, if it is currently scheduled.
452 tasklet_kill(struct tasklet_struct *tasklet)
459 while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
479 tasklet_unlock_wait(tasklet);
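A hypothetical pairing of the disable/enable and kill operations documented above, continuing the mydev sketch: disable around a quiescent period, enable to let a still-pending tasklet run again, and kill before the tasklet's memory goes away.

static void
mydev_suspend(struct mydev *sc)
{

	/* Returns only once the callback is no longer running anywhere. */
	tasklet_disable(&sc->sc_tasklet);
}

static void
mydev_resume(struct mydev *sc)
{

	/* Drop the disable count; a still-scheduled tasklet may now run. */
	tasklet_enable(&sc->sc_tasklet);
}

static void
mydev_detach(struct mydev *sc)
{

	/*
	 * With interrupts already quiesced so nothing reschedules it,
	 * wait for any pending run to finish before freeing sc.
	 */
	tasklet_kill(&sc->sc_tasklet);
}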
483 * tasklet_is_locked(tasklet)
485 * True if tasklet is currently locked. Caller must use it only
489 tasklet_is_locked(const struct tasklet_struct *tasklet)
492 return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
496 * tasklet_trylock(tasklet)
498 * Try to lock tasklet, i.e., set TASKLET_RUNNING. Return true if
504 tasklet_trylock(struct tasklet_struct *tasklet)
509 state = atomic_load_relaxed(&tasklet->tl_state);
512 } while (atomic_cas_uint(&tasklet->tl_state, state,
522 * tasklet_unlock(tasklet)
524 * Unlock tasklet, i.e., clear TASKLET_RUNNING.
529 tasklet_unlock(struct tasklet_struct *tasklet)
532 KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);
539 atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
543 * tasklet_unlock_wait(tasklet)
545 * Busy-wait until tasklet is not running.
550 tasklet_unlock_wait(const struct tasklet_struct *tasklet)
554 while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
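The lock protocol described above clears TASKLET_RUNNING with release semantics and busy-waits on it with acquire semantics, so a waiter observes the callback's side effects. A sketch of that pairing with C11 atomics, reusing the tasklet_example type from the earlier sketch:

/*
 * Illustrative sketch only: release on clearing RUNNING pairs with
 * the acquire load in the busy-wait.
 */
static void
example_unlock(struct tasklet_example *t)
{

	/* Release: publish the callback's stores to any waiter. */
	atomic_fetch_and_explicit(&t->state,
	    ~TASKLET_RUNNING, memory_order_release);
}

static void
example_unlock_wait(struct tasklet_example *t)
{

	/* Spin until RUNNING clears; acquire pairs with example_unlock. */
	while (atomic_load_explicit(&t->state,
	    memory_order_acquire) & TASKLET_RUNNING)
		continue;
}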
561 * The i915 driver abuses the tasklet abstraction like a cop abuses his
566 * __tasklet_disable_sync_once(tasklet)
568 * Increment the disable count of tasklet, and if this is the
572 * Caller must not care about whether the tasklet is running, or
573 * about waiting for any side effects of the tasklet to complete,
577 __tasklet_disable_sync_once(struct tasklet_struct *tasklet)
582 disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
594 tasklet_unlock_wait(tasklet);
598 * __tasklet_enable_sync_once(tasklet)
600 * Decrement the disable count of tasklet, and if it goes to zero,
601 * kill tasklet.
604 __tasklet_enable_sync_once(struct tasklet_struct *tasklet)
612 disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
616 * If it became zero, kill the tasklet. If it was not zero,
620 tasklet_kill(tasklet);
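A hypothetical use of the once-only pair above, again in terms of the mydev sketch: the first disable waits for a running callback to finish, and the enable that brings the count back to zero kills the tasklet.

static void
mydev_park(struct mydev *sc)
{

	/* First disable waits for any in-flight run to complete. */
	__tasklet_disable_sync_once(&sc->sc_tasklet);
}

static void
mydev_unpark(struct mydev *sc)
{

	/* Last enable (count reaches zero) kills the tasklet. */
	__tasklet_enable_sync_once(&sc->sc_tasklet);
}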
624 * __tasklet_is_enabled(tasklet)
626 * True if tasklet is not currently disabled. Answer may be stale
631 __tasklet_is_enabled(const struct tasklet_struct *tasklet)
635 disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);
641 * __tasklet_is_scheduled(tasklet)
643 * True if tasklet is currently scheduled. Answer may be stale as
648 __tasklet_is_scheduled(const struct tasklet_struct *tasklet)
651 return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
655 * __tasklet_enable(tasklet)
657 * Decrement tasklet's disable count. If it was previously
664 __tasklet_enable(struct tasklet_struct *tasklet)
670 * before potentially allowing tasklet to run again by
679 disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
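The excerpt above hinges on memory ordering: the caller's writes must be visible before the decrement can allow the tasklet to run again. A sketch of that enable path with C11 atomics, reusing the tasklet_example type from the earlier sketch; the release on the decrement pairs with the acquire load of disablecount in the handler sketch.

#include <assert.h>

static bool
example_enable(struct tasklet_example *t)
{
	unsigned old;

	/* Release: publish prior writes before the count can hit zero. */
	old = atomic_fetch_sub_explicit(&t->disablecount, 1,
	    memory_order_release);
	assert(old != 0);		/* must have been disabled */

	return old == 1;		/* true if this call re-enabled it */
}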