Home | History | Annotate | Line # | Download | only in linux
      1 /*	$NetBSD: linux_irq_work.c,v 1.2 2021/12/19 11:50:54 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2021 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: linux_irq_work.c,v 1.2 2021/12/19 11:50:54 riastradh Exp $");
     34 
     35 #include <sys/param.h>
     36 
     37 #include <sys/atomic.h>
     38 #include <sys/intr.h>
     39 #include <sys/kmem.h>
     40 #include <sys/mutex.h>
     41 #include <sys/percpu.h>
     42 #include <sys/queue.h>
     43 
     44 #include <linux/irq_work.h>
     45 
/*
 * Per-CPU state: a queue of pending irq_work entries, protected by a
 * spin lock at IPL_HIGH so it can be appended to from any interrupt
 * context.
 */
struct irq_work_cpu {
	kmutex_t			iwc_lock;	/* protects iwc_todo */
	SIMPLEQ_HEAD(, irq_work)	iwc_todo;	/* pending work items */
};
     50 
enum {
	IRQ_WORK_PENDING = 1,	/* set in iw_flags while queued, cleared
				 * just before the work function runs */
};
     54 
/* Per-CPU array of struct irq_work_cpu pointers (see irq_work_cpu_init). */
static struct percpu *irq_work_percpu;
/* Softint handle for irq_work_intr; established once at module init. */
static void *irq_work_sih __read_mostly;
     57 
/*
 * irq_work_intr(cookie)
 *
 *	Softint handler.  Atomically steals the current CPU's entire
 *	todo list under the spin lock, then runs each work function
 *	with the lock dropped.  iw_flags is cleared before calling the
 *	function so the work item may requeue itself from within the
 *	callback.
 */
static void
irq_work_intr(void *cookie)
{
	struct irq_work_cpu *const *iwcp, *iwc;
	SIMPLEQ_HEAD(, irq_work) todo = SIMPLEQ_HEAD_INITIALIZER(todo);
	struct irq_work *iw, *next;

	/* Move the whole pending queue to a local list. */
	iwcp = percpu_getref(irq_work_percpu);
	iwc = *iwcp;
	mutex_spin_enter(&iwc->iwc_lock);
	SIMPLEQ_CONCAT(&todo, &iwc->iwc_todo);
	mutex_spin_exit(&iwc->iwc_lock);
	percpu_putref(irq_work_percpu);

	/*
	 * SAFE iteration: the callback may free or requeue iw, so grab
	 * the next pointer first and clear PENDING before the call.
	 */
	SIMPLEQ_FOREACH_SAFE(iw, &todo, iw_entry, next) {
		atomic_store_relaxed(&iw->iw_flags, 0);
		(*iw->func)(iw);
	}
}
     77 
     78 static void
     79 irq_work_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
     80 {
     81 	struct irq_work_cpu **iwcp = ptr, *iwc;
     82 
     83 	iwc = *iwcp = kmem_zalloc(sizeof(*iwc), KM_SLEEP);
     84 	mutex_init(&iwc->iwc_lock, MUTEX_DEFAULT, IPL_HIGH);
     85 	SIMPLEQ_INIT(&iwc->iwc_todo);
     86 }
     87 
     88 static void
     89 irq_work_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
     90 {
     91 	struct irq_work_cpu **iwcp = ptr, *iwc = *iwcp;
     92 
     93 	KASSERT(SIMPLEQ_EMPTY(&iwc->iwc_todo));
     94 	mutex_destroy(&iwc->iwc_lock);
     95 	kmem_free(iwc, sizeof(*iwc));
     96 }
     97 
/*
 * linux_irq_work_init()
 *
 *	One-time module initialization.  The percpu state is created
 *	before the softint is established, so the handler can never run
 *	before its per-CPU queues exist.
 */
void
linux_irq_work_init(void)
{

	irq_work_percpu = percpu_create(sizeof(struct irq_work_cpu),
	    irq_work_cpu_init, irq_work_cpu_fini, NULL);
	irq_work_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
	    irq_work_intr, NULL);
}
    107 
/*
 * linux_irq_work_fini()
 *
 *	Module teardown, in the reverse order of linux_irq_work_init:
 *	disestablish the softint first (waits for pending handlers),
 *	then free the per-CPU queues it used.
 */
void
linux_irq_work_fini(void)
{

	softint_disestablish(irq_work_sih);
	percpu_free(irq_work_percpu, sizeof(struct irq_work_cpu));
}
    115 
    116 void
    117 init_irq_work(struct irq_work *iw, void (*func)(struct irq_work *))
    118 {
    119 
    120 	iw->iw_flags = 0;
    121 	iw->func = func;
    122 }
    123 
/*
 * irq_work_queue(iw)
 *
 *	Schedule iw to run on the current CPU's irq_work softint.
 *	Returns true if it was newly queued, false if it was already
 *	pending (the atomic swap makes concurrent queuers race safely:
 *	exactly one of them sees PENDING clear and enqueues).
 */
bool
irq_work_queue(struct irq_work *iw)
{
	struct irq_work_cpu *const *iwcp, *iwc;

	/* Atomically mark pending; bail if someone else already did. */
	if (atomic_swap_uint(&iw->iw_flags, IRQ_WORK_PENDING)
	    & IRQ_WORK_PENDING)
		return false;

	/*
	 * Append to this CPU's queue and schedule the softint while
	 * still bound to the CPU via the percpu reference, so the
	 * schedule targets the same CPU that holds the entry.
	 */
	iwcp = percpu_getref(irq_work_percpu);
	iwc = *iwcp;
	mutex_spin_enter(&iwc->iwc_lock);
	SIMPLEQ_INSERT_TAIL(&iwc->iwc_todo, iw, iw_entry);
	mutex_spin_exit(&iwc->iwc_lock);
	softint_schedule(irq_work_sih);
	percpu_putref(irq_work_percpu);

	return true;
}
    143