Home | History | Annotate | Line # | Download | only in common
shared_intr.c revision 1.26
      1 /* $NetBSD: shared_intr.c,v 1.26 2020/09/26 02:35:31 thorpej Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1996 Carnegie-Mellon University.
     34  * All rights reserved.
     35  *
     36  * Authors: Chris G. Demetriou
     37  *
     38  * Permission to use, copy, modify and distribute this software and
     39  * its documentation is hereby granted, provided that both the copyright
     40  * notice and this permission notice appear in all copies of the
     41  * software, derivative works or modified versions, and any portions
     42  * thereof, and that both notices appear in supporting documentation.
     43  *
     44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     47  *
     48  * Carnegie Mellon requests users of this software to return to
     49  *
     50  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
     51  *  School of Computer Science
     52  *  Carnegie Mellon University
     53  *  Pittsburgh PA 15213-3890
     54  *
     55  * any improvements or extensions that they make and grant Carnegie the
     56  * rights to redistribute these changes.
     57  */
     58 
     59 /*
     60  * Common shared-interrupt-line functionality.
     61  */
     62 
     63 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
     64 
     65 __KERNEL_RCSID(0, "$NetBSD: shared_intr.c,v 1.26 2020/09/26 02:35:31 thorpej Exp $");
     66 
     67 #include <sys/param.h>
     68 #include <sys/kernel.h>
     69 #include <sys/cpu.h>
     70 #include <sys/kmem.h>
     71 #include <sys/kmem.h>
     72 #include <sys/systm.h>
     73 #include <sys/syslog.h>
     74 #include <sys/queue.h>
     75 #include <sys/atomic.h>
     76 #include <sys/intr.h>
     77 #include <sys/xcall.h>
     78 
     79 static const char *intr_typename(int);
     80 
     81 static const char *
     82 intr_typename(int type)
     83 {
     84 
     85 	switch (type) {
     86 	case IST_UNUSABLE:
     87 		return ("disabled");
     88 	case IST_NONE:
     89 		return ("none");
     90 	case IST_PULSE:
     91 		return ("pulsed");
     92 	case IST_EDGE:
     93 		return ("edge-triggered");
     94 	case IST_LEVEL:
     95 		return ("level-triggered");
     96 	}
     97 	panic("intr_typename: unknown type %d", type);
     98 }
     99 
    100 struct alpha_shared_intr *
    101 alpha_shared_intr_alloc(unsigned int n, unsigned int namesize)
    102 {
    103 	struct alpha_shared_intr *intr;
    104 	unsigned int i;
    105 
    106 	intr = kmem_alloc(n * sizeof(*intr), KM_SLEEP);
    107 	for (i = 0; i < n; i++) {
    108 		TAILQ_INIT(&intr[i].intr_q);
    109 		intr[i].intr_sharetype = IST_NONE;
    110 		intr[i].intr_dfltsharetype = IST_NONE;
    111 		intr[i].intr_nstrays = 0;
    112 		intr[i].intr_maxstrays = 5;
    113 		intr[i].intr_private = NULL;
    114 		intr[i].intr_cpu = NULL;
    115 		if (namesize != 0) {
    116 			intr[i].intr_string = kmem_zalloc(namesize, KM_SLEEP);
    117 		} else {
    118 			intr[i].intr_string = NULL;
    119 		}
    120 	}
    121 
    122 	return (intr);
    123 }
    124 
    125 int
    126 alpha_shared_intr_dispatch(struct alpha_shared_intr *intr, unsigned int num)
    127 {
    128 	struct alpha_shared_intrhand *ih;
    129 	int rv, handled;
    130 
    131 	atomic_add_long(&intr[num].intr_evcnt.ev_count, 1);
    132 
    133 	ih = intr[num].intr_q.tqh_first;
    134 	handled = 0;
    135 	while (ih != NULL) {
    136 
    137 		/*
    138 		 * The handler returns one of three values:
    139 		 *   0:	This interrupt wasn't for me.
    140 		 *   1: This interrupt was for me.
    141 		 *  -1: This interrupt might have been for me, but I can't say
    142 		 *      for sure.
    143 		 */
    144 
    145 		rv = (*ih->ih_fn)(ih->ih_arg);
    146 
    147 		handled = handled || (rv != 0);
    148 		ih = ih->ih_q.tqe_next;
    149 	}
    150 
    151 	return (handled);
    152 }
    153 
    154 static int
    155 alpha_shared_intr_wrapper(void * const arg)
    156 {
    157 	struct alpha_shared_intrhand * const ih = arg;
    158 	int rv;
    159 
    160 	KERNEL_LOCK(1, NULL);
    161 	rv = (*ih->ih_real_fn)(ih->ih_real_arg);
    162 	KERNEL_UNLOCK_ONE(NULL);
    163 
    164 	return rv;
    165 }
    166 
    167 struct alpha_shared_intrhand *
    168 alpha_shared_intr_alloc_intrhand(struct alpha_shared_intr *intr,
    169     unsigned int num, int type, int level, int flags,
    170     int (*fn)(void *), void *arg, const char *basename)
    171 {
    172 	struct alpha_shared_intrhand *ih;
    173 
    174 	if (intr[num].intr_sharetype == IST_UNUSABLE) {
    175 		printf("%s: %s %d: unusable\n", __func__,
    176 		    basename, num);
    177 		return NULL;
    178 	}
    179 
    180 	KASSERT(type != IST_NONE);
    181 
    182 	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
    183 
    184 	ih->ih_intrhead = intr;
    185 	ih->ih_fn = ih->ih_real_fn = fn;
    186 	ih->ih_arg = ih->ih_real_arg = arg;
    187 	ih->ih_level = level;
    188 	ih->ih_type = type;
    189 	ih->ih_num = num;
    190 
    191 	/*
    192 	 * Non-MPSAFE interrupts get a wrapper that takes the
    193 	 * KERNEL_LOCK.
    194 	 */
    195 	if ((flags & ALPHA_INTR_MPSAFE) == 0) {
    196 		ih->ih_fn = alpha_shared_intr_wrapper;
    197 		ih->ih_arg = ih;
    198 	}
    199 
    200 	return (ih);
    201 }
    202 
/*
 * Free a handler record previously obtained from
 * alpha_shared_intr_alloc_intrhand().  The caller must have already
 * unlinked it from the line's queue.
 */
void
alpha_shared_intr_free_intrhand(struct alpha_shared_intrhand *ih)
{

	kmem_free(ih, sizeof(*ih));
}
    209 
/*
 * Cross-call target that links (arg2 != NULL) or unlinks (arg2 == NULL)
 * the handler arg1 on its line's queue, adjusting the owning CPU's
 * handler count.  Must run on the CPU that services the line (asserted
 * below) so the queue is only ever modified from that CPU.
 */
static void
alpha_shared_intr_link_unlink_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intrhand *ih = arg1;
	struct alpha_shared_intr *intr = ih->ih_intrhead;
	unsigned int num = ih->ih_num;

	struct cpu_info *ci = intr[num].intr_cpu;

	KASSERT(ci != NULL);
	KASSERT(ci == curcpu() || !mp_online);
	KASSERT(!cpu_intr_p());

	/*
	 * Raise IPL to HIGH so no interrupt can observe the queue
	 * mid-update on this CPU; restored below.
	 */
	const unsigned long psl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);

	if (arg2 != NULL) {
		TAILQ_INSERT_TAIL(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand++;
	} else {
		TAILQ_REMOVE(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand--;
	}

	alpha_pal_swpipl(psl);
}
    235 
    236 bool
    237 alpha_shared_intr_link(struct alpha_shared_intr *intr,
    238     struct alpha_shared_intrhand *ih, const char *basename)
    239 {
    240 	int type = ih->ih_type;
    241 	unsigned int num = ih->ih_num;
    242 
    243 	KASSERT(mutex_owned(&cpu_lock));
    244 	KASSERT(ih->ih_intrhead == intr);
    245 
    246 	switch (intr[num].intr_sharetype) {
    247 	case IST_EDGE:
    248 	case IST_LEVEL:
    249 		if (type == intr[num].intr_sharetype)
    250 			break;
    251 	case IST_PULSE:
    252 		if (type != IST_NONE) {
    253 			if (intr[num].intr_q.tqh_first == NULL) {
    254 				printf("alpha_shared_intr_establish: %s %d: warning: using %s on %s\n",
    255 				    basename, num, intr_typename(type),
    256 				    intr_typename(intr[num].intr_sharetype));
    257 				type = intr[num].intr_sharetype;
    258 			} else {
    259 				printf("alpha_shared_intr_establish: %s %d: can't share %s with %s\n",
    260 				    basename, num, intr_typename(type),
    261 				    intr_typename(intr[num].intr_sharetype));
    262 				return (false);
    263 			}
    264 		}
    265 		break;
    266 
    267 	case IST_NONE:
    268 		/* not currently used; safe */
    269 		break;
    270 	}
    271 
    272 	intr[num].intr_sharetype = type;
    273 
    274 	/*
    275 	 * If a CPU hasn't been assigned yet, just give it to the
    276 	 * primary.
    277 	 */
    278 	if (intr[num].intr_cpu == NULL) {
    279 		intr[num].intr_cpu = &cpu_info_primary;
    280 	}
    281 
    282 	kpreempt_disable();
    283 	if (intr[num].intr_cpu == curcpu() || !mp_online) {
    284 		alpha_shared_intr_link_unlink_xcall(ih, ih);
    285 	} else {
    286 		uint64_t where = xc_unicast(XC_HIGHPRI,
    287 		    alpha_shared_intr_link_unlink_xcall, ih, ih,
    288 		    intr->intr_cpu);
    289 		xc_wait(where);
    290 	}
    291 	kpreempt_enable();
    292 
    293 	return (true);
    294 }
    295 
    296 void
    297 alpha_shared_intr_unlink(struct alpha_shared_intr *intr,
    298     struct alpha_shared_intrhand *ih, const char *basename)
    299 {
    300 	unsigned int num = ih->ih_num;
    301 
    302 	KASSERT(mutex_owned(&cpu_lock));
    303 
    304 	kpreempt_disable();
    305 	if (intr[num].intr_cpu == curcpu() || !mp_online) {
    306 		alpha_shared_intr_link_unlink_xcall(ih, NULL);
    307 	} else {
    308 		uint64_t where = xc_unicast(XC_HIGHPRI,
    309 		    alpha_shared_intr_link_unlink_xcall, ih, NULL,
    310 		    intr->intr_cpu);
    311 		xc_wait(where);
    312 	}
    313 	kpreempt_enable();
    314 }
    315 
/*
 * Return the current IST_* share type of interrupt line `num'.
 */
int
alpha_shared_intr_get_sharetype(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_sharetype);
}
    323 
    324 int
    325 alpha_shared_intr_isactive(struct alpha_shared_intr *intr, unsigned int num)
    326 {
    327 
    328 	return (intr[num].intr_q.tqh_first != NULL);
    329 }
    330 
    331 int
    332 alpha_shared_intr_firstactive(struct alpha_shared_intr *intr, unsigned int num)
    333 {
    334 
    335 	return (intr[num].intr_q.tqh_first != NULL &&
    336 		intr[num].intr_q.tqh_first->ih_q.tqe_next == NULL);
    337 }
    338 
    339 void
    340 alpha_shared_intr_set_dfltsharetype(struct alpha_shared_intr *intr,
    341     unsigned int num, int newdfltsharetype)
    342 {
    343 
    344 #ifdef DIAGNOSTIC
    345 	if (alpha_shared_intr_isactive(intr, num))
    346 		panic("alpha_shared_intr_set_dfltsharetype on active intr");
    347 #endif
    348 
    349 	intr[num].intr_dfltsharetype = newdfltsharetype;
    350 	intr[num].intr_sharetype = intr[num].intr_dfltsharetype;
    351 }
    352 
    353 void
    354 alpha_shared_intr_set_maxstrays(struct alpha_shared_intr *intr,
    355     unsigned int num, int newmaxstrays)
    356 {
    357 	int s = splhigh();
    358 	intr[num].intr_maxstrays = newmaxstrays;
    359 	intr[num].intr_nstrays = 0;
    360 	splx(s);
    361 }
    362 
/*
 * Reset the stray-interrupt counter for line `num'.
 */
void
alpha_shared_intr_reset_strays(struct alpha_shared_intr *intr,
    unsigned int num)
{

	/*
	 * Don't bother blocking interrupts; this doesn't have to be
	 * precise, but it does need to be fast.
	 */
	intr[num].intr_nstrays = 0;
}
    374 
    375 void
    376 alpha_shared_intr_stray(struct alpha_shared_intr *intr, unsigned int num,
    377     const char *basename)
    378 {
    379 
    380 	intr[num].intr_nstrays++;
    381 
    382 	if (intr[num].intr_maxstrays == 0)
    383 		return;
    384 
    385 	if (intr[num].intr_nstrays <= intr[num].intr_maxstrays)
    386 		log(LOG_ERR, "stray %s %d%s\n", basename, num,
    387 		    intr[num].intr_nstrays >= intr[num].intr_maxstrays ?
    388 		      "; stopped logging" : "");
    389 }
    390 
/*
 * Attach an opaque private pointer to interrupt line `num'.
 */
void
alpha_shared_intr_set_private(struct alpha_shared_intr *intr,
    unsigned int num, void *v)
{

	intr[num].intr_private = v;
}
    398 
/*
 * Return the opaque private pointer attached to interrupt line `num'.
 */
void *
alpha_shared_intr_get_private(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_private);
}
    406 
    407 static unsigned int
    408 alpha_shared_intr_q_count_handlers(struct alpha_shared_intr *intr_q)
    409 {
    410 	unsigned int cnt = 0;
    411 	struct alpha_shared_intrhand *ih;
    412 
    413 	TAILQ_FOREACH(ih, &intr_q->intr_q, ih_q) {
    414 		cnt++;
    415 	}
    416 
    417 	return cnt;
    418 }
    419 
/*
 * Cross-call target run on the NEW CPU (arg2) of a migrating line
 * (arg1): credit that CPU with the line's handler count.
 */
static void
alpha_shared_intr_set_cpu_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intr *intr_q = arg1;
	struct cpu_info *ci = arg2;
	unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q);

	KASSERT(ci == curcpu() || !mp_online);

	/* Post-condition check: the addition must not have wrapped. */
	ci->ci_nintrhand += cnt;
	KASSERT(cnt <= ci->ci_nintrhand);
}
    432 
/*
 * Cross-call target run on the OLD CPU (arg2) of a migrating line
 * (arg1): remove the line's handler count from that CPU.
 */
static void
alpha_shared_intr_unset_cpu_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intr *intr_q = arg1;
	struct cpu_info *ci = arg2;
	unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q);

	KASSERT(ci == curcpu() || !mp_online);

	/* Pre-condition check: the count must not go negative. */
	KASSERT(cnt <= ci->ci_nintrhand);
	ci->ci_nintrhand -= cnt;
}
    445 
/*
 * Assign interrupt line `num' to CPU `ci'.  Called with cpu_lock held.
 *
 * If the line was previously owned by a different CPU, migrate the
 * per-CPU handler accounting: first credit the new CPU, then debit the
 * old one, each on its own CPU via a high-priority cross-call (or
 * directly when already on that CPU or before MP startup).
 */
void
alpha_shared_intr_set_cpu(struct alpha_shared_intr *intr, unsigned int num,
    struct cpu_info *ci)
{
	struct cpu_info *old_ci;

	KASSERT(mutex_owned(&cpu_lock));

	old_ci = intr[num].intr_cpu;
	intr[num].intr_cpu = ci;

	/* old_ci == NULL means no handlers were ever accounted; nothing
	 * to migrate.  old_ci == ci means no ownership change. */
	if (old_ci != NULL && old_ci != ci) {
		kpreempt_disable();

		if (ci == curcpu() || !mp_online) {
			alpha_shared_intr_set_cpu_xcall(&intr[num], ci);
		} else {
			uint64_t where = xc_unicast(XC_HIGHPRI,
			    alpha_shared_intr_set_cpu_xcall, &intr[num],
			    ci, ci);
			xc_wait(where);
		}

		if (old_ci == curcpu() || !mp_online) {
			alpha_shared_intr_unset_cpu_xcall(&intr[num], old_ci);
		} else {
			uint64_t where = xc_unicast(XC_HIGHPRI,
			    alpha_shared_intr_unset_cpu_xcall, &intr[num],
			    old_ci, old_ci);
			xc_wait(where);
		}

		kpreempt_enable();
	}
}
    481 
/*
 * Return the CPU currently assigned to interrupt line `num' (NULL if
 * none has been assigned yet).
 */
struct cpu_info *
alpha_shared_intr_get_cpu(struct alpha_shared_intr *intr, unsigned int num)
{

	return (intr[num].intr_cpu);
}
    488 
/*
 * Return a pointer to the event counter for interrupt line `num'.
 */
struct evcnt *
alpha_shared_intr_evcnt(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (&intr[num].intr_evcnt);
}
    496 
/*
 * Return the name string buffer of interrupt line `num' (NULL if the
 * array was allocated with namesize == 0).
 */
char *
alpha_shared_intr_string(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_string);
}
    504