/*	$NetBSD: xen_intr.c,v 1.1.4.1 2007/12/08 18:18:24 mjf Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.1.4.1 2007/12/08 18:18:24 mjf Exp $");

#include <sys/param.h>

#include <machine/cpu.h>
#include <machine/intr.h>

/*
 * Add a mask to cpl, and return the old value of cpl.
 */
int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__insn_barrier();
	return (olevel);
}
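
/*
 * Example (illustrative sketch only): critical sections are normally
 * bracketed by raising the priority level and later restoring the saved
 * value.  IPL_HIGH is the standard NetBSD interrupt-priority constant,
 * assumed here to be visible via <machine/intr.h>.
 *
 *	int s;
 *
 *	s = splraise(IPL_HIGH);		block interrupts up to IPL_HIGH
 *	... touch state shared with interrupt handlers ...
 *	spllower(s);			restore; pending handlers now run
 */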

/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	__insn_barrier();

	imask = IUNMASK(ci, nlevel);
	psl = x86_read_psl();
	/*
	 * Disable interrupts so the check of ci_ipending and the update
	 * of ci_ilevel cannot race with a newly posted interrupt.
	 */
	x86_disable_intr();
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		x86_write_psl(psl);
	}
}
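
/*
 * Note: on this port the machine-independent splx() is expected to be
 * implemented in terms of spllower(), so the usual splraise()/splx()
 * pairs elsewhere in the kernel funnel through the two functions above.
 */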

#ifndef __x86_64__

/*
 * Software interrupt registration
 *
 * We hand-code this to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	/*
	 * A single "orl" performs the read-modify-write of ci_ipending
	 * in one instruction, so an interrupt on this CPU cannot tear it.
	 */
	__asm volatile("orl %1, %0" : "=m"(ci->ci_ipending) : "ir" (1 << sir));
}
#endif
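
/*
 * Illustrative use of softintr() (a sketch; SIR_NET is only an example
 * name here, the real SIR_* bit numbers live with the other x86
 * interrupt definitions): a driver marks its software interrupt pending
 * and the handler runs on the next spllower()/Xspllower() pass that
 * unmasks it.
 *
 *	softintr(SIR_NET);	mark the soft network interrupt pending
 */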

void
x86_disable_intr(void)
{
	__cli();
}

void
x86_enable_intr(void)
{
	__sti();
}
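
/*
 * Under Xen, __cli() and __sti() above are presumably not the raw
 * cli/sti instructions; they are expected to set and clear the same
 * per-VCPU event channel upcall mask that x86_read_psl() and
 * x86_write_psl() below operate on, which is how a paravirtualized
 * guest blocks and unblocks interrupt (event) delivery.
 */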

/*
 * Under Xen the "PSL" manipulated here is not the hardware EFLAGS but
 * the per-VCPU event channel upcall mask in the shared info page: a
 * non-zero mask blocks event (interrupt) delivery to this guest.
 */
u_long
x86_read_psl(void)
{

	return (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask);
}

void
x86_write_psl(u_long psl)
{

	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = psl;
	x86_lfence();
	if (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_pending &&
	    psl == 0) {
		hypervisor_force_callback();
	}
}
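
/*
 * Illustrative save/restore pattern (a sketch only) built from the
 * primitives above, mirroring what spllower() itself does:
 *
 *	u_long psl;
 *
 *	psl = x86_read_psl();		remember the current mask state
 *	x86_disable_intr();		mask event delivery
 *	... short critical section ...
 *	x86_write_psl(psl);		restore; if the saved state was
 *					unmasked and events are pending,
 *					a callback is forced
 */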