/*	$NetBSD: xen_intr.c,v 1.2.16.2 2008/01/09 01:50:16 matt Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.2.16.2 2008/01/09 01:50:16 matt Exp $");

#include <sys/param.h>

#include <machine/cpu.h>
#include <machine/intr.h>
47 1.2.16.2 matt /*
48 1.2.16.2 matt * Add a mask to cpl, and return the old value of cpl.
49 1.2.16.2 matt */
50 1.2.16.2 matt int
51 1.2.16.2 matt splraise(int nlevel)
52 1.2.16.2 matt {
53 1.2.16.2 matt int olevel;
54 1.2.16.2 matt struct cpu_info *ci = curcpu();
55 1.2.16.2 matt
56 1.2.16.2 matt olevel = ci->ci_ilevel;
57 1.2.16.2 matt if (nlevel > olevel)
58 1.2.16.2 matt ci->ci_ilevel = nlevel;
59 1.2.16.2 matt __insn_barrier();
60 1.2.16.2 matt return (olevel);
61 1.2.16.2 matt }
62 1.2.16.2 matt
/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	/* Keep the compiler from moving protected accesses below the lower. */
	__insn_barrier();

	/* Interrupt bits that become unmasked at the new, lower level. */
	imask = IUNMASK(ci, nlevel);
	/* Save the current event-mask state so the else-path can restore it. */
	psl = x86_read_psl();
	/*
	 * Block delivery before examining ci_ipending so an interrupt
	 * cannot be posted between the test and the level change.
	 */
	x86_disable_intr();
	if (ci->ci_ipending & imask) {
		/* Newly unmasked work is pending: run it at the new level. */
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		/* Restore the mask state sampled above. */
		x86_write_psl(psl);
	}
}
87 1.2.16.2 matt
#ifndef __x86_64__

/*
 * Software interrupt registration: mark soft interrupt `sir' pending
 * on the current CPU by setting its bit in ci_ipending.
 *
 * We hand-code this to ensure that it's atomic (a single orl to memory
 * cannot be interrupted on this CPU).
 *
 * XXX always scheduled on the current CPU.
 */
void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	/*
	 * This is a read-modify-write of ci_ipending, so the memory
	 * operand must use the "+m" (read-write) constraint; the original
	 * "=m" (write-only) told the compiler the old contents were dead,
	 * which is incorrect for `orl'.
	 */
	__asm volatile("orl %1, %0" : "+m"(ci->ci_ipending) : "ir" (1 << sir));
}
#endif
105 1.2.16.2 matt
/*
 * Disable interrupt delivery to this CPU: the Xen stand-in for the
 * native "cli" instruction.
 * NOTE(review): __cli() is defined elsewhere; presumably it sets the
 * vcpu's evtchn_upcall_mask (cf. x86_write_psl) -- confirm in headers.
 */
void
x86_disable_intr(void)
{
	__cli();
}
111 1.2.16.2 matt
/*
 * Re-enable interrupt delivery to this CPU: the Xen stand-in for the
 * native "sti" instruction.
 * NOTE(review): __sti() is defined elsewhere; presumably it clears the
 * vcpu's evtchn_upcall_mask (cf. x86_write_psl) -- confirm in headers.
 */
void
x86_enable_intr(void)
{
	__sti();
}
117 1.2.16.2 matt
/*
 * Return the Xen analogue of the processor status long: the vcpu's
 * event upcall mask from the shared-info page.
 * NOTE(review): hard-codes vcpu_info[0], i.e. assumes a single-vcpu
 * (or CPU-0-only) guest -- verify before use on MP configurations.
 */
u_long
x86_read_psl(void)
{

	return (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask);
}
124 1.2.16.2 matt
/*
 * Write the Xen analogue of the processor status long: set the vcpu's
 * event upcall mask to `psl', and if events became unmasked while some
 * were pending, force delivery of the callback so they are not lost.
 * NOTE(review): like x86_read_psl, this hard-codes vcpu_info[0].
 */
void
x86_write_psl(u_long psl)
{

	HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_mask = psl;
	/*
	 * Order the mask store before the pending-flag load below, so an
	 * event posted while we were masked cannot be missed.
	 */
	x86_lfence();
	if (HYPERVISOR_shared_info->vcpu_info[0].evtchn_upcall_pending &&
	    psl == 0) {
		/* Events are pending and now unmasked: deliver them. */
		hypervisor_force_callback();
	}
}
136