/* $NetBSD: xen.h,v 1.9.2.5 2005/02/12 22:25:01 bouyer Exp $ */
2
3 /*
4 *
5 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27
28 #ifndef _XEN_H
29 #define _XEN_H
30
31 #ifndef _LOCORE
32
/*
 * Network boot information filled in from the Xen kernel command
 * line (see xen_parse_cmdline() with XEN_PARSE_NETINFO).
 */
struct xen_netinfo {
	uint32_t xi_ifno;	/* interface number */
	char *xi_root;		/* root device/path string */
	uint32_t xi_ip[5];	/* IP parameters; exact slot layout is set by
				 * the command-line parser -- TODO confirm */
};
38
/*
 * Result buffer for xen_parse_cmdline(); which member is valid
 * depends on the XEN_PARSE_* selector passed to the parser.
 */
union xen_cmdline_parseinfo {
	char xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo xcp_netinfo;
	char xcp_console[16];
};
44
/* Selectors telling xen_parse_cmdline() which item to extract. */
#define XEN_PARSE_BOOTDEV 0
#define XEN_PARSE_NETINFO 1
#define XEN_PARSE_CONSOLE 2

/* Parse one item from the Xen-supplied command line into *info. */
void xen_parse_cmdline(int, union xen_cmdline_parseinfo *);

/* Xen console attach hook -- presumably called at consinit time. */
void xenconscn_attach(void);

/* Initialize the privileged-command (privcmd) interface. */
void xenprivcmd_init(void);

/* Deliver an event on the given channel -- TODO confirm semantics. */
void xenevt_event(int);

/* Idle-loop helper; presumably blocks in the hypervisor until an
 * event arrives -- TODO confirm. */
void idle_block(void);

#ifdef XENDEBUG
/* Debug-only kernel printf/vprintf variants. */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif
63
64 #endif
65
66 #endif /* _XEN_H */
67
68 /******************************************************************************
69 * os.h
70 *
71 * random collection of macros and definition
72 */
73
74 #ifndef _OS_H_
75 #define _OS_H_
76
77 /*
78 * These are the segment descriptors provided for us by the hypervisor.
79 * For now, these are hardwired -- guest OSes cannot update the GDT
80 * or LDT.
81 *
82 * It shouldn't be hard to support descriptor-table frobbing -- let me
83 * know if the BSD or XP ports require flexibility here.
84 */
85
86
87 /*
88 * these are also defined in xen-public/xen.h but can't be pulled in as
89 * they are used in start of day assembly. Need to clean up the .h files
90 * a bit more...
91 */
92
/*
 * Flat segment selectors provided by the hypervisor: ring 1 for
 * the guest kernel, ring 3 for user.  Duplicated here (guarded)
 * because xen-public/xen.h cannot be pulled into the start-of-day
 * assembly (see comment above).
 */
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0x0819
#define FLAT_RING1_DS 0x0821
#define FLAT_RING3_CS 0x082b
#define FLAT_RING3_DS 0x0833
#endif

/* The guest kernel runs in ring 1 under Xen, not ring 0. */
#define __KERNEL_CS FLAT_RING1_CS
#define __KERNEL_DS FLAT_RING1_DS
102
103 /* Everything below this point is not included by assembler (.S) files. */
104 #ifndef _LOCORE
105
/* some function prototypes */
void trap_init(void);
/* Cache flush service; used by wbinvd() below (defined elsewhere). */
void xpq_flush_cache(void);
109
110
111 /*
112 * STI/CLI equivalents. These basically set and clear the virtual
113 * event_enable flag in the shared_info structure. Note that when
114 * the enable bit is set, there may be pending events to be handled.
115 * We may therefore call into do_hypervisor_callback() directly.
116 */
117
/*
 * __save_flags(x): record the current virtual interrupt state.
 * Under Xen the "interrupt enable" state is the per-VCPU
 * event-channel upcall mask in the shared info page
 * (non-zero = events masked).
 */
#define __save_flags(x) \
do { \
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
} while (0)
122
/*
 * __restore_flags(x): restore a state saved with __save_flags().
 * If this unmasks event delivery and events became pending while
 * masked, force a hypervisor callback so they are handled now.
 * The barriers order the mask store against the pending-flag read
 * (see the STI/CLI comment above).
 */
#define __restore_flags(x) \
do { \
	volatile shared_info_t *_shared = HYPERVISOR_shared_info; \
	__insn_barrier(); \
	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) { \
		__insn_barrier(); \
		if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
			hypervisor_force_callback(); \
	} \
} while (0)
133
/*
 * __cli(): mask event delivery (virtual CLI).  The barrier keeps
 * subsequent memory accesses from being reordered before the mask
 * is set.
 */
#define __cli() \
do { \
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
	__insn_barrier(); \
} while (0)
139
/*
 * __sti(): unmask event delivery (virtual STI), then check for
 * events that became pending while masked and force a callback to
 * handle them.  Unmask-then-check ordering (with barriers) avoids
 * losing an event that arrives in the window.
 */
#define __sti() \
do { \
	volatile shared_info_t *_shared = HYPERVISOR_shared_info; \
	__insn_barrier(); \
	_shared->vcpu_data[0].evtchn_upcall_mask = 0; \
	__insn_barrier(); /* unmask then check (avoid races) */ \
	if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
		hypervisor_force_callback(); \
} while (0)
149
/* Traditional x86 interrupt-flag names mapped onto the Xen
 * virtual equivalents above. */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) do { \
	__save_flags(x); \
	__cli(); \
} while (/* CONSTCOND */ 0)
/*
 * NOTE(review): __save_and_sti() is not defined anywhere in this
 * file; save_and_sti() will fail to compile if ever used --
 * presumably it should expand to __save_flags() + __sti().
 */
#define save_and_sti(x) __save_and_sti(x)
159
/*
 * Bus-lock prefix for the atomic ops below: needed only when
 * another CPU can touch the same word (MULTIPROCESSOR kernels).
 */
#ifdef MULTIPROCESSOR
#define __LOCK_PREFIX "lock; "
#else
#define __LOCK_PREFIX ""
#endif
165
/*
 * Atomically exchange the 32-bit word at `ptr' with `val' and
 * return the word's previous contents.  xchg with a memory
 * operand is implicitly locked, so no __LOCK_PREFIX is needed
 * even on MP.
 */
static __inline__ uint32_t
x86_atomic_xchg(volatile uint32_t *ptr, unsigned long val)
{
	uint32_t result;

	/*
	 * The memory operand is both read and written by xchgl, so
	 * it must be an in/out ("+m") operand; the previous code
	 * passed it as a plain input and relied only on the blanket
	 * "memory" clobber.
	 */
	__asm __volatile("xchgl %0,%1"
		:"=r" (result), "+m" (*ptr)
		:"0" ((uint32_t)val)
		:"memory");

	return result;
}
178
/*
 * Atomically clear bit `bitno' in the 32-bit word array at `ptr'
 * and return its previous value.  btrl puts the old bit in CF and
 * sbbl expands CF to 0 or -1 (all ones), so callers must test the
 * result for zero/non-zero only.
 */
static __inline__ int
x86_atomic_test_and_clear_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(__LOCK_PREFIX
		"btrl %2,%1 ;"
		"sbbl %0,%0"
		:"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
		:"Ir" (bitno) : "memory");
	return result;
}
191
/*
 * Atomically set bit `bitno' in the 32-bit word array at `ptr'
 * and return its previous value.  As with test_and_clear, the
 * result is 0 or -1 (sbbl of CF), so test only for zero/non-zero.
 */
static __inline__ int
x86_atomic_test_and_set_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(__LOCK_PREFIX
		"btsl %2,%1 ;"
		"sbbl %0,%0"
		:"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
		:"Ir" (bitno) : "memory");
	return result;
}
204
/*
 * Test bit `bitno' in the 32-bit word array at `ptr'; returns
 * non-zero (1) if the bit is set, 0 otherwise.  Pure-C version
 * chosen by x86_atomic_test_bit() when `bitno' is a compile-time
 * constant.
 */
static __inline int
x86_constant_test_bit(const volatile void *ptr, int bitno)
{
	const volatile uint32_t *words = ptr;
	unsigned long mask = 1UL << (bitno & 31);

	return (words[bitno >> 5] & mask) != 0;
}
211
/*
 * Test bit `bitno' (not a compile-time constant) in the 32-bit
 * word array at `ptr'.  btl sets CF to the bit's value and sbbl
 * expands it to 0 (clear) or -1 (set), so the result must be
 * tested for zero/non-zero only.  Not atomic: no lock prefix, no
 * write to memory.
 */
static __inline int
x86_variable_test_bit(const volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(
		"btl %2,%1 ;"
		"sbbl %0,%0"
		:"=r" (result)
		:"m" (*(volatile uint32_t *)(ptr)), "Ir" (bitno));
	return result;
}
224
/*
 * Test a bit: dispatch to the pure-C version when `bitno' is a
 * compile-time constant, the inline-asm version otherwise.
 * Non-zero iff the bit is set.
 */
#define x86_atomic_test_bit(ptr, bitno) \
	(__builtin_constant_p(bitno) ? \
	x86_constant_test_bit((ptr),(bitno)) : \
	x86_variable_test_bit((ptr),(bitno)))
229
230 static __inline void
231 x86_atomic_set_bit(volatile void *ptr, int bitno)
232 {
233 __asm __volatile(__LOCK_PREFIX
234 "btsl %1,%0"
235 :"=m" (*(volatile uint32_t *)(ptr))
236 :"Ir" (bitno));
237 }
238
239 static __inline void
240 x86_atomic_clear_bit(volatile void *ptr, int bitno)
241 {
242 __asm __volatile(__LOCK_PREFIX
243 "btrl %1,%0"
244 :"=m" (*(volatile uint32_t *)(ptr))
245 :"Ir" (bitno));
246 }
247
/*
 * wbinvd(): cache write-back/invalidate.  The real WBINVD
 * instruction is privileged and unavailable to a ring-1 guest, so
 * this is routed through xpq_flush_cache() instead -- presumably
 * a hypervisor-mediated flush (see prototype above).
 */
static __inline void
wbinvd(void)
{

	xpq_flush_cache();
}
253
254 #endif /* !__ASSEMBLY__ */
255
256 #endif /* _OS_H_ */
257