/*	$NetBSD: xen.h,v 1.32.4.1 2011/04/21 01:41:33 rmind Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif


#ifndef _LOCORE

#include <machine/cpufunc.h>

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char xcp_bootdev[16];	/* sizeof(dv_xname) */
	struct xen_netinfo xcp_netinfo;
	char xcp_console[16];
	char xcp_pcidevs[64];
};

#define XEN_PARSE_BOOTDEV	0
#define XEN_PARSE_NETINFO	1
#define XEN_PARSE_CONSOLE	2
#define XEN_PARSE_BOOTFLAGS	3
#define XEN_PARSE_PCIBACK	4

void xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
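
/*
 * Usage sketch (hypothetical caller, not part of this header): a
 * boot-time consumer passes one of the XEN_PARSE_* selectors together
 * with a scratch union and reads back the matching member, e.g.:
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	memset(&xcp, 0, sizeof(xcp));
 *	xen_parse_cmdline(XEN_PARSE_CONSOLE, &xcp);
 *	if (xcp.xcp_console[0] != '\0')
 *		... pick the console named in xcp.xcp_console ...
 */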

void xenconscn_attach(void);

void xenprivcmd_init(void);

void xbdback_init(void);
void xennetback_init(void);
void xen_shm_init(void);

void xenevt_event(int);
void xenevt_setipending(int, int);
void xenevt_notify(void);

void idle_block(void);

#if defined(XENDEBUG) || 1 /* XXX */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif
#endif /* !_LOCORE */

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */

/*
 * These are also defined in xen-public/xen.h, but that header can't be
 * pulled in here because these values are used in start-of-day assembly.
 * The .h files need a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS	0xe019	/* GDT index 259 */
#define FLAT_RING1_DS	0xe021	/* GDT index 260 */
#define FLAT_RING1_SS	0xe021	/* GDT index 260 */
#define FLAT_RING3_CS	0xe02b	/* GDT index 261 */
#define FLAT_RING3_DS	0xe033	/* GDT index 262 */
#define FLAT_RING3_SS	0xe033	/* GDT index 262 */
#endif

#define __KERNEL_CS	FLAT_RING1_CS
#define __KERNEL_DS	FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);

#define xendomain_is_dom0()		(xen_start_info.flags & SIF_INITDOMAIN)
#define xendomain_is_privileged()	(xen_start_info.flags & SIF_PRIVILEGED)
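
/*
 * Illustrative use (hypothetical, not from this header): backend
 * drivers and privileged setup are normally gated on these tests, e.g.:
 *
 *	if (xendomain_is_dom0())
 *		xbdback_init();
 */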

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __save_flags(x)						\
do {								\
	(x) = curcpu()->ci_vcpu->evtchn_upcall_mask;		\
} while (0)

#define __restore_flags(x)					\
do {								\
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;	\
	__insn_barrier();					\
	if ((_vci->evtchn_upcall_mask = (x)) == 0) {		\
		x86_lfence();					\
		if (__predict_false(_vci->evtchn_upcall_pending)) \
			hypervisor_force_callback();		\
	}							\
} while (0)

#define __cli()							\
do {								\
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;		\
	x86_lfence();						\
} while (0)

#define __sti()							\
do {								\
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;	\
	__insn_barrier();					\
	_vci->evtchn_upcall_mask = 0;				\
	x86_lfence(); /* unmask then check (avoid races) */	\
	if (__predict_false(_vci->evtchn_upcall_pending))	\
		hypervisor_force_callback();			\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)	do {					\
	__save_flags(x);					\
	__cli();						\
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)	do {					\
	__save_flags(x);					\
	__sti();						\
} while (/* CONSTCOND */ 0)
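
/*
 * Usage sketch (hypothetical, not from this header): bracket a short
 * critical section so virtual event delivery is masked and the previous
 * mask state is restored afterwards:
 *
 *	u_long flags;
 *
 *	save_and_cli(flags);
 *	... touch state that an event callback could also touch ...
 *	restore_flags(flags);
 */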

/*
 * Always assume we're on a multiprocessor: we don't know how many
 * CPUs the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

#define XATOMIC_T u_long
#ifdef __x86_64__
#define LONG_SHIFT 6
#define LONG_MASK 63
#else /* __x86_64__ */
#define LONG_SHIFT 5
#define LONG_MASK 31
#endif /* __x86_64__ */

#define xen_ffs __builtin_ffsl

/* Atomically exchange *ptr with val; returns the previous value. */
static __inline XATOMIC_T
xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
		"xchgq %0,%1"
#else
		"xchgl %0,%1"
#endif
		:"=r" (result)
		:"m" (*ptr), "0" (val)
		:"memory");

	return result;
}
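
/*
 * Usage sketch (hypothetical, not from this header): the classic Xen
 * event-channel pattern atomically snapshots and clears a selector word
 * in one step, so no pending bit can be lost:
 *
 *	unsigned long sel;
 *
 *	sel = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
 *	... dispatch each group of ports whose bit is set in sel ...
 */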

/*
 * Atomically compare *ptr with val and, if equal, store newval;
 * returns the value read from *ptr.
 */
static inline uint16_t
xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
		"cmpxchgw %w1,%2"
		:"=a" (result)
		:"q"(newval), "m" (*ptr), "0" (val)
		:"memory");

	return result;
}
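
/*
 * Usage sketch (hypothetical, not from this header): the usual
 * compare-and-swap retry loop on a 16-bit word, here setting bits:
 *
 *	uint16_t old, new;
 *
 *	do {
 *		old = *p;
 *		new = old | bits;
 *	} while (xen_atomic_cmpxchg16(p, old, new) != old);
 */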

/* Atomically set the given bits in *ptr. */
static __inline void
xen_atomic_setbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
#ifdef __x86_64__
	__asm volatile("lock ; orq %1,%0" : "=m" (*ptr) : "ir" (bits));
#else
	__asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits));
#endif
}

/* Atomically clear the given bits in *ptr. */
static __inline void
xen_atomic_clearbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
#ifdef __x86_64__
	__asm volatile("lock ; andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
#else
	__asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
#endif
}

/* Atomically clear bit bitno in *ptr; returns nonzero if it was set. */
static __inline XATOMIC_T
xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
		"btrq %2,%1 ;"
		"sbbq %0,%0"
#else
		"btrl %2,%1 ;"
		"sbbl %0,%0"
#endif
		:"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
		:"Ir" (bitno) : "memory");
	return result;
}

/* Atomically set bit bitno in *ptr; returns nonzero if it was already set. */
static __inline XATOMIC_T
xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
		"btsq %2,%1 ;"
		"sbbq %0,%0"
#else
		"btsl %2,%1 ;"
		"sbbl %0,%0"
#endif
		:"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
		:"Ir" (bitno) : "memory");
	return result;
}

/* Test bit bitno in *ptr when bitno is a compile-time constant. */
static __inline int
xen_constant_test_bit(const volatile void *ptr, unsigned long bitno)
{
	return ((1UL << (bitno & LONG_MASK)) &
	    (((const volatile XATOMIC_T *) ptr)[bitno >> LONG_SHIFT])) != 0;
}

/* Test bit bitno in *ptr when bitno is only known at run time. */
static __inline XATOMIC_T
xen_variable_test_bit(const volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(
#ifdef __x86_64__
		"btq %2,%1 ;"
		"sbbq %0,%0"
#else
		"btl %2,%1 ;"
		"sbbl %0,%0"
#endif
		:"=r" (result)
		:"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
	return result;
}

#define xen_atomic_test_bit(ptr, bitno)			\
	(__builtin_constant_p(bitno) ?			\
	    xen_constant_test_bit((ptr),(bitno)) :	\
	    xen_variable_test_bit((ptr),(bitno)))
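
/*
 * Usage sketch (hypothetical, not from this header): draining a pending
 * bitmap, where test-and-clear guarantees each set bit is claimed by
 * exactly one CPU even when several race on the same word:
 *
 *	if (xen_atomic_test_and_clear_bit(&pending[0], port))
 *		xenevt_event(port);
 *
 * Only the CPU that actually clears the bit sees a nonzero return and
 * handles the event; a racing CPU sees zero and skips it.
 */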

/* Atomically set bit bitno in *ptr. */
static __inline void
xen_atomic_set_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
		"btsq %1,%0"
#else
		"btsl %1,%0"
#endif
		:"=m" (*(volatile XATOMIC_T *)(ptr))
		:"Ir" (bitno));
}

/* Atomically clear bit bitno in *ptr. */
static __inline void
xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
		"btrq %1,%0"
#else
		"btrl %1,%0"
#endif
		:"=m" (*(volatile XATOMIC_T *)(ptr))
		:"Ir" (bitno));
}

#undef XATOMIC_T

void wbinvd(void);

#endif /* !_LOCORE */

#endif /* _OS_H_ */