/* $NetBSD: xen.h,v 1.9.2.4 2005/01/31 17:21:16 bouyer Exp $ */
2
3 /*
4 *
5 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27
28 #ifndef _XEN_H
29 #define _XEN_H
30
31 #ifndef _LOCORE
32
/*
 * Network boot configuration extracted from the kernel command line;
 * filled in by xen_parse_cmdline() when called with XEN_PARSE_NETINFO.
 */
struct xen_netinfo {
	uint32_t xi_ifno;	/* interface unit number */
	char *xi_root;		/* root path string from the command line */
	uint32_t xi_ip[5];	/* assumes: ip/netmask/gateway/server addresses — TODO confirm order against xen_parse_cmdline() */
};
38
/*
 * Result buffer for xen_parse_cmdline().  Which member is valid after
 * the call depends on the XEN_PARSE_* selector the caller passed.
 */
union xen_cmdline_parseinfo {
	char xcp_bootdev[16];		/* sizeof(dv_xname) */
	struct xen_netinfo xcp_netinfo;
	char xcp_console[16];
};
44
/*
 * Selectors for xen_parse_cmdline(): which item to extract from the
 * command line (and which member of the union above gets filled in).
 */
#define XEN_PARSE_BOOTDEV 0
#define XEN_PARSE_NETINFO 1
#define XEN_PARSE_CONSOLE 2

void xen_parse_cmdline(int, union xen_cmdline_parseinfo *);

void xenconscn_attach(void);	/* attach the Xen virtual console */

/* initialization hooks for the Xen pseudo-devices */
void xenmachmem_init(void);
void xenprivcmd_init(void);
void xenvfr_init(void);

/* NOTE(review): presumably delivers an event on the given channel/port — verify against caller */
void xenevt_event(int);

/* NOTE(review): presumably blocks the idle loop until the next event — verify against caller */
void idle_block(void);

#ifdef XENDEBUG
/* debug console output, compiled in only under XENDEBUG */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif
65
66 #endif
67
68 #endif /* _XEN_H */
69
70 /******************************************************************************
71 * os.h
72 *
73 * random collection of macros and definition
74 */
75
76 #ifndef _OS_H_
77 #define _OS_H_
78
79 /*
80 * These are the segment descriptors provided for us by the hypervisor.
81 * For now, these are hardwired -- guest OSes cannot update the GDT
82 * or LDT.
83 *
84 * It shouldn't be hard to support descriptor-table frobbing -- let me
85 * know if the BSD or XP ports require flexibility here.
86 */
87
88
89 /*
90 * these are also defined in xen-public/xen.h but can't be pulled in as
91 * they are used in start of day assembly. Need to clean up the .h files
92 * a bit more...
93 */
94
/*
 * Hard-wired segment selectors handed to us by the hypervisor.
 * Guarded because they are also defined in xen-public/xen.h (see the
 * comment above about start-of-day assembly).
 */
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0x0819	/* kernel code runs in ring 1 under Xen */
#define FLAT_RING1_DS 0x0821
#define FLAT_RING3_CS 0x082b	/* user code stays in ring 3 */
#define FLAT_RING3_DS 0x0833
#endif

/* the kernel uses the ring-1 selectors */
#define __KERNEL_CS FLAT_RING1_CS
#define __KERNEL_DS FLAT_RING1_DS
104
105 /* Everything below this point is not included by assembler (.S) files. */
106 #ifndef _LOCORE
107
/* some function prototypes */
void trap_init(void);		/* trap/exception vector setup */
void xpq_flush_cache(void);	/* flushes the CPU cache on behalf of wbinvd() below */
111
112
113 /*
114 * STI/CLI equivalents. These basically set and clear the virtual
115 * event_enable flag in the shared_info structure. Note that when
116 * the enable bit is set, there may be pending events to be handled.
117 * We may therefore call into do_hypervisor_callback() directly.
118 */
119
/*
 * __save_flags(x): snapshot the virtual "interrupt flag".  Under Xen
 * this is the evtchn_upcall_mask byte in the shared info page;
 * non-zero means event (interrupt) delivery is masked.
 */
#define __save_flags(x)						\
do {								\
	(x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
} while (0)

/*
 * __restore_flags(x): write the saved mask back.  If this re-enables
 * event delivery, re-check for upcalls that became pending while
 * masked and force the hypervisor callback so they are not lost.
 */
#define __restore_flags(x)					\
do {								\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info; \
	__insn_barrier();					\
	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) { \
		__insn_barrier();				\
		if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
			hypervisor_force_callback();		\
	}							\
} while (0)
135
/* __cli(): mask event delivery (virtual CLI equivalent). */
#define __cli()							\
do {								\
	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
	__insn_barrier();					\
} while (0)

/*
 * __sti(): unmask event delivery (virtual STI equivalent), then check
 * for events that arrived while masked and force the callback by hand,
 * since unmasking alone does not deliver already-pending events.
 */
#define __sti()							\
do {								\
	volatile shared_info_t *_shared = HYPERVISOR_shared_info; \
	__insn_barrier();					\
	_shared->vcpu_data[0].evtchn_upcall_mask = 0;		\
	__insn_barrier(); /* unmask then check (avoid races) */	\
	if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
		hypervisor_force_callback();			\
} while (0)
151
/*
 * Linux-compatible interrupt-control aliases built on the virtual
 * CLI/STI primitives above.
 */
#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		do {			\
	__save_flags(x);				\
	__cli();					\
} while (/* CONSTCOND */ 0)
/*
 * BUG FIX: save_and_sti() previously expanded to __save_and_sti(),
 * which is defined nowhere, so any use failed to compile.  Expand to
 * the save-then-enable sequence directly, mirroring save_and_cli().
 */
#define save_and_sti(x)		do {			\
	__save_flags(x);				\
	__sti();					\
} while (/* CONSTCOND */ 0)
161
/*
 * LOCK prefix for the bus-locked read-modify-write bit operations
 * below; only needed (and only paid for) on multiprocessor kernels.
 */
#ifdef MULTIPROCESSOR
#define __LOCK_PREFIX "lock; "
#else
#define __LOCK_PREFIX ""
#endif
167
/*
 * Atomically exchange *ptr with val and return the previous value.
 * xchgl with a memory operand is implicitly locked on x86, so no
 * __LOCK_PREFIX is needed here.
 *
 * NOTE(review): *ptr is modified but listed only as an input operand;
 * the "memory" clobber is what keeps the compiler honest — consider
 * a "+m" operand instead.  Also, val/result are unsigned long while
 * the interface is uint32_t; identical on ILP32 i386, but verify
 * before reusing elsewhere.
 */
static __inline__ uint32_t
x86_atomic_xchg(volatile uint32_t *ptr, unsigned long val)
{
	unsigned long result;

	__asm __volatile("xchgl %0,%1"
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}
180
/*
 * Atomically clear bit 'bitno' in the uint32_t array at 'ptr' and
 * return its previous value.  btrl leaves the old bit in CF; the
 * "sbbl %0,%0" idiom then produces 0 (bit was clear) or -1 (bit was
 * set), so the result is meaningful only as zero/non-zero.
 */
static __inline__ int
x86_atomic_test_and_clear_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(__LOCK_PREFIX
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}
193
/*
 * Atomically set bit 'bitno' in the uint32_t array at 'ptr' and
 * return its previous value.  As with test_and_clear above, the
 * result is 0 or -1 — treat it as a boolean only.
 */
static __inline__ int
x86_atomic_test_and_set_bit(volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(__LOCK_PREFIX
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}
206
/*
 * Plain-C (non-atomic) bit test: returns 1 if bit 'bitno' is set in
 * the uint32_t array at 'ptr', else 0.  Chosen by x86_atomic_test_bit()
 * when the bit number is a compile-time constant, letting the compiler
 * fold the word index and mask.
 */
static __inline int
x86_constant_test_bit(const volatile void *ptr, int bitno)
{
	const volatile uint32_t *words = ptr;
	unsigned long mask = 1UL << (bitno & 31);

	return (words[bitno >> 5] & mask) != 0;
}
213
/*
 * Non-atomic bit test via btl for a run-time bit number.  The
 * "sbbl %0,%0" idiom yields 0 or -1 (unlike x86_constant_test_bit,
 * which yields 0 or 1), so callers must only test for zero/non-zero.
 * Pure read: no LOCK prefix and no memory output operand needed.
 */
static __inline int
x86_variable_test_bit(const volatile void *ptr, int bitno)
{
	int result;

	__asm __volatile(
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
	    :"=r" (result)
	    :"m" (*(volatile uint32_t *)(ptr)), "Ir" (bitno));
	return result;
}
226
/*
 * Test a bit, dispatching at compile time: the pure-C version when
 * 'bitno' is a compile-time constant (fully foldable), the asm version
 * otherwise.  The two branches return different truthy values (1 vs
 * -1), so use the result only as zero/non-zero.
 */
#define x86_atomic_test_bit(ptr, bitno)			\
	(__builtin_constant_p(bitno) ?			\
	    x86_constant_test_bit((ptr),(bitno)) :	\
	    x86_variable_test_bit((ptr),(bitno)))
231
/* Atomically set bit 'bitno' in the uint32_t array at 'ptr'. */
static __inline void
x86_atomic_set_bit(volatile void *ptr, int bitno)
{
	__asm __volatile(__LOCK_PREFIX
	    "btsl %1,%0"
	    :"=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno));
}
240
/* Atomically clear bit 'bitno' in the uint32_t array at 'ptr'. */
static __inline void
x86_atomic_clear_bit(volatile void *ptr, int bitno)
{
	__asm __volatile(__LOCK_PREFIX
	    "btrl %1,%0"
	    :"=m" (*(volatile uint32_t *)(ptr))
	    :"Ir" (bitno));
}
249
/*
 * wbinvd is a privileged instruction a ring-1 Xen guest cannot
 * execute, so delegate the cache flush to xpq_flush_cache() (declared
 * above), which asks the hypervisor to do it for us.
 */
static __inline void
wbinvd(void)
{
	xpq_flush_cache();
}
255
#endif /* !_LOCORE */
257
258 #endif /* _OS_H_ */
259