/*	$NetBSD: xen.h,v 1.42 2019/02/02 12:32:55 cherry Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif


#ifndef _LOCORE

#include <machine/cpufunc.h>

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char xcp_bootdev[144];
	struct xen_netinfo xcp_netinfo;
	char xcp_console[16];
	char xcp_pcidevs[64];
};

#define XEN_PARSE_BOOTDEV	0
#define XEN_PARSE_NETINFO	1
#define XEN_PARSE_CONSOLE	2
#define XEN_PARSE_BOOTFLAGS	3
#define XEN_PARSE_PCIBACK	4

void xen_parse_cmdline(int, union xen_cmdline_parseinfo *);

void xenconscn_attach(void);

void xenprivcmd_init(void);

void xbdback_init(void);
void xennetback_init(void);
void xen_shm_init(void);

void xenevt_event(int);
void xenevt_setipending(int, int);
void xenevt_notify(void);

void idle_block(void);

/* xen_machdep.c */
void sysctl_xen_suspend_setup(void);

#include <sys/stdarg.h>
void printk(const char *, ...);

#endif

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h but can't be pulled in as
 * they are used in start-of-day assembly. Need to clean up the .h files
 * a bit more...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
#define FLAT_RING3_SS 0xe033    /* GDT index 262 */
#endif

#define __KERNEL_CS FLAT_RING1_CS
#define __KERNEL_DS FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);

#define xendomain_is_dom0()		(xen_start_info.flags & SIF_INITDOMAIN)
#define xendomain_is_privileged()	(xen_start_info.flags & SIF_PRIVILEGED)

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __save_flags(x) \
do { \
	(x) = curcpu()->ci_vcpu->evtchn_upcall_mask; \
} while (0)

#define __restore_flags(x) \
do { \
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \
	__insn_barrier(); \
	if ((_vci->evtchn_upcall_mask = (x)) == 0) { \
		x86_lfence(); \
		if (__predict_false(_vci->evtchn_upcall_pending)) \
			hypervisor_force_callback(); \
	} \
} while (0)

#define __cli() \
do { \
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1; \
	x86_lfence(); \
} while (0)

#define __sti() \
do { \
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \
	__insn_barrier(); \
	_vci->evtchn_upcall_mask = 0; \
	x86_lfence(); /* unmask then check (avoid races) */ \
	if (__predict_false(_vci->evtchn_upcall_pending)) \
		hypervisor_force_callback(); \
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)	do { \
	__save_flags(x); \
	__cli(); \
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)	__save_and_sti(x)
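
/*
 * Usage sketch for the event-mask macros above: save the current upcall
 * mask, block event delivery around a critical section, then restore the
 * old mask (which may force a callback if events became pending while
 * masked).  "do_critical_work" is a hypothetical placeholder, not a real
 * kernel function.
 *
 *	u_long flags;
 *
 *	save_and_cli(flags);
 *	do_critical_work();
 *	restore_flags(flags);
 */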

/*
 * Always assume we're on a multiprocessor. We don't know how many CPUs the
 * underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

#define XATOMIC_T u_long
#ifdef __x86_64__
#define LONG_SHIFT 6
#define LONG_MASK 63
#else /* __x86_64__ */
#define LONG_SHIFT 5
#define LONG_MASK 31
#endif /* __x86_64__ */

#define xen_ffs __builtin_ffsl

static __inline XATOMIC_T
xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "xchgq %0,%1"
#else
	    "xchgl %0,%1"
#endif
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static inline uint16_t
xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
	    "cmpxchgw %w1,%2"
	    :"=a" (result)
	    :"q" (newval), "m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static __inline void
xen_atomic_setbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
#ifdef __x86_64__
	__asm volatile("lock ; orq %1,%0" : "=m" (*ptr) : "ir" (bits));
#else
	__asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits));
#endif
}

static __inline void
xen_atomic_clearbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
{
#ifdef __x86_64__
	__asm volatile("lock ; andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
#else
	__asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
#endif
}

static __inline XATOMIC_T
xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline XATOMIC_T
xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline int
xen_constant_test_bit(const volatile void *ptr, unsigned long bitno)
{
	return ((1UL << (bitno & LONG_MASK)) &
	    (((const volatile XATOMIC_T *) ptr)[bitno >> LONG_SHIFT])) != 0;
}

static __inline XATOMIC_T
xen_variable_test_bit(const volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(
#ifdef __x86_64__
	    "btq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result)
	    :"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
	return result;
}

#define xen_atomic_test_bit(ptr, bitno) \
	(__builtin_constant_p(bitno) ? \
	xen_constant_test_bit((ptr), (bitno)) : \
	xen_variable_test_bit((ptr), (bitno)))

static __inline void
xen_atomic_set_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %1,%0"
#else
	    "btsl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %1,%0"
#else
	    "btrl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}
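
/*
 * Usage sketch for the atomic bit helpers above, on a hypothetical shared
 * bitmap (for instance an event-channel pending mask); the names "pending"
 * and "port" are illustrative only.
 *
 *	volatile unsigned long pending[4];
 *	unsigned int port = 3;
 *
 *	xen_atomic_set_bit(pending, port);
 *	if (xen_atomic_test_and_clear_bit(pending, port))
 *		... the bit was set and has now been cleared atomically
 */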

#undef XATOMIC_T

void wbinvd(void);

#include <xen/include/public/features.h>
#include <sys/systm.h>

extern bool xen_feature_tables[];
void xen_init_features(void);
static __inline bool
xen_feature(int f)
{
	KASSERT(f < XENFEAT_NR_SUBMAPS * 32);
	return xen_feature_tables[f];
}
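
/*
 * Usage sketch: xen_init_features() is expected to populate
 * xen_feature_tables[] from the hypervisor's feature submaps, after which
 * xen_feature() is a plain table lookup.  The XENFEAT_* constants come
 * from the Xen public features.h included above, e.g.:
 *
 *	if (xen_feature(XENFEAT_auto_translated_physmap))
 *		... the guest runs with auto-translated physical memory
 */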

#endif /* !_LOCORE */

#endif /* _OS_H_ */