/*	$NetBSD: xen.h,v 1.2 2004/04/17 12:46:42 cl Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#ifdef XENDEBUG
void printk(const char *, ...);
#endif

void xencn_attach(void);

#endif /* !_LOCORE */

#define hypervisor_asm_ack(num) \
	movl	HYPERVISOR_shared_info,%eax	;\
	lock					;\
	btsl	$num,EVENTS_MASK(%eax)

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in hypervisor-if.h but can't be pulled in,
 * as they are used in start-of-day assembly. The .h files still need
 * a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819
#define FLAT_RING1_DS		0x0821
#define FLAT_RING3_CS		0x082b
#define FLAT_RING3_DS		0x0833
#endif

#define __KERNEL_CS	FLAT_RING1_CS
#define __KERNEL_DS	FLAT_RING1_DS
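
/*
 * For reference, an x86 segment selector encodes (index << 3) | (TI << 2)
 * | RPL, so FLAT_RING1_CS (0x0819) decodes to descriptor index 259, TI=0
 * (GDT) and RPL=1, i.e. a GDT entry reserved by Xen, usable from ring 1.
 * A minimal sketch (not part of the original header) that checks this:
 */
#if 0	/* illustration only */
#include <assert.h>

static void
selector_decode_example(void)
{
	unsigned int sel = FLAT_RING1_CS;	/* 0x0819 */

	assert((sel >> 3) == 259);		/* descriptor table index */
	assert(((sel >> 2) & 1) == 0);		/* TI: 0 = GDT, 1 = LDT */
	assert((sel & 3) == 1);			/* RPL: ring 1 */
}
#endif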

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

#include <machine/hypervisor-ifs/hypervisor-if.h>

/* some function prototypes */
void trap_init(void);
void dump_regs(struct trapframe *regs);


/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define unlikely(x)	__builtin_expect((x),0)
#define __save_flags(x)							\
do {									\
	(x) = test_bit(EVENTS_MASTER_ENABLE_BIT,			\
	    &HYPERVISOR_shared_info->events_mask);			\
	barrier();							\
} while (0)

#define __restore_flags(x)						\
do {									\
	shared_info_t *_shared = HYPERVISOR_shared_info;		\
	if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask); \
	barrier();							\
} while (0)
/* if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL); \ */

#define __cli()								\
do {									\
	clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	shared_info_t *_shared = HYPERVISOR_shared_info;		\
	set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);	\
	barrier();							\
} while (0)
/* if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL); \ */
#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
/* XXX __save_and_cli()/__save_and_sti() are not defined in this file. */
#define save_and_cli(x)		__save_and_cli(x)
#define save_and_sti(x)		__save_and_sti(x)
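
/*
 * A minimal usage sketch (not part of the original header): the classic
 * save/disable/restore pattern around a critical section.  The "flags"
 * word here is the virtual event-enable bit, not the real EFLAGS.IF.
 */
#if 0	/* illustration only */
static int example_counter;

static void
critical_section_example(void)
{
	unsigned long flags;

	save_flags(flags);	/* remember current virtual-IF state */
	cli();			/* mask virtual event delivery */
	example_counter++;	/* work that must not race an event upcall */
	restore_flags(flags);	/* re-enable only if enabled on entry */
}
#endif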



/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define __LOCK_PREFIX ""
#define __LOCK ""
#define __ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xchg(ptr,v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
    int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
		    :"=q" (x)
		    :"m" (*__xg(ptr)), "0" (x)
		    :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
		    :"=r" (x)
		    :"m" (*__xg(ptr)), "0" (x)
		    :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
		    :"=r" (x)
		    :"m" (*__xg(ptr)), "0" (x)
		    :"memory");
		break;
	}
	return x;
}
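
/*
 * A minimal usage sketch (not part of the original header): xchg() swaps
 * a new value into memory and returns the old one in a single atomic
 * instruction, so a pending-events word can be fetched and cleared
 * without losing events posted in between.
 */
#if 0	/* illustration only */
static unsigned long example_pending;

static unsigned long
drain_pending_example(void)
{
	/* Atomically grab all pending bits and reset the word to zero. */
	return xchg(&example_pending, 0);
}
#endif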

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( __LOCK_PREFIX
	    "btrl %2,%1\n\tsbbl %0,%0"
	    :"=r" (oldbit),"=m" (__ADDR)
	    :"Ir" (nr) : "memory");
	return oldbit;
}
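
/*
 * A minimal usage sketch (not part of the original header): the btrl/sbbl
 * pair above returns the old bit, so a caller can consume and test a flag
 * in one atomic step, e.g. when scanning an event bitmask.
 */
#if 0	/* illustration only */
static unsigned long example_events;

static void
consume_event_example(int ev)
{
	if (test_and_clear_bit(ev, &example_events)) {
		/* Bit was set: exactly one caller gets to handle it. */
	}
}
#endif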

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) &
	    (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
	    "btl %2,%1\n\tsbbl %0,%0"
	    :"=r" (oldbit)
	    :"m" (__ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	    constant_test_bit((nr),(addr)) : \
	    variable_test_bit((nr),(addr)))
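
/*
 * A minimal usage sketch (not part of the original header): with a
 * compile-time constant bit number gcc folds __builtin_constant_p() and
 * picks the pure-C constant_test_bit(); a runtime value falls back to
 * the btl-based variable_test_bit().
 */
#if 0	/* illustration only */
static unsigned long example_mask;

static int
test_bit_example(int runtime_nr)
{
	int a = test_bit(5, &example_mask);		/* constant path */
	int b = test_bit(runtime_nr, &example_mask);	/* btl path */

	return a + b;
}
#endif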


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( __LOCK_PREFIX
	    "btsl %1,%0"
	    :"=m" (__ADDR)
	    :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( __LOCK_PREFIX
	    "btrl %1,%0"
	    :"=m" (__ADDR)
	    :"Ir" (nr));
}
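
/*
 * A minimal usage sketch (not part of the original header): because
 * btsl/btrl take a bit *offset* from the base address, @nr may index
 * past the first 32-bit word, so one call site can address a multi-word
 * bitmap, as the set_bit() comment above notes.
 */
#if 0	/* illustration only */
static unsigned long example_bitmap[4];		/* 128 bits on i386 */

static void
bitmap_example(void)
{
	set_bit(100, example_bitmap);		/* bit 4 of word 3 */
	if (test_bit(100, example_bitmap))
		clear_bit(100, example_bitmap);
}
#endif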

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
	    __LOCK "incl %0"
	    :"=m" (v->counter)
	    :"m" (v->counter));
}
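
/*
 * A minimal usage sketch (not part of the original header).  With
 * __LOCK empty this expands to a plain incl, which is atomic with
 * respect to interrupts on a uniprocessor but would need the lock
 * prefix for SMP.
 */
#if 0	/* illustration only */
static atomic_t example_refs = { 0 };

static void
take_ref_example(void)
{
	atomic_inc(&example_refs);	/* example_refs.counter++ atomically */
}
#endif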


#define rdtscll(val) \
	__asm__ __volatile__("rdtsc" : "=A" (val))
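
/*
 * A minimal usage sketch (not part of the original header): the "=A"
 * constraint glues EDX:EAX into one 64-bit value, so rdtscll() needs a
 * 64-bit lvalue.  The delta between two reads gives a crude cycle timer.
 */
#if 0	/* illustration only */
static u64
cycles_elapsed_example(void)
{
	u64 before, after;

	rdtscll(before);
	/* ... work being measured ... */
	rdtscll(after);
	return after - before;
}
#endif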


#endif /* !_LOCORE */

#endif /* _OS_H_ */
