/*	$NetBSD: xen.h,v 1.1 2004/03/11 21:44:08 cl Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

#include <sys/types.h>		/* for the fixed-width integer types below */

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#ifdef XENDEBUG
void printk(const char *, ...);
#endif

void xencn_attach(void);

#endif /* !_LOCORE */

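/*
 * Atomically set bit `num' in the shared-info events_mask field from
 * assembly code.  Note that the expansion clobbers %eax.
 */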
#define hypervisor_asm_ack(num) \
	movl	HYPERVISOR_shared_info,%eax		;\
	lock						;\
	btsl	$num,EVENTS_MASK(%eax)

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These constants are also defined in hypervisor-if.h, but that header
 * cannot be pulled in here because the values are needed in start-of-day
 * assembly.  The .h files still need a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819
#define FLAT_RING1_DS		0x0821
#define FLAT_RING3_CS		0x082b
#define FLAT_RING3_DS		0x0833
#endif

#define __KERNEL_CS        FLAT_RING1_CS
#define __KERNEL_DS        FLAT_RING1_DS
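
/*
 * Illustration (not part of the original header): the low two bits of a
 * selector encode its RPL, so the ring-1 selectors above should carry
 * RPL 1 and the ring-3 selectors RPL 3.  The check below is only a
 * sanity-check sketch of that relationship.
 */
#if (FLAT_RING1_CS & 3) != 1 || (FLAT_RING3_CS & 3) != 3
#error "flat selector RPLs do not match their rings"
#endif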

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

#include <machine/hypervisor-ifs/hypervisor-if.h>


/*
 * This struct defines the way the registers are stored on the
 * stack during an exception or interrupt.
 */
struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int  xds;
	int  xes;
	long orig_eax;
	long eip;
	int  xcs;
	long eflags;
	long esp;
	int  xss;
};

/* some function prototypes */
void trap_init(void);
void dump_regs(struct pt_regs *regs);
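
/*
 * Example sketch (not part of the original header): a handler handed the
 * register frame laid out above could report the trapping context like
 * this.  printk() is only declared under XENDEBUG, so the sketch is
 * guarded the same way.
 */
#ifdef XENDEBUG
static __inline__ void
example_report_trap(struct pt_regs *regs)
{
	/* eip/xcs/eflags are the values pushed by the exception entry */
	printk("trap at %04x:%08lx, eflags %08lx\n",
	       regs->xcs, regs->eip, regs->eflags);
	dump_regs(regs);
}
#endif /* XENDEBUG */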

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 * (A usage sketch appears further down in this header.)
 */
#define unlikely(x)  __builtin_expect((x),0)
#define __save_flags(x)                                                       \
do {                                                                          \
    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                                  \
                   &HYPERVISOR_shared_info->events_mask);                     \
    barrier();                                                                \
} while (0)

#define __restore_flags(x)                                                    \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);          \
    barrier();                                                                \
} while (0)
/*     if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL);     \ */

#define __cli()                                                               \
do {                                                                          \
    clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
    barrier();                                                                \
} while (0)

#define __sti()                                                               \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);                 \
    barrier();                                                                \
} while (0)
/*     if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);            \ */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)


/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define __LOCK_PREFIX ""
#define __LOCK ""
#define __ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
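
/*
 * Example sketch (not part of the original header): xchg() used as a
 * simple test-and-set primitive on a lock word.
 */
static __inline__ int
example_try_lock(volatile unsigned long *lock)
{
	/* the previous value is returned atomically; 0 means we took it */
	return xchg(lock, 1UL) == 0;
}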

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (__ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (__ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
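
/*
 * Example sketch (not part of the original header): polling a bit with
 * test_bit() and consuming it atomically with test_and_clear_bit().
 */
static __inline__ int
example_consume_pending(volatile unsigned long *pending, int nr)
{
	if (!test_bit(nr, pending))
		return 0;		/* nothing pending */
	/* the bit may be cleared by someone else in between, so re-check */
	return test_and_clear_bit(nr, pending);
}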


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}
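
/*
 * Example sketch (not part of the original header): set_bit() and
 * clear_bit() driving one flag word shared with another CPU or with
 * the hypervisor.
 */
static __inline__ void
example_toggle_flag(volatile unsigned long *flags, int nr, int on)
{
	if (on)
		set_bit(nr, flags);	/* atomic read-modify-write */
	else
		clear_bit(nr, flags);	/* no barrier implied; see above */
}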

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                __LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
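
/*
 * Example sketch (not part of the original header): a short critical
 * section built from the virtual cli()/sti() macros above.  Like those
 * macros, it assumes HYPERVISOR_shared_info has been declared elsewhere
 * (this header does not declare it).
 */
static __inline__ void
example_count_event(atomic_t *counter)
{
	int flags;

	save_flags(flags);		/* remember the current enable state */
	cli();				/* mask virtual event delivery */
	atomic_inc(counter);		/* work done with events masked */
	restore_flags(flags);		/* re-enable only if previously enabled */
}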


#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
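
/*
 * Example sketch (not part of the original header): reading the TSC
 * around a region to get a rough cycle count.
 */
static __inline__ unsigned long long
example_cycles(void)
{
	unsigned long long before, after;

	rdtscll(before);
	barrier();		/* keep the compiler from moving code across */
	rdtscll(after);
	return after - before;
}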


#endif /* !_LOCORE */

#endif /* _OS_H_ */