/*	$NetBSD: xen.h,v 1.4 2004/04/24 18:55:02 cl Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
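
/*
 * Example (illustrative only): a caller asking for the boot device from
 * the Xen command line passes the matching XEN_PARSE_* selector and reads
 * the result out of the corresponding union member.  'xcp' here is a
 * hypothetical local:
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	xen_parse_cmdline(XEN_PARSE_BOOTDEV, &xcp);
 *	if (xcp.xcp_bootdev[0] != '\0')
 *		... use xcp.xcp_bootdev as the boot device name ...
 */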

void	xenconscn_attach(void);

#include <sys/types.h>		/* for uint16_t/uint32_t/uint64_t */

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#ifdef XENDEBUG
void printk(const char *, ...);
#endif

#endif /* !_LOCORE */

/*
 * Acknowledge an event from assembly: atomically set the event's bit in
 * the shared-info event mask.  For use from .S files only; clobbers %eax.
 */
#define hypervisor_asm_ack(num) \
	movl	HYPERVISOR_shared_info,%eax		;\
	lock						;\
	btsl	$num,EVENTS_MASK(%eax)
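
/*
 * Illustrative use from a .S event path (the bit number 0 is a
 * hypothetical event index, not one defined by this header):
 *
 *	hypervisor_asm_ack(0)
 */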

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in hypervisor-if.h, but that header can't be
 * pulled in here because these values are used in start-of-day assembly.
 * The .h files need a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819
#define FLAT_RING1_DS		0x0821
#define FLAT_RING3_CS		0x082b
#define FLAT_RING3_DS		0x0833
#endif

#define __KERNEL_CS        FLAT_RING1_CS
#define __KERNEL_DS        FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

#include <machine/hypervisor-ifs/hypervisor-if.h>

/* some function prototypes */
void trap_init(void);
void dump_regs(struct trapframe *regs);

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event-enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define unlikely(x)  __builtin_expect((x),0)
#define __save_flags(x)                                                       \
do {                                                                          \
    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                                  \
                   &HYPERVISOR_shared_info->events_mask);                     \
    barrier();                                                                \
} while (0)

#define __restore_flags(x)                                                    \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);          \
    barrier();                                                                \
} while (0)
/*
 * Disabled pending-event dispatch, formerly part of __restore_flags():
 *     if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL);
 */

#define __cli()                                                               \
do {                                                                          \
    clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
    barrier();                                                                \
} while (0)

#define __sti()                                                               \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);                 \
    barrier();                                                                \
} while (0)
/*
 * Disabled pending-event dispatch, formerly part of __sti():
 *     if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);
 */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)
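
/*
 * save_and_cli()/save_and_sti() above expand to __save_and_cli() and
 * __save_and_sti(), which are not defined anywhere in this file.  A
 * minimal sketch, assuming the conventional semantics (save the current
 * event-enable flag, then disable/enable events):
 */
#ifndef __save_and_cli
#define __save_and_cli(x)                                                     \
do {                                                                          \
    __save_flags(x);                                                          \
    __cli();                                                                  \
} while (0)
#define __save_and_sti(x)                                                     \
do {                                                                          \
    __save_flags(x);                                                          \
    __sti();                                                                  \
} while (0)
#endif

/*
 * Typical critical-section usage ('flags' is a hypothetical local):
 *
 *	int flags;
 *
 *	save_and_cli(flags);
 *	... touch state shared with the event callback ...
 *	restore_flags(flags);
 */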

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

/* Lock prefixes left empty -- this code appears to assume a uniprocessor guest. */
#define __LOCK_PREFIX ""
#define __LOCK ""
#define __ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
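
/*
 * xchg() can serve as the primitive for a simple test-and-set spinlock.
 * A minimal sketch, illustrative only -- 'example_lock' and the two
 * helpers are hypothetical names, not part of this header's API:
 */
#if 0	/* example only */
static volatile unsigned long example_lock;	/* 0 = free, 1 = held */

static inline void example_acquire(void)
{
    /* Spin until the previous value was 0, i.e. we took the lock. */
    while (xchg(&example_lock, 1UL) != 0UL)
        ;
}

static inline void example_release(void)
{
    xchg(&example_lock, 0UL);	/* store 0, releasing the lock */
}
#endif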

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (__ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (__ADDR),"Ir" (nr));
    return oldbit;
}

/*
 * Use the pure-C version when the bit number is a compile-time constant
 * (so gcc can fold it to a load and mask), and btl otherwise.
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                __LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
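
/*
 * Typical use (illustrative only; 'refcnt' is a hypothetical variable):
 *
 *	static atomic_t refcnt;
 *
 *	atomic_inc(&refcnt);
 */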

/*
 * Read the 64-bit time-stamp counter; the "=A" constraint places the
 * result in the edx:eax pair on i386.
 */
#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
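
/*
 * Example (illustrative only; 'before'/'after' are hypothetical locals):
 * measuring an interval in TSC cycles.
 *
 *	uint64_t before, after;
 *
 *	rdtscll(before);
 *	... work to be timed ...
 *	rdtscll(after);
 *	... (after - before) is the elapsed cycle count ...
 */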

#endif /* !_LOCORE */

#endif /* _OS_H_ */