/*	$NetBSD: xen.h,v 1.6 2004/05/07 13:56:48 cl Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H

#ifndef _LOCORE

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
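
/*
 * Example (hypothetical caller, not part of this header): retrieve the
 * boot device name given on the Xen command line.  Zeroing the union
 * first is an assumption of this sketch, not a documented requirement.
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	memset(&xcp, 0, sizeof(xcp));
 *	xen_parse_cmdline(XEN_PARSE_BOOTDEV, &xcp);
 *	(xcp.xcp_bootdev now holds the boot device name)
 */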

void	xenconscn_attach(void);

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

#ifdef XENDEBUG
void printk(const char *, ...);
void vprintk(const char *, va_list);
#endif

#endif /* !_LOCORE */

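/*
 * Acknowledge event `num' from assembly: atomically set its bit in the
 * events_mask of the shared info page.  Clobbers %eax.  Reading this as
 * "re-enable delivery" follows __sti() below, which likewise sets mask
 * bits to allow events through.
 */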
#define hypervisor_asm_ack(num) \
	movl	HYPERVISOR_shared_info,%eax		;\
	lock						;\
	btsl	$num,EVENTS_MASK(%eax)

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in hypervisor-if.h, but they can't be pulled in
 * here because they are used in start-of-day assembly.  The .h files need
 * a bit more cleanup...
 */

#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819
#define FLAT_RING1_DS		0x0821
#define FLAT_RING3_CS		0x082b
#define FLAT_RING3_DS		0x0833
#endif

#define __KERNEL_CS        FLAT_RING1_CS
#define __KERNEL_DS        FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

#include <machine/hypervisor-ifs/hypervisor-if.h>

/* some function prototypes */
void trap_init(void);

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define unlikely(x)  __builtin_expect((x),0)
#define __save_flags(x)                                                       \
do {                                                                          \
    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                                  \
                   &HYPERVISOR_shared_info->events_mask);                     \
    barrier();                                                                \
} while (0)

#define __restore_flags(x)                                                    \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);          \
    barrier();                                                                \
} while (0)
/*     if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL);     \ */

#define __cli()                                                               \
do {                                                                          \
    clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
    barrier();                                                                \
} while (0)

#define __sti()                                                               \
do {                                                                          \
    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);                 \
    barrier();                                                                \
} while (0)
/*     if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);            \ */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)
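
/*
 * A typical (hypothetical) critical section built from the macros above:
 * save the current virtual-interrupt state, block event delivery while
 * touching data shared with the event callback, then restore the state.
 *
 *	int flags;
 *
 *	save_flags(flags);
 *	cli();
 *	... update data also touched by do_hypervisor_callback() ...
 *	restore_flags(flags);
 */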

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

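/* The empty lock prefixes assume a uniprocessor guest. */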
#define __LOCK_PREFIX ""
#define __LOCK ""
#define __ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
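
/*
 * Example (hypothetical): atomically install a new value and obtain the
 * old one in a single step; the xchg instruction is implicitly locked.
 *
 *	static volatile unsigned long pending_hint;
 *	unsigned long old = xchg(&pending_hint, 0UL);
 */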

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (__ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}
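
/*
 * Sketch (hypothetical): consume a pending-work flag exactly once; only
 * the caller that observes the 1 -> 0 transition runs the handler.
 *
 *	static volatile unsigned long pending;
 *
 *	if (test_and_clear_bit(0, &pending))
 *		handle_pending_work();	(hypothetical handler)
 */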

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (__ADDR),"Ir" (nr));
    return oldbit;
}

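/*
 * test_bit() dispatches at compile time: when `nr' is a constant, gcc
 * can fold constant_test_bit() down to a single mask test; otherwise
 * the btl-based variable_test_bit() is used.
 */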
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( __LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (__ADDR)
                :"Ir" (nr));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                __LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
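
/*
 * Example (hypothetical): an event counter that is safe against
 * interruption, since incl executes as a single instruction.
 *
 *	static atomic_t nr_events = { 0 };
 *
 *	atomic_inc(&nr_events);
 */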


#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
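
/*
 * Example (hypothetical): bracket a code sequence with the timestamp
 * counter to estimate elapsed cycles.  The "=A" constraint stores
 * edx:eax into a 64-bit value, so `val' must be 64 bits wide.
 *
 *	uint64_t t0, t1;
 *
 *	rdtscll(t0);
 *	... work to be measured ...
 *	rdtscll(t1);
 *	(t1 - t0 is the elapsed cycle count)
 */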

#endif /* !_LOCORE */

#endif /* _OS_H_ */