/*	$NetBSD: xen.h,v 1.30.12.1 2010/04/21 00:33:44 matt Exp $	*/

/*
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _XEN_H
#define _XEN_H
#include "opt_xen.h"


#ifndef _LOCORE

#include <machine/cpufunc.h>

struct xen_netinfo {
	uint32_t xi_ifno;
	char *xi_root;
	uint32_t xi_ip[5];
};

union xen_cmdline_parseinfo {
	char			xcp_bootdev[16]; /* sizeof(dv_xname) */
	struct xen_netinfo	xcp_netinfo;
	char			xcp_console[16];
	char			xcp_pcidevs[64];
};

#define	XEN_PARSE_BOOTDEV	0
#define	XEN_PARSE_NETINFO	1
#define	XEN_PARSE_CONSOLE	2
#define	XEN_PARSE_BOOTFLAGS	3
#define	XEN_PARSE_PCIBACK	4

void	xen_parse_cmdline(int, union xen_cmdline_parseinfo *);

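/*
 * Usage sketch (illustrative only; the variable names below are not
 * part of this header): pick one XEN_PARSE_* mode and hand
 * xen_parse_cmdline() a parseinfo union; the member matching that mode
 * is filled in from the hypervisor-provided command line.
 *
 *	union xen_cmdline_parseinfo xcp;
 *
 *	memset(&xcp, 0, sizeof(xcp));
 *	xen_parse_cmdline(XEN_PARSE_CONSOLE, &xcp);
 *	if (xcp.xcp_console[0] != '\0')
 *		printf("console option: %s\n", xcp.xcp_console);
 */
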
void	xenconscn_attach(void);

void	xenprivcmd_init(void);

void	xbdback_init(void);
void	xennetback_init(void);
void	xen_shm_init(void);

void	xenevt_event(int);
void	xenevt_setipending(int, int);
void	xenevt_notify(void);

void	idle_block(void);

#if defined(XENDEBUG) || 1 /* XXX */
void printk(const char *, ...);
void vprintk(const char *, _BSD_VA_LIST_);
#endif

#endif

#endif /* _XEN_H */

/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _OS_H_
#define _OS_H_

/*
 * These are the segment descriptors provided for us by the hypervisor.
 * For now, these are hardwired -- guest OSes cannot update the GDT
 * or LDT.
 *
 * It shouldn't be hard to support descriptor-table frobbing -- let me
 * know if the BSD or XP ports require flexibility here.
 */


/*
 * These are also defined in xen-public/xen.h, but that header cannot be
 * pulled in here because these values are used in start-of-day assembly.
 * The .h files still need a bit more cleanup...
 */

#ifdef XEN3
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS 0xe019    /* GDT index 7171, RPL 1 */
#define FLAT_RING1_DS 0xe021    /* GDT index 7172, RPL 1 */
#define FLAT_RING1_SS 0xe021    /* GDT index 7172, RPL 1 */
#define FLAT_RING3_CS 0xe02b    /* GDT index 7173, RPL 3 */
#define FLAT_RING3_DS 0xe033    /* GDT index 7174, RPL 3 */
#define FLAT_RING3_SS 0xe033    /* GDT index 7174, RPL 3 */
#endif
#else /* XEN3 */
#ifndef FLAT_RING1_CS
#define FLAT_RING1_CS		0x0819
#define FLAT_RING1_DS		0x0821
#define FLAT_RING3_CS		0x082b
#define FLAT_RING3_DS		0x0833
#endif
#endif /* XEN3 */

#define __KERNEL_CS        FLAT_RING1_CS
#define __KERNEL_DS        FLAT_RING1_DS

/* Everything below this point is not included by assembler (.S) files. */
#ifndef _LOCORE

/* some function prototypes */
void trap_init(void);
void xpq_flush_cache(void);

#define xendomain_is_dom0()		(xen_start_info.flags & SIF_INITDOMAIN)
#define xendomain_is_privileged()	(xen_start_info.flags & SIF_PRIVILEGED)

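/*
 * Typical guard (sketch only; the error value is illustrative):
 * privileged operations bail out early when the domain lacks the
 * corresponding flag:
 *
 *	if (!xendomain_is_privileged())
 *		return EPERM;
 */
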
/*
 * STI/CLI equivalents. These set and clear the virtual
 * evtchn_upcall_mask flag in the current CPU's vcpu_info structure.
 * Note that when event delivery is re-enabled there may already be
 * pending events to handle, so we may call hypervisor_force_callback()
 * directly.
 */

#define __save_flags(x)							\
do {									\
	(x) = curcpu()->ci_vcpu->evtchn_upcall_mask;			\
} while (0)

#define __restore_flags(x)						\
do {									\
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;		\
	__insn_barrier();						\
	if ((_vci->evtchn_upcall_mask = (x)) == 0) {			\
		x86_lfence();						\
		if (__predict_false(_vci->evtchn_upcall_pending))	\
			hypervisor_force_callback();			\
	}								\
} while (0)

#define __cli()								\
do {									\
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;			\
	x86_lfence();							\
} while (0)

#define __sti()								\
do {									\
	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;		\
	__insn_barrier();						\
	_vci->evtchn_upcall_mask = 0;					\
	x86_lfence(); /* unmask then check (avoid races) */		\
	if (__predict_false(_vci->evtchn_upcall_pending))		\
		hypervisor_force_callback();				\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)	do {					\
	__save_flags(x);					\
	__cli();						\
} while (/* CONSTCOND */ 0)
#define save_and_sti(x)		__save_and_sti(x)

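/*
 * Usage sketch (illustrative only; the variable name is not part of
 * this header): save_and_cli()/restore_flags() bracket a critical
 * section much like the native interrupt-disable macros, except that
 * only virtual event delivery to the current VCPU is masked:
 *
 *	u_long flags;
 *
 *	save_and_cli(flags);
 *	... code that must not be preempted by event upcalls ...
 *	restore_flags(flags);
 */
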
/*
 * Always assume we are running on a multiprocessor: we do not know how
 * many CPUs the underlying hardware has.
 */
#define __LOCK_PREFIX "lock; "

#ifdef XEN3
#define XATOMIC_T u_long
#ifdef __x86_64__
#define LONG_SHIFT 6
#define LONG_MASK 63
#else /* __x86_64__ */
#define LONG_SHIFT 5
#define LONG_MASK 31
#endif /* __x86_64__ */
#else /* XEN3 */
#define XATOMIC_T uint32_t
#define LONG_SHIFT 5
#define LONG_MASK 31
#endif /* XEN3 */

#define xen_ffs __builtin_ffsl

static __inline XATOMIC_T
xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "xchgq %0,%1"
#else
	    "xchgl %0,%1"
#endif
	    :"=r" (result)
	    :"m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

static inline uint16_t
xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
{
	unsigned long result;

	__asm volatile(__LOCK_PREFIX
	    "cmpxchgw %w1,%2"
	    :"=a" (result)
	    :"q"(newval), "m" (*ptr), "0" (val)
	    :"memory");

	return result;
}

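/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * xen_atomic_cmpxchg16() returns the value *ptr held before the
 * operation, so the usual compare-and-swap retry loop looks like this:
 *
 *	volatile uint16_t counter;
 *	uint16_t old, prev;
 *
 *	do {
 *		old = counter;
 *		prev = xen_atomic_cmpxchg16(&counter, old, old + 1);
 *	} while (prev != old);
 */
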
static __inline void
xen_atomic_setbits_l (volatile XATOMIC_T *ptr, unsigned long bits) {
#ifdef __x86_64__
	__asm volatile("lock ; orq %1,%0" :  "=m" (*ptr) : "ir" (bits));
#else
	__asm volatile("lock ; orl %1,%0" :  "=m" (*ptr) : "ir" (bits));
#endif
}

static __inline void
xen_atomic_clearbits_l (volatile XATOMIC_T *ptr, unsigned long bits) {
#ifdef __x86_64__
	__asm volatile("lock ; andq %1,%0" :  "=m" (*ptr) : "ir" (~bits));
#else
	__asm volatile("lock ; andl %1,%0" :  "=m" (*ptr) : "ir" (~bits));
#endif
}

static __inline XATOMIC_T
xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btrl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline XATOMIC_T
xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %2,%1 ;"
	    "sbbq %0,%0"
#else
	    "btsl %2,%1 ;"
	    "sbbl %0,%0"
#endif
	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno) : "memory");
	return result;
}

static __inline int
xen_constant_test_bit(const volatile void *ptr, unsigned long bitno)
{
	return ((1UL << (bitno & LONG_MASK)) &
	    (((const volatile XATOMIC_T *) ptr)[bitno >> LONG_SHIFT])) != 0;
}

static __inline XATOMIC_T
xen_variable_test_bit(const volatile void *ptr, unsigned long bitno)
{
	long result;

	__asm volatile(
#ifdef __x86_64__
		"btq %2,%1 ;"
		"sbbq %0,%0"
#else
		"btl %2,%1 ;"
		"sbbl %0,%0"
#endif
		:"=r" (result)
		:"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
	return result;
}

#define xen_atomic_test_bit(ptr, bitno) \
	(__builtin_constant_p(bitno) ? \
	 xen_constant_test_bit((ptr),(bitno)) : \
	 xen_variable_test_bit((ptr),(bitno)))

static __inline void
xen_atomic_set_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btsq %1,%0"
#else
	    "btsl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

static __inline void
xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno)
{
	__asm volatile(__LOCK_PREFIX
#ifdef __x86_64__
	    "btrq %1,%0"
#else
	    "btrl %1,%0"
#endif
	    :"=m" (*(volatile XATOMIC_T *)(ptr))
	    :"Ir" (bitno));
}

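/*
 * Usage sketch (illustrative only; pending and do_something() are
 * hypothetical): these operations are typically applied to words of an
 * event bitmap, where one side sets bits and another consumes them.  A
 * consumer can snapshot a word, then claim each set bit with the
 * atomic test-and-clear before acting on it:
 *
 *	volatile unsigned long pending;
 *	unsigned long snap;
 *	int bit;
 *
 *	while ((snap = pending) != 0) {
 *		bit = xen_ffs(snap) - 1;
 *		if (xen_atomic_test_and_clear_bit(&pending, bit))
 *			do_something(bit);
 *	}
 */
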
#undef XATOMIC_T

void	wbinvd(void);

#endif /* !__ASSEMBLY__ */

#endif /* _OS_H_ */