/*	$NetBSD: xenpmap.h,v 1.16 2007/11/22 16:16:58 bouyer Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#define	INVALID_P2M_ENTRY	(~0UL)

void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pde_update(pd_entry_t *, pd_entry_t);
void xpq_queue_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(pt_entry_t *, pt_entry_t, int);
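
/*
 * Usage sketch (illustrative only, not part of the original header):
 * MMU updates are queued and only take effect once xpq_flush_queue()
 * runs, so callers batch them under splvm(), as the PTE_SET()/PDE_SET()
 * macros below do:
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(maptp, xpmap_ptom(npte));
 *	xpq_queue_invlpg(va);
 *	xpq_flush_queue();
 *	splx(s);
 */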

extern paddr_t *xpmap_phys_to_machine_mapping;

#ifndef XEN
#define	PDE_GET(_pdp)						\
	*(_pdp)
#define PDE_SET(_pdp,_mapdp,_npde)				\
	*(_mapdp) = (_npde)
#define PDE_CLEAR(_pdp,_mapdp)					\
	*(_mapdp) = 0
#define PTE_SET(_ptp,_maptp,_npte)				\
	*(_maptp) = (_npte)
#define PTE_CLEAR(_ptp,_maptp)					\
	*(_maptp) = 0
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte)			\
	(_opte) = x86_atomic_testset_ul((_maptp), (_npte))
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte)			\
	(_opte) = x86_atomic_testset_ul((_maptp), 0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits)			\
	*(_mapdp) &= ~(_bits)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits)			\
	x86_atomic_clearbits_l((_maptp), (_bits))
#define PTE_SETBITS(_ptp,_maptp,_bits)				\
	*(_maptp) |= (_bits)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits)			\
	x86_atomic_setbits_l((_maptp), (_bits))
#else
paddr_t *xpmap_phys_to_machine_mapping;

#define	PDE_GET(_pdp)						\
	(pmap_valid_entry(*(_pdp)) ? xpmap_mtop(*(_pdp)) : *(_pdp))
#define PDE_SET(_pdp,_mapdp,_npde) do {				\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), xpmap_ptom((_npde)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_CLEAR(_pdp,_mapdp) do {				\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define	PTE_GET(_ptp)						\
	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
#define	PTE_GET_MA(_ptp)					\
	*(_ptp)
#define PTE_SET(_ptp,_maptp,_npte) do {				\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SET_MA(_ptp,_maptp,_npte) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_CLEAR(_ptp,_maptp) do {				\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte) do {		\
	int _s;							\
	(_opte) = PTE_GET(_ptp);				\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET_MA(_ptp,_maptp,_npte,_opte) do {		\
	int _s;							\
	(_opte) = *(_ptp);					\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte) do {		\
	int _s;							\
	(_opte) = PTE_GET(_ptp);				\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR_MA(_ptp,_maptp,_opte) do {		\
	int _s;							\
	(_opte) = *(_ptp);					\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_CLEARBITS(_ptp,_maptp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_CLEARBITS(_pdp,_mapdp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SETBITS(_ptp,_maptp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_SETBITS(_pdp,_mapdp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_COPY(_dpdp,_madpdp,_spdp) do {			\
	int _s = splvm();					\
	xpq_queue_pde_update((_madpdp), *(_spdp));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define	PTE_UPDATES_FLUSH() do {				\
	int _s = splvm();					\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)

#endif

/*
 * On Xen-2, start-of-day virtual memory begins at KERNTEXTOFF
 * (0xc0100000).  On Xen-3 it begins at KERNBASE (0xc0000000) for
 * domain0, so the offset between physical and virtual addresses
 * differs between Xen-2 and Xen-3 for domain0.
 * Starting with Xen 3.0.2, we can add notes so that virtual memory
 * starts at KERNBASE for domU as well.
 */
#if defined(XEN3) && (defined(DOM0OPS) || !defined(XEN_COMPAT_030001))
#define XPMAP_OFFSET	0
#else
#define	XPMAP_OFFSET	(KERNTEXTOFF - KERNBASE)
#endif
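
/*
 * Worked example of the values above: with KERNTEXTOFF at 0xc0100000
 * and KERNBASE at 0xc0000000, the compat case gives
 * XPMAP_OFFSET = 0xc0100000 - 0xc0000000 = 0x00100000 (1MB);
 * with the newer layout the two bases coincide and XPMAP_OFFSET is 0.
 */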

static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
	    XPMAP_OFFSET) | (mpa & ~PG_FRAME);
}

static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
	    XPMAP_OFFSET);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT)
		| (ppa & ~PG_FRAME);
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT);
}
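
/*
 * Illustrative example (PG_V and PG_RW are assumed to come from the
 * x86 pte.h definitions, as PG_FRAME does): building a machine-address
 * PTE for a known pseudo-physical page and converting it back:
 *
 *	pt_entry_t ma_pte = xpmap_ptom_masked(pa) | PG_V | PG_RW;
 *	paddr_t    pa2    = xpmap_mtop_masked(ma_pte & PG_FRAME);
 */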

#ifdef XEN3
static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	paddr_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = new_val;
	mcl->args[2] = 0;
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	paddr_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = new_val;
	mcl->args[2] = 0;
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif
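
/*
 * Illustrative sketch (assumes HYPERVISOR_multicall() and the UVMF_*
 * flags are declared elsewhere in the Xen headers): a caller fills an
 * array of multicall entries and submits them in a single hypercall:
 *
 *	multicall_entry_t mcl[2];
 *	MULTI_update_va_mapping(&mcl[0], va0, ma_pte0, UVMF_INVLPG);
 *	MULTI_update_va_mapping(&mcl[1], va1, ma_pte1, UVMF_INVLPG);
 *	HYPERVISOR_multicall(mcl, 2);
 */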

#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif

#endif /* XEN3 */

#endif /* _XEN_XENPMAP_H_ */