/*	$NetBSD: xenpmap.h,v 1.7 2005/05/31 12:36:56 yamt Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#define	INVALID_P2M_ENTRY	(~0UL)

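/*
 * MMU update queue.  Under Xen, live page table entries are not written
 * directly; updates are queued with the xpq_queue_*() helpers below and
 * submitted to the hypervisor in batches.  xpq_flush_queue() forces out
 * any pending updates.
 */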
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pde_update(pd_entry_t *, pd_entry_t);
void xpq_queue_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_unchecked_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(pt_entry_t *, pt_entry_t, int);

extern paddr_t *xpmap_phys_to_machine_mapping;

#define	XPQ_PIN_L1_TABLE 1
#define	XPQ_PIN_L2_TABLE 2

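/*
 * Levels passed to xpq_queue_pin_table(): the hypervisor validates the
 * page as an L1 or L2 page table when it is pinned.
 *
 * The accessor macros below come in two flavours: without XEN they are
 * plain memory operations; with XEN defined they queue hypervisor
 * updates and translate between physical and machine addresses.  Each
 * macro takes both the virtual address of the entry (_ptp/_pdp) and
 * the corresponding address handed to the hypervisor (_maptp/_mapdp).
 */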
#ifndef XEN
#define	PDE_GET(_pdp)						\
	*(_pdp)
#define PDE_SET(_pdp,_mapdp,_npde)				\
	*(_mapdp) = (_npde)
#define PDE_CLEAR(_pdp,_mapdp)					\
	*(_mapdp) = 0
#define PTE_SET(_ptp,_maptp,_npte)				\
	*(_maptp) = (_npte)
#define PTE_CLEAR(_ptp,_maptp)					\
	*(_maptp) = 0
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte)			\
	(_opte) = x86_atomic_testset_ul((_maptp), (_npte))
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte)			\
	(_opte) = x86_atomic_testset_ul((_maptp), 0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits)			\
	*(_mapdp) &= ~(_bits)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits)			\
	x86_atomic_clearbits_l((_maptp), (_bits))
#define PTE_SETBITS(_ptp,_maptp,_bits)				\
	*(_maptp) |= (_bits)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits)			\
	x86_atomic_setbits_l((_maptp), (_bits))
#else
paddr_t *xpmap_phys_to_machine_mapping;

#define	PDE_GET(_pdp)						\
	(pmap_valid_entry(*(_pdp)) ? xpmap_mtop(*(_pdp)) : *(_pdp))
#define PDE_SET(_pdp,_mapdp,_npde) do {				\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), xpmap_ptom((_npde)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_CLEAR(_pdp,_mapdp) do {				\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define	PTE_GET(_ptp)						\
	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
#define	PTE_GET_MA(_ptp)					\
	*(_ptp)
#define PTE_SET(_ptp,_maptp,_npte) do {				\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SET_MA(_ptp,_maptp,_npte) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SET_MA_UNCHECKED(_ptp,_maptp,_npte) do {		\
	int _s = splvm();					\
	xpq_queue_unchecked_pte_update((_maptp), (_npte));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_CLEAR(_ptp,_maptp) do {				\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte) do {		\
	int _s;							\
	(_opte) = PTE_GET(_ptp);				\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));	\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET_MA(_ptp,_maptp,_npte,_opte) do {		\
	int _s;							\
	(_opte) = *(_ptp);					\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte) do {		\
	int _s;							\
	(_opte) = PTE_GET(_ptp);				\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR_MA(_ptp,_maptp,_opte) do {		\
	int _s;							\
	(_opte) = *(_ptp);					\
	_s = splvm();						\
	xpq_queue_pte_update((_maptp), 0);			\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_CLEARBITS(_ptp,_maptp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_CLEARBITS(_pdp,_mapdp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_SETBITS(_ptp,_maptp,_bits) do {			\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_SETBITS(_pdp,_mapdp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pde_update((_mapdp), *(_pdp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits) do {		\
	int _s = splvm();					\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define PDE_COPY(_dpdp,_madpdp,_spdp) do {			\
	int _s = splvm();					\
	xpq_queue_pde_update((_madpdp), *(_spdp));		\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
#define	PTE_UPDATES_FLUSH() do {				\
	int _s = splvm();					\
	xpq_flush_queue();					\
	splx(_s);						\
} while (/*CONSTCOND*/0)
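/*
 * Illustrative sketch only (hypothetical caller, hypothetical names):
 * the plain accessors take a new PTE holding a physical address and
 * translate it with xpmap_ptom(); the *_MA variants take a value that
 * is already a machine-address PTE.  The ATOMIC variants return the
 * old contents, translated back to a physical-address PTE, through
 * their last argument:
 *
 *	pt_entry_t *ptep = ...;		// VA of the PTE
 *	pt_entry_t *maptep = ...;	// machine address of the same PTE
 *	pt_entry_t npte, opte;
 *
 *	PTE_SET(ptep, maptep, npte);		// npte: physical-address PTE
 *	PTE_ATOMIC_CLEAR(ptep, maptep, opte);	// opte: previous contents
 */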

#endif /* XEN */

#define	XPMAP_OFFSET	(KERNTEXTOFF - KERNBASE_LOCORE)
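/*
 * Translation between guest-physical ("pseudo-physical") and machine
 * addresses.  xpmap_mtop() converts machine to physical through the
 * hypervisor's machine_to_phys_mapping table; xpmap_ptom() is the
 * reverse, through xpmap_phys_to_machine_mapping.  Both preserve the
 * low, non-frame bits, so whole PTEs can be converted as well;
 * xpmap_ptom_masked() returns just the translated frame.
 */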
static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
	    XPMAP_OFFSET) | (mpa & ~PG_FRAME);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT)
		| (ppa & ~PG_FRAME);
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT);
}
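/*
 * Illustrative sketch only (pa, ptep and maptep are a hypothetical
 * caller's): building a machine-address PTE from a physical address
 * and installing it with one of the *_MA accessors above:
 *
 *	pt_entry_t npte = xpmap_ptom_masked(pa) | PG_V | PG_RW;
 *	PTE_SET_MA(ptep, maptep, npte);
 */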

#endif /* _XEN_XENPMAP_H_ */