/*	$NetBSD: xenpmap.h,v 1.5 2005/04/16 08:49:29 yamt Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34
#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

/* p2m table entry marking a page with no machine frame assigned. */
#define INVALID_P2M_ENTRY	(~0UL)

/*
 * MMU update queue interface: page-table modifications are queued via
 * the xpq_queue_*() functions and pushed out by xpq_flush_queue()
 * (the XEN macros below pair every queue call with a flush).
 */
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pde_update(pd_entry_t *, pd_entry_t);
void xpq_queue_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_unchecked_pte_update(pt_entry_t *, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int xpq_update_foreign(pt_entry_t *, pt_entry_t, int);

/* Physical-to-machine frame translation table (see xpmap_ptom() below). */
extern paddr_t *xpmap_phys_to_machine_mapping;

/* Page-table level argument for xpq_queue_pin_table(). */
#define XPQ_PIN_L1_TABLE 1
#define XPQ_PIN_L2_TABLE 2
56
#ifndef XEN
/*
 * Native (non-Xen) versions: page-table entries are stored directly.
 * The first argument (_pdp/_ptp) is unused here; writes go through the
 * second, mapped alias (_mapdp/_maptp) so the signatures stay in sync
 * with the Xen versions in the #else branch.
 */
#define PDE_GET(_pdp)						\
	*(_pdp)
#define PDE_SET(_pdp,_mapdp,_npde)				\
	*(_mapdp) = (_npde)
#define PDE_CLEAR(_pdp,_mapdp)					\
	*(_mapdp) = 0
#define PTE_SET(_ptp,_maptp,_npte)				\
	*(_maptp) = (_npte)
#define PTE_CLEAR(_ptp,_maptp)					\
	*(_maptp) = 0
/* ATOMIC variants use the x86 atomic primitives for read-modify-write. */
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte)			\
	(_opte) = x86_atomic_testset_ul((_maptp), (_npte))
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte)			\
	(_opte) = x86_atomic_testset_ul((_maptp), 0)
#define PDE_CLEARBITS(_pdp,_mapdp,_bits)			\
	*(_mapdp) &= ~(_bits)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits)			\
	x86_atomic_clearbits_l((_maptp), (_bits))
#define PTE_SETBITS(_ptp,_maptp,_bits)				\
	*(_maptp) |= (_bits)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits)			\
	x86_atomic_setbits_l((_maptp), (_bits))
/*
 * NOTE(review): the Xen branch additionally defines PTE_GET, PTE_GET_MA,
 * the *_MA macros, PTE_CLEARBITS, PDE_ATOMIC_*, PDE_COPY and
 * PTE_UPDATES_FLUSH -- presumably native code never uses those; confirm.
 */
80 #else
81 paddr_t *xpmap_phys_to_machine_mapping;
82
/*
 * Xen versions: every page-table store is funnelled through the MMU
 * update queue and flushed immediately afterwards.  The plain PTE/PDE
 * macros translate between pseudo-physical and machine addresses with
 * xpmap_ptom()/xpmap_mtop(); the _MA variants operate on raw machine
 * addresses and perform no translation.
 */

/* Read a PDE, converting a valid entry back to pseudo-physical. */
#define PDE_GET(_pdp)							\
	(pmap_valid_entry(*(_pdp)) ? xpmap_mtop(*(_pdp)) : *(_pdp))
/* Store a pseudo-physical PDE (translated to a machine address). */
#define PDE_SET(_pdp,_mapdp,_npde) do {					\
	xpq_queue_pde_update((_mapdp), xpmap_ptom((_npde)));		\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PDE_CLEAR(_pdp,_mapdp) do {					\
	xpq_queue_pde_update((_mapdp), 0);				\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/* Read a PTE, converting a valid entry back to pseudo-physical. */
#define PTE_GET(_ptp)							\
	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
/* Read a PTE as its raw machine value (no translation). */
#define PTE_GET_MA(_ptp)						\
	*(_ptp)
/* Store a pseudo-physical PTE (translated to a machine address). */
#define PTE_SET(_ptp,_maptp,_npte) do {					\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));		\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/* Store a raw machine-address PTE. */
#define PTE_SET_MA(_ptp,_maptp,_npte) do {				\
	xpq_queue_pte_update((_maptp), (_npte));			\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/* Same, via the unchecked update op (see xpq_queue_unchecked_pte_update). */
#define PTE_SET_MA_UNCHECKED(_ptp,_maptp,_npte) do {			\
	xpq_queue_unchecked_pte_update((_maptp), (_npte));		\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_CLEAR(_ptp,_maptp) do {					\
	xpq_queue_pte_update((_maptp), 0);				\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/*
 * "ATOMIC" set/clear return the previous entry in _opte.
 * NOTE(review): these are not true atomic exchanges -- the old value
 * is sampled before the queued update is issued, so a concurrent
 * modification could be lost; confirm callers are serialized.
 */
#define PTE_ATOMIC_SET(_ptp,_maptp,_npte,_opte) do {			\
	(_opte) = PTE_GET(_ptp);					\
	xpq_queue_pte_update((_maptp), xpmap_ptom((_npte)));		\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SET_MA(_ptp,_maptp,_npte,_opte) do {		\
	(_opte) = *(_ptp);						\
	xpq_queue_pte_update((_maptp), (_npte));			\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR(_ptp,_maptp,_opte) do {			\
	(_opte) = PTE_GET(_ptp);					\
	xpq_queue_pte_update((_maptp), 0);				\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEAR_MA(_ptp,_maptp,_opte) do {			\
	(_opte) = *(_ptp);						\
	xpq_queue_pte_update((_maptp), 0);				\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/*
 * Bit set/clear helpers: (_bits) is masked with ~PG_FRAME so only flag
 * bits, never the frame address, can change.
 * NOTE(review): PDE_CLEARBITS queues a PTE update for a PDE while
 * PDE_ATOMIC_CLEARBITS below uses xpq_queue_pde_update() -- confirm
 * this asymmetry is intentional.
 */
#define PDE_CLEARBITS(_pdp,_mapdp,_bits) do {				\
	xpq_queue_pte_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_CLEARBITS(_ptp,_maptp,_bits) do {				\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_CLEARBITS(_pdp,_mapdp,_bits) do {			\
	xpq_queue_pde_update((_mapdp), *(_pdp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_CLEARBITS(_ptp,_maptp,_bits) do {			\
	xpq_queue_pte_update((_maptp), *(_ptp) & ~((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_SETBITS(_ptp,_maptp,_bits) do {				\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PDE_ATOMIC_SETBITS(_pdp,_mapdp,_bits) do {			\
	xpq_queue_pde_update((_mapdp), *(_pdp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
#define PTE_ATOMIC_SETBITS(_ptp,_maptp,_bits) do {			\
	xpq_queue_pte_update((_maptp), *(_ptp) | ((_bits) & ~PG_FRAME)); \
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/* Copy a PDE verbatim (raw machine value, no translation). */
#define PDE_COPY(_dpdp,_madpdp,_spdp) do {				\
	xpq_queue_pde_update((_madpdp), *(_spdp));			\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
/* Force out any pending queued updates. */
#define PTE_UPDATES_FLUSH() do {					\
	xpq_flush_queue();						\
} while (/*CONSTCOND*/0)
168
169 #endif
170
171 #define XPMAP_OFFSET (KERNTEXTOFF - KERNBASE_LOCORE)
172 static __inline paddr_t
173 xpmap_mtop(paddr_t mpa)
174 {
175 return ((machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT) +
176 XPMAP_OFFSET) | (mpa & ~PG_FRAME);
177 }
178
179 static __inline paddr_t
180 xpmap_ptom(paddr_t ppa)
181 {
182 return (xpmap_phys_to_machine_mapping[(ppa -
183 XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT)
184 | (ppa & ~PG_FRAME);
185 }
186
187 static __inline paddr_t
188 xpmap_ptom_masked(paddr_t ppa)
189 {
190 return (xpmap_phys_to_machine_mapping[(ppa -
191 XPMAP_OFFSET) >> PAGE_SHIFT] << PAGE_SHIFT);
192 }
193
194 #endif /* _XEN_XENPMAP_H_ */
195