/*	$NetBSD: xenpmap.h,v 1.28 2011/08/10 09:50:37 cherry Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#define	INVALID_P2M_ENTRY	(~0UL)

void xpq_queue_lock(void);
void xpq_queue_unlock(void);
void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int xpq_update_foreign(paddr_t, pt_entry_t, int);
void xen_vcpu_mcast_invlpg(vaddr_t, vaddr_t, uint32_t);
void xen_vcpu_bcast_invlpg(vaddr_t, vaddr_t);
void xen_mcast_tlbflush(uint32_t);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, uint32_t);
void xen_bcast_invlpg(vaddr_t);


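/*
 * Illustrative sketch (not part of the original header): the xpq_queue_*
 * functions batch MMU updates so that they can be submitted to the
 * hypervisor in one go.  A typical caller takes the queue lock, queues
 * one or more updates, and flushes the queue before unlocking:
 *
 *	xpq_queue_lock();
 *	xpq_queue_pte_update(ptepa, npte);
 *	xpq_queue_invlpg(va);
 *	xpq_flush_queue();
 *	xpq_queue_unlock();
 *
 * Here ptepa is the machine address of the PTE being written, npte the
 * new PTE value and va the virtual address to invalidate; all three are
 * hypothetical names used only for this sketch.
 */
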
#define xpq_queue_pin_l1_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE)
#define xpq_queue_pin_l3_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE)
#define xpq_queue_pin_l4_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE)

extern unsigned long *xpmap_phys_to_machine_mapping;

/*
 * On Xen-2, start-of-day virtual memory starts at KERNTEXTOFF
 * (0xc0100000).  On Xen-3 for domain0 it starts at KERNBASE (0xc0000000),
 * so the offset between physical and virtual addresses differs between
 * Xen-2 and Xen-3 for domain0.
 * Starting with xen-3.0.2, we can add notes so that virtual memory
 * starts at KERNBASE for domU as well.
 */
#if defined(DOM0OPS) || !defined(XEN_COMPAT_030001)
#define XPMAP_OFFSET	0
#else
#define XPMAP_OFFSET	(KERNTEXTOFF - KERNBASE)
#endif

#define mfn_to_pfn(mfn)	(machine_to_phys_mapping[(mfn)])
#define pfn_to_mfn(pfn)	(xpmap_phys_to_machine_mapping[(pfn)])
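
/*
 * Illustrative sketch (not part of the original header): pfn_to_mfn()
 * returns the raw phys-to-machine entry, so a caller that might hit a
 * pfn with no backing machine frame can check the result against
 * INVALID_P2M_ENTRY before using it, e.g.:
 *
 *	if (pfn_to_mfn(pfn) == INVALID_P2M_ENTRY)
 *		panic("pfn %lx has no machine frame", pfn);
 *
 * pfn is a hypothetical variable used only for this sketch.
 */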

static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return (
	    ((paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT)
	    + XPMAP_OFFSET);
}

static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return (xpmap_mtop_masked(mpa) | (mpa & ~PG_FRAME));
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (((paddr_t)xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT]) << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_ptom_masked(ppa) | (ppa & ~PG_FRAME));
}
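
/*
 * Illustrative sketch (not part of the original header): PTEs handed to
 * the hypervisor must contain machine frame numbers, so building one
 * from a pseudo-physical address looks roughly like:
 *
 *	pt_entry_t npte = xpmap_ptom_masked(pa) | PG_RW | PG_V;
 *
 * while xpmap_mtop()/xpmap_mtop_masked() perform the reverse translation
 * when reading page-table entries back.  PG_RW and PG_V are assumed to
 * be the usual x86 PTE permission/valid bits; pa and npte are
 * hypothetical names used only for this sketch.
 */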

static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
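
/*
 * Illustrative sketch (not part of the original header): these helpers
 * only fill in a multicall_entry_t; the batch must still be handed to
 * the hypervisor, e.g. via HYPERVISOR_multicall(), which is assumed to
 * be declared elsewhere in the Xen headers.  Roughly:
 *
 *	multicall_entry_t mcl[1];
 *
 *	MULTI_update_va_mapping(&mcl[0], va, npte, UVMF_INVLPG);
 *	if (HYPERVISOR_multicall(mcl, 1) != 0)
 *		panic("update_va_mapping failed");
 *
 * UVMF_INVLPG is one of the update_va_mapping flags from the Xen
 * interface; va and npte are hypothetical names used only for this
 * sketch.
 */
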
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX	2
#define MULTI_UVMDOMID_INDEX	3
#else
#define MULTI_UVMFLAGS_INDEX	3
#define MULTI_UVMDOMID_INDEX	4
#endif

#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif

#endif /* _XEN_XENPMAP_H_ */