/*	$NetBSD: xenpmap.h,v 1.34 2012/04/20 22:23:24 rmind Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#include <sys/types.h>
#include <sys/kcpuset.h>

#define INVALID_P2M_ENTRY	(~0UL)

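/*
 * Deferred MMU operations: the xpq_queue_*() functions add Xen MMU
 * requests (machine/phys table updates, PTE updates, invlpg, page table
 * switch, pin/unpin) to a pending batch, and xpq_flush_queue() submits
 * that batch to the hypervisor.
 */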
void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(paddr_t, pt_entry_t, int);
void xen_vcpu_mcast_invlpg(vaddr_t, vaddr_t, kcpuset_t *);
void xen_vcpu_bcast_invlpg(vaddr_t, vaddr_t);
void xen_mcast_tlbflush(kcpuset_t *);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, kcpuset_t *);
void xen_bcast_invlpg(vaddr_t);

void pmap_xen_resume(void);
void pmap_xen_suspend(void);

#ifdef PAE
void pmap_map_recursive_entries(void);
void pmap_unmap_recursive_entries(void);
#endif /* PAE */

#if defined(PAE) || defined(__x86_64__)
void xen_kpm_sync(struct pmap *, int);
#endif /* PAE || __x86_64__ */

#define xpq_queue_pin_l1_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE)
#define xpq_queue_pin_l3_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE)
#define xpq_queue_pin_l4_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE)

extern unsigned long *xpmap_phys_to_machine_mapping;

/*
 * On Xen-2, start-of-day virtual memory begins at KERNTEXTOFF
 * (0xc0100000).  On Xen-3 for domain0 it begins at KERNBASE (0xc0000000),
 * so the offset between physical and virtual addresses differs between
 * Xen-2 and Xen-3 for domain0.
 * Starting with Xen 3.0.2, we can add ELF notes so that virtual memory
 * starts at KERNBASE for domU as well.
 */
#if defined(DOM0OPS) || !defined(XEN_COMPAT_030001)
#define XPMAP_OFFSET	0
#else
#define XPMAP_OFFSET	(KERNTEXTOFF - KERNBASE)
#endif

#define mfn_to_pfn(mfn) (machine_to_phys_mapping[(mfn)])
#define pfn_to_mfn(pfn) (xpmap_phys_to_machine_mapping[(pfn)])
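
/*
 * mfn_to_pfn()/pfn_to_mfn() and the xpmap_*() helpers below translate
 * between machine frames/addresses and pseudo-physical frames/addresses,
 * using the hypervisor's M2P table (machine_to_phys_mapping) and the
 * per-domain P2M table (xpmap_phys_to_machine_mapping).
 */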

static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return (
	    ((paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT)
	    + XPMAP_OFFSET);
}

static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return (xpmap_mtop_masked(mpa) | (mpa & ~PG_FRAME));
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (((paddr_t)xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT]) << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_ptom_masked(ppa) | (ppa & ~PG_FRAME));
}

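/*
 * Illustrative sketch only (not part of this header): constructing a
 * machine PTE for a pseudo-physical page.  PG_V and PG_RW are the usual
 * x86 PTE bits; which flags a caller actually needs is an assumption
 * here, the point is the xpmap_ptom_masked() translation step.
 *
 *	pt_entry_t
 *	example_make_pte(paddr_t pa)
 *	{
 *		return (xpmap_ptom_masked(pa) | PG_V | PG_RW);
 *	}
 */

/*
 * Fill a multicall entry for an update_va_mapping hypercall.  On i386 the
 * low 32 bits of the PTE value go in args[1] and the high bits (zero
 * without PAE) in args[2], so the flags and domid land at different
 * indices than on amd64; see MULTI_UVMFLAGS_INDEX/MULTI_UVMDOMID_INDEX
 * below.
 */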
static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif
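
/*
 * Illustrative sketch only (an assumption, not code from this tree):
 * batching two va-mapping updates and submitting them through the
 * HYPERVISOR_multicall() wrapper from <xen/hypervisor.h>.  The flag value
 * and error handling are placeholders.
 *
 *	multicall_entry_t mcl[2];
 *
 *	MULTI_update_va_mapping(&mcl[0], va0, pte0, UVMF_INVLPG);
 *	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_INVLPG);
 *	if (HYPERVISOR_multicall(mcl, 2) != 0)
 *		panic("update_va_mapping multicall failed");
 */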

#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif

#endif /* _XEN_XENPMAP_H_ */