/* $NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $ */
2
3 /*
4 * Copyright (c) 2004 Christian Limpach.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $");
30
31 #include <sys/param.h>
32
33 #include <uvm/uvm_extern.h>
34
35 #include <machine/intr.h>
36 #include <machine/vmparam.h>
37 #include <machine/pmap.h>
38 #include <xen/xen.h>
39 #include <xen/hypervisor.h>
40 //#include <xen/evtchn.h>
41 #include <xen/xenpmap.h>
42 #include <machine/pte.h>
43
44 #define MAX_XEN_IDT 128
45
46 void xen_set_ldt(vaddr_t, uint32_t);
47
48 void
49 invlpg(vaddr_t addr)
50 {
51 int s = splvm(); /* XXXSMP */
52 xpq_queue_invlpg(addr);
53 splx(s);
54 }
55
/*
 * Load the IDT described by 'rd' into the hypervisor's per-vcpu trap
 * table.  Xen PV guests cannot execute LIDT; instead the gates are
 * repacked into an array of struct trap_info and handed to the
 * hypervisor via HYPERVISOR_set_trap_table().
 *
 * NOTE(review): uses a single static page, so this is not reentrant
 * and assumes callers are serialized (boot-time / per-CPU init) --
 * confirm against callers.
 */
void
lidt(struct region_descriptor *rd)
{
	/*
	 * We need to do this because we can't assume kmem_alloc(9)
	 * will be available at the boot stage when this is called.
	 */
	static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE)));
	/* Zero the page: also guarantees a zeroed entry terminates the table. */
	memset(xen_idt_page, 0, PAGE_SIZE);

	struct trap_info *xen_idt = (void * )xen_idt_page;
	int xen_idt_idx = 0;

	/* Source IDT as laid out by the caller (base/limit from 'rd'). */
	struct trap_info * idd = (void *) rd->rd_base;
	const int nidt = rd->rd_limit / (sizeof *idd);

	int i;

	/*
	 * Sweep in all initialised entries, consolidate them back to
	 * back in the requestor array.
	 */
	for (i = 0; i < nidt; i++) {
		if (idd[i].address == 0)	/* Skip gap */
			continue;
		KASSERT(xen_idt_idx < MAX_XEN_IDT);
		/* Copy over entry */
		xen_idt[xen_idt_idx++] = idd[i];
	}

#if defined(__x86_64__)
	/* page needs to be r/o (hypervisor requirement on amd64) */
	pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ);
#endif /* __x86_64 */

	/* Hook it up in the hypervisor */
	if (HYPERVISOR_set_trap_table(xen_idt))
		panic("HYPERVISOR_set_trap_table() failed");

#if defined(__x86_64__)
	/* reset: make the page writable again for the next call */
	pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE);
#endif /* __x86_64 */
}
100
/*
 * Load the LDT selected by 'sel' for the current CPU via the
 * xen_set_ldt() hypercall shim.  Compiled to a no-op on x86_64.
 * The last-loaded selector is cached in ci_curldt so redundant
 * loads are skipped.
 */
void
lldt(u_short sel)
{
#ifndef __x86_64__
	struct cpu_info *ci;

	ci = curcpu();

	/* Fast path: this LDT is already loaded. */
	if (ci->ci_curldt == sel)
		return;
	if (sel == GSEL(GLDT_SEL, SEL_KPL))
		/* The default (global) kernel LDT. */
		xen_set_ldt((vaddr_t)ldtstore, NLDT);
	else
		/* An LDT described by this CPU's GDT slot for 'sel'. */
		xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base,
		    ci->ci_gdt[IDXSELN(sel)].ld.ld_entries);
	ci->ci_curldt = sel;
#endif
}
119
120 void
121 ltr(u_short sel)
122 {
123 panic("XXX ltr not supported\n");
124 }
125
126 void
127 lcr0(u_long val)
128 {
129 panic("XXX lcr0 not supported\n");
130 }
131
132 u_long
133 rcr0(void)
134 {
135 /* XXX: handle X86_CR0_TS ? */
136 return 0;
137 }
138
#ifndef __x86_64__
/*
 * lcr3: switch the page-table base (i386 only here) by queueing a
 * PT-switch request for the machine address that corresponds to
 * the pseudo-physical address 'val'.
 */
void
lcr3(vaddr_t val)
{
	int spl;

	spl = splvm();		/* XXXSMP */
	xpq_queue_pt_switch(xpmap_ptom_masked(val));
	splx(spl);
}
#endif
148
/*
 * Flush the entire TLB by queueing a full-flush request to the
 * hypervisor.
 */
void
tlbflush(void)
{
	int spl;

	spl = splvm();		/* XXXSMP */
	xpq_queue_tlb_flush();
	splx(spl);
}
156
/*
 * Flush the TLB including global ("g") mappings.  Simply defers to
 * tlbflush(); the hypervisor flush is assumed to cover global
 * entries as well.
 */
void
tlbflushg(void)
{
	tlbflush();
}
162
/* Read debug register %dr0 through the hypervisor. */
register_t
rdr0(void)
{

	return HYPERVISOR_get_debugreg(0);
}
169
/* Write debug register %dr0 through the hypervisor. */
void
ldr0(register_t val)
{

	HYPERVISOR_set_debugreg(0, val);
}
176
/* Read debug register %dr1 through the hypervisor. */
register_t
rdr1(void)
{

	return HYPERVISOR_get_debugreg(1);
}
183
/* Write debug register %dr1 through the hypervisor. */
void
ldr1(register_t val)
{

	HYPERVISOR_set_debugreg(1, val);
}
190
/* Read debug register %dr2 through the hypervisor. */
register_t
rdr2(void)
{

	return HYPERVISOR_get_debugreg(2);
}
197
/* Write debug register %dr2 through the hypervisor. */
void
ldr2(register_t val)
{

	HYPERVISOR_set_debugreg(2, val);
}
204
/* Read debug register %dr3 through the hypervisor. */
register_t
rdr3(void)
{

	return HYPERVISOR_get_debugreg(3);
}
211
/* Write debug register %dr3 through the hypervisor. */
void
ldr3(register_t val)
{

	HYPERVISOR_set_debugreg(3, val);
}
/* Read debug register %dr6 (debug status) through the hypervisor. */
register_t
rdr6(void)
{

	return HYPERVISOR_get_debugreg(6);
}
224
/* Write debug register %dr6 through the hypervisor. */
void
ldr6(register_t val)
{

	HYPERVISOR_set_debugreg(6, val);
}
231
/* Read debug register %dr7 (debug control) through the hypervisor. */
register_t
rdr7(void)
{

	return HYPERVISOR_get_debugreg(7);
}
238
/* Write debug register %dr7 through the hypervisor. */
void
ldr7(register_t val)
{

	HYPERVISOR_set_debugreg(7, val);
}
245
/*
 * wbinvd: flush the CPU caches.  The privileged WBINVD instruction
 * is unavailable to a PV guest, so this goes through the Xen MMU-op
 * queue instead.
 */
void
wbinvd(void)
{

	xpq_flush_cache();
}
252
/*
 * rcr2: return the faulting address.  Xen delivers %cr2 in the
 * shared per-vcpu info area rather than letting the guest read the
 * register directly.
 */
vaddr_t
rcr2(void)
{
	return curcpu()->ci_vcpu->arch.cr2;
}
258
#ifdef __x86_64__
/*
 * Set the user %gs segment selector through the
 * SEGBASE_GS_USER_SEL hypercall (amd64 only).
 */
void
setusergs(int gssel)
{
	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
}
#endif
266