/*	$NetBSD: xenfunc.c,v 1.28 2020/05/06 19:47:05 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2004 Christian Limpach.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.28 2020/05/06 19:47:05 bouyer Exp $");
30
31 #include <sys/param.h>
32
33 #include <uvm/uvm_extern.h>
34
35 #include <machine/intr.h>
36 #include <machine/vmparam.h>
37 #include <machine/pmap.h>
38 #include <xen/xen.h>
39 #include <xen/hypervisor.h>
40 //#include <xen/evtchn.h>
41 #include <xen/xenpmap.h>
42 #include <machine/pte.h>
43
44 #define MAX_XEN_IDT 128
45
46 void xen_set_ldt(vaddr_t, uint32_t);
47
48 void
49 invlpg(vaddr_t addr)
50 {
51 int s = splvm(); /* XXXSMP */
52 xpq_queue_invlpg(addr);
53 splx(s);
54 }
55
/*
 * lidt: install the IDT described by the region descriptor 'rd'.
 *
 * A Xen PV guest cannot execute the real lidt instruction; instead the
 * initialised gate entries are packed into a page-aligned trap_info
 * array and registered with the hypervisor via
 * HYPERVISOR_set_trap_table().
 */
void
lidt(struct region_descriptor *rd)
{
	/*
	 * We need to do this because we can't assume kmem_alloc(9)
	 * will be available at the boot stage when this is called.
	 */
	static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE)));
#if defined(__x86_64__)
	/*
	 * NOTE(review): presumably pins us to this CPU while the page
	 * protection is flipped below -- confirm.
	 */
	kpreempt_disable();
#endif
	/*
	 * Zero the whole page: the copy loop below leaves the slot after
	 * the last copied entry zeroed, terminating the table.
	 */
	memset(xen_idt_page, 0, PAGE_SIZE);

	struct trap_info *xen_idt = (void * )xen_idt_page;
	int xen_idt_idx = 0;

	/* The source IDT, as described by the caller's descriptor. */
	struct trap_info * idd = (void *) rd->rd_base;
	const int nidt = rd->rd_limit / (sizeof *idd);

	int i;

	/*
	 * Sweep in all initialised entries, consolidate them back to
	 * back in the requestor array.
	 */
	for (i = 0; i < nidt; i++) {
		if (idd[i].address == 0) /* Skip gap */
			continue;
		KASSERT(xen_idt_idx < MAX_XEN_IDT);
		/* Copy over entry */
		xen_idt[xen_idt_idx++] = idd[i];
	}

#if defined(__x86_64__)
	/* page needs to be r/o */
	pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ);
#endif /* __x86_64 */

	/* Hook it up in the hypervisor */
	if (HYPERVISOR_set_trap_table(xen_idt))
		panic("HYPERVISOR_set_trap_table() failed");

#if defined(__x86_64__)
	/* reset */
	pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE);
	kpreempt_enable();
#endif /* __x86_64 */
}
104
105 void
106 lldt(u_short sel)
107 {
108 #ifndef __x86_64__
109 struct cpu_info *ci;
110
111 ci = curcpu();
112
113 if (ci->ci_curldt == sel)
114 return;
115 if (sel == GSEL(GLDT_SEL, SEL_KPL))
116 xen_set_ldt((vaddr_t)ldtstore, NLDT);
117 else
118 xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base,
119 ci->ci_gdt[IDXSELN(sel)].ld.ld_entries);
120 ci->ci_curldt = sel;
121 #endif
122 }
123
124 void
125 ltr(u_short sel)
126 {
127 panic("XXX ltr not supported\n");
128 }
129
130 void
131 lcr0(register_t val)
132 {
133 panic("XXX lcr0 not supported\n");
134 }
135
136 register_t
137 rcr0(void)
138 {
139 /* XXX: handle X86_CR0_TS ? */
140 return 0;
141 }
142
#ifndef __x86_64__
/*
 * lcr3: switch to the page table at physical address 'val' by queueing
 * a pt-switch operation with the hypervisor (which wants the machine
 * address, hence the xpmap_ptom_masked() translation).
 */
void
lcr3(register_t val)
{
	int spl = splvm();

	xpq_queue_pt_switch(xpmap_ptom_masked(val));
	splx(spl);
}
#endif
152
/*
 * tlbflush: flush the whole TLB by queueing a flush operation with the
 * hypervisor.
 */
void
tlbflush(void)
{
	int spl = splvm();

	xpq_queue_tlb_flush();
	splx(spl);
}
160
/*
 * tlbflushg: flush the TLB including global-page entries.  Implemented
 * as a plain tlbflush(); the hypervisor flush is used for both.
 */
void
tlbflushg(void)
{
	tlbflush();
}
166
/*
 * Debug register accessors.
 *
 * A PV guest cannot touch %dr0-%dr7 directly; reads and writes are
 * proxied through HYPERVISOR_get_debugreg()/HYPERVISOR_set_debugreg().
 * No accessors are provided for %dr4/%dr5.
 */
register_t
rdr0(void)
{

	return HYPERVISOR_get_debugreg(0);
}

void
ldr0(register_t val)
{

	HYPERVISOR_set_debugreg(0, val);
}

register_t
rdr1(void)
{

	return HYPERVISOR_get_debugreg(1);
}

void
ldr1(register_t val)
{

	HYPERVISOR_set_debugreg(1, val);
}

register_t
rdr2(void)
{

	return HYPERVISOR_get_debugreg(2);
}

void
ldr2(register_t val)
{

	HYPERVISOR_set_debugreg(2, val);
}

register_t
rdr3(void)
{

	return HYPERVISOR_get_debugreg(3);
}

void
ldr3(register_t val)
{

	HYPERVISOR_set_debugreg(3, val);
}

/* %dr6 is the debug status register. */
register_t
rdr6(void)
{

	return HYPERVISOR_get_debugreg(6);
}

void
ldr6(register_t val)
{

	HYPERVISOR_set_debugreg(6, val);
}

/* %dr7 is the debug control register. */
register_t
rdr7(void)
{

	return HYPERVISOR_get_debugreg(7);
}

void
ldr7(register_t val)
{

	HYPERVISOR_set_debugreg(7, val);
}
249
/*
 * wbinvd: write back and invalidate the caches, via the hypervisor's
 * cache-flush queue operation.
 */
void
wbinvd(void)
{

	xpq_flush_cache();
}
256
257 register_t
258 rcr2(void)
259 {
260 return curcpu()->ci_vcpu->arch.cr2;
261 }
262
263 void
264 lcr2(register_t v)
265 {
266 curcpu()->ci_vcpu->arch.cr2 = v;
267 }
268
#ifdef __x86_64__
/*
 * setusergs: set the user %gs selector.  A PV guest cannot load
 * segment registers with kernel-visible side effects itself, so this
 * is done through the HYPERVISOR_set_segment_base hypercall.
 */
void
setusergs(int gssel)
{
	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
}
#endif
276