/*	$NetBSD: xenfunc.c,v 1.21 2018/09/23 15:28:49 cherry Exp $	*/

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.21 2018/09/23 15:28:49 cherry Exp $");

#include <sys/param.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/vmparam.h>
#include <machine/pmap.h>
#include <xen/xen.h>
#include <xen/hypervisor.h>
//#include <xen/evtchn.h>
#include <xen/xenpmap.h>
#include <machine/pte.h>

void xen_set_ldt(vaddr_t, uint32_t);

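/*
 * Flush a single TLB entry.  Under Xen PV this goes through the MMU
 * operation queue rather than the native invlpg instruction.
 */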
void
invlpg(vaddr_t addr)
{
	int s = splvm(); /* XXXSMP */
	xpq_queue_invlpg(addr);
	splx(s);
}

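/*
 * Load the IDT.  Xen does not use a native IDT for PV guests; the
 * initialised gates are repacked into a trap_info array and registered
 * with HYPERVISOR_set_trap_table().
 */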
void
lidt(struct region_descriptor *rd)
{
	/*
	 * We need to do this because we can't assume kmem_alloc(9)
	 * will be available at the boot stage when this is called.
	 */
	static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE)));

	struct trap_info *xen_idt = (void *)xen_idt_page;
	int xen_idt_idx = 0;

	struct trap_info *idd = (void *)rd->rd_base;
	const int nidt = rd->rd_limit / (sizeof *idd);

	int i;

	/*
	 * Sweep up all initialised entries and consolidate them
	 * back to back in the array handed to the hypervisor.
	 */
	for (i = 0; i < nidt; i++) {
		if (idd[i].address == 0)	/* Skip gap */
			continue;

		/* Copy over entry */
		xen_idt[xen_idt_idx++] = idd[i];
	}

#if defined(__x86_64__)
	/* page needs to be r/o */
	pmap_changeprot_local((vaddr_t)xen_idt, VM_PROT_READ);
#endif /* __x86_64__ */

	/* Hook it up in the hypervisor */
	if (HYPERVISOR_set_trap_table(xen_idt))
		panic("HYPERVISOR_set_trap_table() failed");

#if defined(__x86_64__)
	/* reset */
	pmap_changeprot_local((vaddr_t)xen_idt, VM_PROT_READ|VM_PROT_WRITE);
#endif /* __x86_64__ */
}

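/*
 * Load the LDT.  The native instruction is privileged, so the descriptor
 * is handed to Xen via xen_set_ldt().  Nothing to do here on amd64.
 */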
void
lldt(u_short sel)
{
#ifndef __x86_64__
	struct cpu_info *ci;

	ci = curcpu();

	if (ci->ci_curldt == sel)
		return;
	if (sel == GSEL(GLDT_SEL, SEL_KPL))
		xen_set_ldt((vaddr_t)ldtstore, NLDT);
	else
		xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base,
		    ci->ci_gdt[IDXSELN(sel)].ld.ld_entries);
	ci->ci_curldt = sel;
#endif
}

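/*
 * The task register and %cr0 are under hypervisor control in a PV guest:
 * ltr() and lcr0() are unsupported stubs, rcr0() returns a dummy value.
 */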
void
ltr(u_short sel)
{
	panic("XXX ltr not supported\n");
}

void
lcr0(u_long val)
{
	panic("XXX lcr0 not supported\n");
}

u_long
rcr0(void)
{
	/* XXX: handle X86_CR0_TS ? */
	return 0;
}

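/*
 * Switch the page table base (i386 only).  The pseudo-physical address is
 * translated to a machine address and the switch is queued with Xen.
 */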
#ifndef __x86_64__
void
lcr3(vaddr_t val)
{
	int s = splvm(); /* XXXSMP */
	xpq_queue_pt_switch(xpmap_ptom_masked(val));
	splx(s);
}
#endif

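/*
 * Full TLB flush via the Xen MMU operation queue.  tlbflushg(), which
 * natively also drops global mappings, maps to the same operation here.
 */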
void
tlbflush(void)
{
	int s = splvm(); /* XXXSMP */
	xpq_queue_tlb_flush();
	splx(s);
}

void
tlbflushg(void)
{
	tlbflush();
}

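/*
 * Debug registers %dr0-%dr3, %dr6 and %dr7 are accessed through the
 * HYPERVISOR_get_debugreg()/HYPERVISOR_set_debugreg() hypercalls.
 */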
register_t
rdr0(void)
{

	return HYPERVISOR_get_debugreg(0);
}

void
ldr0(register_t val)
{

	HYPERVISOR_set_debugreg(0, val);
}

register_t
rdr1(void)
{

	return HYPERVISOR_get_debugreg(1);
}

void
ldr1(register_t val)
{

	HYPERVISOR_set_debugreg(1, val);
}

register_t
rdr2(void)
{

	return HYPERVISOR_get_debugreg(2);
}

void
ldr2(register_t val)
{

	HYPERVISOR_set_debugreg(2, val);
}

register_t
rdr3(void)
{

	return HYPERVISOR_get_debugreg(3);
}

void
ldr3(register_t val)
{

	HYPERVISOR_set_debugreg(3, val);
}

register_t
rdr6(void)
{

	return HYPERVISOR_get_debugreg(6);
}

void
ldr6(register_t val)
{

	HYPERVISOR_set_debugreg(6, val);
}

register_t
rdr7(void)
{

	return HYPERVISOR_get_debugreg(7);
}

void
ldr7(register_t val)
{

	HYPERVISOR_set_debugreg(7, val);
}

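/* Write back and invalidate the cache; forwarded to the hypervisor. */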
void
wbinvd(void)
{

	xpq_flush_cache();
}

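/*
 * %cr2 (the page fault address) is not directly readable by a PV guest;
 * Xen publishes it in the per-vcpu info instead.
 */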
vaddr_t
rcr2(void)
{
	return curcpu()->ci_vcpu->arch.cr2;
}

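/*
 * Set the user %gs selector through the hypervisor; Xen loads it when
 * returning to user mode (amd64 only).
 */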
#ifdef __x86_64__
void
setusergs(int gssel)
{
	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
}
#endif