/*	$NetBSD: hypervisor_machdep.c,v 1.11 2008/10/21 15:46:32 cegger Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/******************************************************************************
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.11 2008/10/21 15:46:32 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenpmap.h>

#include "opt_xen.h"

#ifdef XEN3
/*
 * arch-dependent p2m frame lists list (L3 and L2)
 * used by Xen for save/restore mappings
 */
static unsigned long * l3_p2m_page;
static unsigned long * l2_p2m_page;
static int l2_p2m_page_size; /* size of L2 page, in pages */

static void build_p2m_frame_list_list(void);
static void update_p2m_frame_list_list(void);
#endif /* XEN3 */
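
/*
 * A sketch of how the three levels fit together (illustrative, assuming
 * the x86-32 figures used in build_p2m_frame_list_list() below, i.e.
 * 1024 frame numbers per 4 KB page):
 *
 *   l3_p2m_page[k] = MFN of the k-th page of l2_p2m_page[]
 *   l2_p2m_page[j] = MFN of the j-th page of xpmap_phys_to_machine_mapping[]
 *   xpmap_phys_to_machine_mapping[pfn] = MFN backing that PFN
 *
 * so translating a PFN costs at most three frame lookups.
 */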

// #define PORT_DEBUG 4
// #define EARLY_DEBUG_EVENT

int stipending(void);
int
stipending(void)
{
	unsigned long l1;
	unsigned long l2;
	unsigned int l1i, l2i, port;
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int ret;

	ret = 0;
	ci = curcpu();
	vci = ci->ci_vcpu;

#if 0
	if (HYPERVISOR_shared_info->events)
		printf("stipending events %08lx mask %08lx ilevel %d\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * we're only called after STIC, so we know that we'll have to
	 * STI at the end
	 */
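	/*
	 * Note: the cli()/sti() used in this loop are assumed to be the
	 * Xen paravirtualized variants, which mask and unmask event
	 * delivery through vci->evtchn_upcall_mask rather than toggling
	 * the hardware interrupt flag.
	 */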
	while (vci->evtchn_upcall_pending) {
		cli();
		vci->evtchn_upcall_pending = 0;
		/* NB. No need for a barrier here -- XCHG is a barrier
		 * on x86. */
#ifdef XEN3
		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
#else
		l1 = xen_atomic_xchg(&s->evtchn_pending_sel, 0);
#endif
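		/*
		 * l1 now holds a snapshot of the selector word: bit l1i set
		 * means word l1i of s->evtchn_pending[] has at least one
		 * event pending.  The nested loops below walk that
		 * hierarchy one level at a time.
		 */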
		while ((l1i = xen_ffs(l1)) != 0) {
			l1i--;
			l1 &= ~(1UL << l1i);

			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
			/*
			 * Mask and clear the events. More efficient than
			 * calling hypervisor_mask/clear_event for each event.
			 */
			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
			while ((l2i = xen_ffs(l2)) != 0) {
				l2i--;
				l2 &= ~(1UL << l2i);

				port = (l1i << LONG_SHIFT) + l2i;
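				/*
				 * Worked example (illustrative): with 32-bit
				 * longs LONG_SHIFT is 5, so event channel 37
				 * shows up as bit l2i = 5 of word l1i = 1
				 * (37 == (1 << 5) + 5); with 64-bit longs
				 * LONG_SHIFT is 6.
				 */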
				if (evtsource[port]) {
					hypervisor_set_ipending(
					    evtsource[port]->ev_imask,
					    l1i, l2i);
					evtsource[port]->ev_evcnt.ev_count++;
					if (ret == 0 && ci->ci_ilevel <
					    evtsource[port]->ev_maxlevel)
						ret = 1;
				}
#ifdef DOM0OPS
				else {
					/* set pending event */
					xenevt_setipending(l1i, l2i);
				}
#endif
			}
		}
		sti();
	}

#if 0
	if (ci->ci_ipending & 0x1)
		printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
		    ci->ci_ipending);
#endif

	return (ret);
}

void
do_hypervisor_callback(struct intrframe *regs)
{
	unsigned long l1;
	unsigned long l2;
	unsigned int l1i, l2i, port;
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int level;

	ci = curcpu();
	vci = ci->ci_vcpu;
	level = ci->ci_ilevel;

	// DDD printf("do_hypervisor_callback\n");

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	while (vci->evtchn_upcall_pending) {
		vci->evtchn_upcall_pending = 0;
		/* NB. No need for a barrier here -- XCHG is a barrier
		 * on x86. */
#ifdef XEN3
		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
#else
		l1 = xen_atomic_xchg(&s->evtchn_pending_sel, 0);
#endif
		while ((l1i = xen_ffs(l1)) != 0) {
			l1i--;
			l1 &= ~(1UL << l1i);

			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
			/*
			 * Mask and clear the pending events.
			 * Doing it here for all events that will be processed
			 * avoids a race with stipending (which can be called
			 * through evtchn_do_event->splx) that could cause an
			 * event to be both processed and marked pending.
			 */
			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
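
			/*
			 * The handlers called below therefore run with these
			 * channels masked; they are expected to be unmasked
			 * again later (e.g. via hypervisor_enable_event() /
			 * hypervisor_enable_ipl()) once the corresponding IPL
			 * has been serviced.
			 */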

			while ((l2i = xen_ffs(l2)) != 0) {
				l2i--;
				l2 &= ~(1UL << l2i);

				port = (l1i << LONG_SHIFT) + l2i;
#ifdef PORT_DEBUG
				if (port == PORT_DEBUG)
					printf("do_hypervisor_callback event %d\n", port);
#endif
				if (evtsource[port])
					call_evtchn_do_event(port, regs);
#ifdef DOM0OPS
				else {
					if (ci->ci_ilevel < IPL_HIGH) {
						/* fast path */
						int oipl = ci->ci_ilevel;
						ci->ci_ilevel = IPL_HIGH;
						call_xenevt_event(port);
						ci->ci_ilevel = oipl;
					} else {
						/* set pending event */
						xenevt_setipending(l1i, l2i);
					}
				}
#endif
			}
		}
	}

#ifdef DIAGNOSTIC
	if (level != ci->ci_ilevel)
		printf("hypervisor done %08x level %d/%d ipending %08x\n",
#ifdef XEN3
		    (uint)vci->evtchn_pending_sel,
#else
		    (uint)HYPERVISOR_shared_info->evtchn_pending_sel,
#endif
		    level, ci->ci_ilevel, ci->ci_ipending);
#endif
}

void
hypervisor_unmask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	volatile struct vcpu_info *vci = curcpu()->ci_vcpu;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_unmask_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_mask[0], ev);
	/*
	 * The following is basically the equivalent of
	 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the
	 * interrupt edge' if the channel is masked.
	 */
	if (xen_atomic_test_bit(&s->evtchn_pending[0], ev) &&
#ifdef XEN3
	    !xen_atomic_test_and_set_bit(&vci->evtchn_pending_sel, ev>>LONG_SHIFT)) {
#else
	    !xen_atomic_test_and_set_bit(&s->evtchn_pending_sel, ev>>LONG_SHIFT)) {
#endif
		xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
		if (!vci->evtchn_upcall_mask)
			hypervisor_force_callback();
	}
}

void
hypervisor_mask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_mask_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_mask[0], ev);
}

void
hypervisor_clear_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_clear_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
}
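
/*
 * Minimal usage sketch (illustrative only, never compiled): a consumer of
 * an event channel `ev' could pair the primitives above as shown below.
 * The handler handle_port() is hypothetical; real consumers normally
 * register through evtchn.c instead of calling these routines directly.
 */
#if 0
static void
example_handle_event(unsigned int ev)
{

	hypervisor_mask_event(ev);	/* no further upcalls for ev */
	hypervisor_clear_event(ev);	/* acknowledge the pending bit */
	handle_port(ev);		/* hypothetical per-port handler */
	hypervisor_unmask_event(ev);	/* re-enable; resends if still pending */
}
#endif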

void
hypervisor_enable_ipl(unsigned int ipl)
{
	u_long l1, l2;
	int l1i, l2i;
	struct cpu_info *ci = curcpu();

	/*
	 * Enable all events for ipl. As we only set an event in ipl_evt_mask
	 * for its lowest IPL, and pending IPLs are processed high to low,
	 * we know that all callbacks for this event have been processed.
	 */
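	/*
	 * ipl_evt_mask1/ipl_evt_mask2 mirror the two-level layout of
	 * evtchn_pending_sel/evtchn_pending[]; they are filled in by
	 * hypervisor_set_ipending() below, so the scan here is the same
	 * word-then-bit walk used by stipending() and
	 * do_hypervisor_callback().
	 */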

	l1 = ci->ci_isources[ipl]->ipl_evt_mask1;
	ci->ci_isources[ipl]->ipl_evt_mask1 = 0;
	while ((l1i = xen_ffs(l1)) != 0) {
		l1i--;
		l1 &= ~(1UL << l1i);
		l2 = ci->ci_isources[ipl]->ipl_evt_mask2[l1i];
		ci->ci_isources[ipl]->ipl_evt_mask2[l1i] = 0;
		while ((l2i = xen_ffs(l2)) != 0) {
			int evtch;

			l2i--;
			l2 &= ~(1UL << l2i);

			evtch = (l1i << LONG_SHIFT) + l2i;
			hypervisor_enable_event(evtch);
		}
	}
}

void
hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
{
	int ipl;
	struct cpu_info *ci = curcpu();

	/* set pending bit for the appropriate IPLs */
	ci->ci_ipending |= iplmask;

	/*
	 * And set the event pending bit for the lowest IPL. As IPLs are
	 * handled from high to low, this ensures that all callbacks will
	 * have been called when we ack the event.
	 */
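	/*
	 * Illustrative example: if iplmask is 0x60 (handlers at IPLs 5 and
	 * 6 on this event channel), ffs() below returns 6, so after the
	 * decrement the event bits are recorded under ipl 5, the lowest of
	 * the two.
	 */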
	ipl = ffs(iplmask);
	KASSERT(ipl > 0);
	ipl--;
	ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
	ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
}

void
hypervisor_machdep_attach(void) {

#ifdef XEN3
	/* dom0 does not require the arch-dependent P2M translation table */
	if ( !xendomain_is_dom0() ) {
		build_p2m_frame_list_list();
	}
#endif

}

#ifdef XEN3
/*
 * Generate the p2m_frame_list_list table,
 * needed for guest save/restore
 */
static void
build_p2m_frame_list_list(void) {

	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;
	/*
	 * The p2m list is composed of three levels of indirection,
	 * each layer containing MFNs pointing to lower level pages.
	 * The indirection is used to convert a given PFN to its MFN.
	 * Each N-level page can point to @fpp (N-1)-level pages.
	 * For example, for x86 32bit, we have:
	 * - PAGE_SIZE: 4096 bytes
	 * - fpp: 1024 (one L3 page can address 1024 L2 pages)
	 * An L1 page contains the list of MFNs we are looking for.
	 */
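	/*
	 * Worked example (illustrative): a 128 MB x86-32 domU has
	 * max_pfn = 32768; with fpp = 1024 the howmany() below gives
	 * l2_p2m_page_size = 32, and a single L3 entry covers those 32
	 * L2 slots.
	 */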
	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(paddr_t);

	/* we only need one L3 page */
	l3_p2m_page = kmem_alloc(PAGE_SIZE, KM_NOSLEEP);
	if (l3_p2m_page == NULL)
		panic("could not allocate memory for l3_p2m_page");

	/*
	 * Determine how many L2 pages we need for the mapping.
	 * Each L2 can map a total of @fpp L1 pages.
	 */
	l2_p2m_page_size = howmany(max_pfn, fpp);

	l2_p2m_page = kmem_alloc(l2_p2m_page_size * PAGE_SIZE, KM_NOSLEEP);
	if (l2_p2m_page == NULL)
		panic("could not allocate memory for l2_p2m_page");

	/* We now have L3 and L2 pages ready, update L1 mapping */
	update_p2m_frame_list_list();

}

/*
 * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
 */
static void
update_p2m_frame_list_list(void) {

	int i;
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;

	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(paddr_t);

	for (i = 0; i < l2_p2m_page_size; i++) {
		/*
		 * Each time we start a new L2 page,
		 * store its MFN in the L3 page
		 */
		if ((i % fpp) == 0) {
			l3_p2m_page[i/fpp] = vtomfn(
				(vaddr_t)&l2_p2m_page[i]);
		}
		/*
		 * We use a shortcut here: since the
		 * @xpmap_phys_to_machine_mapping array already contains
		 * the PFN-to-MFN mapping, we just set each l2_p2m_page
		 * entry to the MFN of the corresponding frame of
		 * @xpmap_phys_to_machine_mapping.
		 */
		l2_p2m_page[i] = vtomfn((vaddr_t)
			&xpmap_phys_to_machine_mapping[i*fpp]);
	}

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		vtomfn((vaddr_t)l3_p2m_page);
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
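
	/*
	 * Presumably (an assumption, not spelled out here) Xen's
	 * save/restore machinery follows pfn_to_mfn_frame_list_list and
	 * arch.max_pfn to locate the guest's p2m table, which is why both
	 * fields are refreshed on boot and on resume.
	 */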

}
#endif /* XEN3 */
