/* $NetBSD: hypervisor_machdep.c,v 1.35 2019/02/12 07:58:26 cherry Exp $ */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/******************************************************************************
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.35 2019/02/12 07:58:26 cherry Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenpmap.h>

#include "opt_xen.h"
#include "isa.h"
#include "pci.h"

#ifdef XENPV
/*
 * arch-dependent p2m frame lists list (L3 and L2)
 * used by Xen for save/restore mappings
 */
static unsigned long * l3_p2m_page;
static unsigned long * l2_p2m_page;
static int l2_p2m_page_size; /* size of L2 page, in pages */

static void build_p2m_frame_list_list(void);
static void update_p2m_frame_list_list(void);

#endif

// #define PORT_DEBUG 4
// #define EARLY_DEBUG_EVENT

/* callback function type */
typedef void (*iterate_func_t)(unsigned int, unsigned int,
    unsigned int, void *);

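/*
 * Run the iterate_pending callback for each pending event in the
 * two-level bitmap.  The L1 word selects which L2 words may have bits
 * set; each L2 bit identifies one event channel port, so that
 * port == (l1i << LONG_SHIFT) + l2i.  E.g. with LONG_SHIFT == 6
 * (LP64), port 67 is bit 3 of L2 word 1.  Only events routed to the
 * current CPU (ci_evtmask) are considered; when @mask is non-NULL,
 * already-masked events are skipped and the events about to be
 * handled are masked.
 */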
static inline void
evt_iterate_bits(volatile unsigned long *pendingl1,
    volatile unsigned long *pendingl2,
    volatile unsigned long *mask,
    iterate_func_t iterate_pending, void *iterate_args)
{

	KASSERT(pendingl1 != NULL);
	KASSERT(pendingl2 != NULL);

	unsigned long l1, l2;
	unsigned int l1i, l2i, port;

	l1 = xen_atomic_xchg(pendingl1, 0);
	while ((l1i = xen_ffs(l1)) != 0) {
		l1i--;
		l1 &= ~(1UL << l1i);

		l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
		l2 &= curcpu()->ci_evtmask[l1i];

		if (mask != NULL) xen_atomic_setbits_l(&mask[l1i], l2);
		xen_atomic_clearbits_l(&pendingl2[l1i], l2);

		while ((l2i = xen_ffs(l2)) != 0) {
			l2i--;
			l2 &= ~(1UL << l2i);

			port = (l1i << LONG_SHIFT) + l2i;

			iterate_pending(port, l1i, l2i, iterate_args);
		}
	}
}

/*
 * Set per-cpu "pending" information for outstanding events that
 * cannot be processed now.
 */

static inline void
evt_set_pending(unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{

	KASSERT(args != NULL);

	int *ret = args;

	if (evtsource[port]) {
		hypervisor_set_ipending(evtsource[port]->ev_imask, l1i, l2i);
		evtsource[port]->ev_evcnt.ev_count++;
		if (*ret == 0 && curcpu()->ci_ilevel <
		    evtsource[port]->ev_maxlevel)
			*ret = 1;
	}
#ifdef DOM0OPS
	else {
		/* set pending event */
		xenevt_setipending(l1i, l2i);
	}
#endif
}

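/*
 * Scan for pending events and record them as per-CPU pending
 * interrupt state without running any handler.  Returns 1 when an
 * event needs attention above the current IPL, 0 otherwise, so the
 * caller knows whether interrupt processing is still outstanding.
 */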
int stipending(void);
int
stipending(void)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int ret;

	ret = 0;
	ci = curcpu();
	vci = ci->ci_vcpu;

#if 0
	if (HYPERVISOR_shared_info->events)
		printf("stipending events %08lx mask %08lx ilevel %d\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * we're only called after STIC, so we know that we'll have to
	 * STI at the end
	 */

	while (vci->evtchn_upcall_pending) {
		cli();

		vci->evtchn_upcall_pending = 0;

		evt_iterate_bits(&vci->evtchn_pending_sel,
		    s->evtchn_pending, s->evtchn_mask,
		    evt_set_pending, &ret);

		sti();
	}

#if 0
	if (ci->ci_xpending & 0x1)
		printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
		    ci->ci_xpending);
#endif

	return (ret);
}

/* Iterate through pending events and call the event handler */

static inline void
evt_do_hypervisor_callback(unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{
	KASSERT(args != NULL);

	struct cpu_info *ci = curcpu();
	struct intrframe *regs = args;

#ifdef PORT_DEBUG
	if (port == PORT_DEBUG)
		printf("do_hypervisor_callback event %d\n", port);
#endif
	if (evtsource[port]) {
		ci->ci_idepth++;
		evtchn_do_event(port, regs);
		ci->ci_idepth--;
	}
#ifdef DOM0OPS
	else {
		if (ci->ci_ilevel < IPL_HIGH) {
			/* fast path */
			int oipl = ci->ci_ilevel;
			ci->ci_ilevel = IPL_HIGH;
			ci->ci_idepth++;
			xenevt_event(port);
			ci->ci_idepth--;
			ci->ci_ilevel = oipl;
		} else {
			/* set pending event */
			xenevt_setipending(l1i, l2i);
		}
	}
#endif
}

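/*
 * Main event-channel upcall body: run when the hypervisor notifies
 * this vCPU of pending events.  Loops until the upcall-pending flag
 * stays clear, dispatching every pending port through
 * evt_do_hypervisor_callback().
 */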
void
do_hypervisor_callback(struct intrframe *regs)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int level __diagused;

	ci = curcpu();
	vci = ci->ci_vcpu;
	level = ci->ci_ilevel;

	/* Save trapframe for clock handler */
	KASSERT(regs != NULL);
	ci->ci_xen_clockf_usermode = USERMODE(regs->_INTRFRAME_CS);
	ci->ci_xen_clockf_pc = regs->_INTRFRAME_IP;

	// DDD printf("do_hypervisor_callback\n");

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	while (vci->evtchn_upcall_pending) {
		vci->evtchn_upcall_pending = 0;

		evt_iterate_bits(&vci->evtchn_pending_sel,
		    s->evtchn_pending, s->evtchn_mask,
		    evt_do_hypervisor_callback, regs);
	}

#ifdef DIAGNOSTIC
	if (level != ci->ci_ilevel)
		printf("hypervisor done %08x level %d/%d ipending %08x\n",
		    (uint)vci->evtchn_pending_sel,
		    level, ci->ci_ilevel, ci->ci_xpending);
#endif
}

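/*
 * Raise event @ev for the vCPU of @ci: mark it pending and unmasked
 * in the shared info page, then notify the target, either by forcing
 * a callback on the local CPU or by sending a XEN_IPI_HVCB IPI to the
 * remote one.
 */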
void
hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
{
	KASSERT(ci != NULL);

	volatile shared_info_t *s = HYPERVISOR_shared_info;
	volatile struct vcpu_info *vci = ci->ci_vcpu;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_send_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_pending[0], ev);

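	/*
	 * For the local vCPU we can update the pending selector and
	 * upcall flag directly; a remote vCPU is kicked with an IPI
	 * further down instead.
	 */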
	if (__predict_false(ci == curcpu())) {
		xen_atomic_set_bit(&vci->evtchn_pending_sel,
		    ev >> LONG_SHIFT);
		xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
	}

	xen_atomic_clear_bit(&s->evtchn_mask[0], ev);

	if (__predict_true(ci == curcpu())) {
		hypervisor_force_callback();
	} else {
		if (__predict_false(xen_send_ipi(ci, XEN_IPI_HVCB))) {
			panic("xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
			    (int) ci->ci_cpuid);
		}
	}
}

void
hypervisor_unmask_event(unsigned int ev)
{

	KASSERT(ev > 0 && ev < NR_EVENT_CHANNELS);

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_unmask_event %d\n", ev);
#endif

	/* Xen unmasks the evtchn_mask[0]:ev bit for us. */
	evtchn_op_t op;
	op.cmd = EVTCHNOP_unmask;
	op.u.unmask.port = ev;
	if (HYPERVISOR_event_channel_op(&op) != 0)
		panic("Failed to unmask event %d\n", ev);

	return;
}

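/*
 * Mask event @ev: the hypervisor keeps recording it as pending, but
 * delivers no upcall for it until the event is unmasked again.
 */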
void
hypervisor_mask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_mask_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_mask[0], ev);
}

void
hypervisor_clear_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_clear_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
}

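/*
 * Callback for evt_iterate_bits(): re-enable one deferred event by
 * unmasking it (and, with PCI/ISA configured, acking a PIRQ that may
 * be bound to it).
 */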
static inline void
evt_enable_event(unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{
	KASSERT(args == NULL);
	hypervisor_unmask_event(port);
#if NPCI > 0 || NISA > 0
	hypervisor_ack_pirq_event(port);
#endif /* NPCI > 0 || NISA > 0 */
}

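/*
 * Unmask every event recorded in this IPL's ipl_evt_mask bitmaps.
 * Per the note below, by the time this runs all deferred callbacks
 * for those events have been processed.
 */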
void
hypervisor_enable_ipl(unsigned int ipl)
{
	struct cpu_info *ci = curcpu();

	/*
	 * Enable all events for this ipl. As we only set an event in
	 * ipl_evt_mask for its lowest IPL, and pending IPLs are
	 * processed high to low, we know that all callbacks for this
	 * event have been processed.
	 */

	evt_iterate_bits(&ci->ci_xsources[ipl]->ipl_evt_mask1,
	    ci->ci_xsources[ipl]->ipl_evt_mask2, NULL,
	    evt_enable_event, NULL);

}

void
hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
{

	/*
	 * This function is not re-entrant: the caller must have event
	 * delivery disabled (non-zero PSL).
	 */
	KASSERT(x86_read_psl() != 0);

	int ipl;
	struct cpu_info *ci = curcpu();

	/* set pending bit for the appropriate IPLs */
	ci->ci_xpending |= iplmask;

	/*
	 * And set the event pending bit for the lowest IPL. As IPLs
	 * are handled from high to low, this ensures that all
	 * callbacks will have been called when we ack the event.
	 */
	ipl = ffs(iplmask);
	KASSERT(ipl > 0);
	ipl--;
	KASSERT(ipl < NIPL);
	KASSERT(ci->ci_xsources[ipl] != NULL);
	ci->ci_xsources[ipl]->ipl_evt_mask1 |= 1UL << l1;
	ci->ci_xsources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
	if (__predict_false(ci != curcpu())) {
		if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
			panic("hypervisor_set_ipending: "
			    "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
			    (int) ci->ci_cpuid);
		}
	}
}

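/*
 * Attach-time hook: PV domUs build the arch-dependent p2m
 * frame-list-list here, which the toolstack needs for guest
 * save/restore.
 */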
void
hypervisor_machdep_attach(void)
{
#ifdef XENPV
	/* dom0 does not require the arch-dependent P2M translation table */
	if (!xendomain_is_dom0()) {
		build_p2m_frame_list_list();
		sysctl_xen_suspend_setup();
	}
#endif
}

void
hypervisor_machdep_resume(void)
{
#ifdef XENPV
	/* dom0 does not require the arch-dependent P2M translation table */
	if (!xendomain_is_dom0())
		update_p2m_frame_list_list();
#endif
}

#ifdef XENPV
/*
 * Generate the p2m_frame_list_list table,
 * needed for guest save/restore
 */
static void
build_p2m_frame_list_list(void)
{
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;
	/*
	 * The p2m list is composed of three levels of indirection,
	 * each layer containing MFNs pointing to lower-level pages.
	 * The indirection is used to convert a given PFN to its MFN:
	 * each N-level page can point to @fpp (N-1)-level pages.
	 * For example, for x86 32bit, we have:
	 * - PAGE_SIZE: 4096 bytes
	 * - fpp: 1024 (one L3 page can address 1024 L2 pages)
	 * An L1 page contains the list of MFNs we are looking for.
	 */
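	/*
	 * Illustrative numbers: on amd64, sizeof(xen_pfn_t) == 8, so
	 * fpp = 4096 / 8 = 512.  A domU with 1 GiB of RAM has
	 * max_pfn = 262144, giving l2_p2m_page_size =
	 * howmany(262144, 512) = 512 below.
	 */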
	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(xen_pfn_t);

	/* we only need one L3 page */
	l3_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map, PAGE_SIZE,
	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
	if (l3_p2m_page == NULL)
		panic("could not allocate memory for l3_p2m_page");

	/*
	 * Determine how many L2 pages we need for the mapping
	 * Each L2 can map a total of @fpp L1 pages
	 */
	l2_p2m_page_size = howmany(max_pfn, fpp);

	l2_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map,
	    l2_p2m_page_size * PAGE_SIZE,
	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
	if (l2_p2m_page == NULL)
		panic("could not allocate memory for l2_p2m_page");

	/* We now have L3 and L2 pages ready, update L1 mapping */
	update_p2m_frame_list_list();

}

/*
 * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
 */
static void
update_p2m_frame_list_list(void)
{
	int i;
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;

	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(xen_pfn_t);

	for (i = 0; i < l2_p2m_page_size; i++) {
		/*
		 * Each time we start a new L2 page,
		 * store its MFN in the L3 page
		 */
		if ((i % fpp) == 0) {
			l3_p2m_page[i/fpp] = vtomfn(
				(vaddr_t)&l2_p2m_page[i]);
		}
		/*
		 * We use a shortcut here: since the
		 * @xpmap_phys_to_machine_mapping array already
		 * contains the PFN-to-MFN mapping, we just set each
		 * l2_p2m_page MFN pointer to the MFN of the
		 * corresponding frame of
		 * @xpmap_phys_to_machine_mapping.
		 */
		l2_p2m_page[i] = vtomfn((vaddr_t)
			&xpmap_phys_to_machine_mapping[i*fpp]);
	}

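	/*
	 * Publish the L3 frame and the domain's highest PFN via the
	 * shared info page; the save/restore tools walk the p2m
	 * starting from these fields.
	 */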
	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
	    vtomfn((vaddr_t)l3_p2m_page);
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

}
#endif /* XENPV */