/*	$NetBSD: hypervisor_machdep.c,v 1.36.8.1 2020/04/12 17:25:52 bouyer Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/******************************************************************************
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.36.8.1 2020/04/12 17:25:52 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenpmap.h>

#include "opt_xen.h"
#include "isa.h"
#include "pci.h"

#ifdef XENPV
/*
 * arch-dependent p2m frame lists list (L3 and L2)
 * used by Xen for save/restore mappings
 */
static unsigned long * l3_p2m_page;
static unsigned long * l2_p2m_page;
static int l2_p2m_page_size; /* size of L2 page, in pages */

static void build_p2m_frame_list_list(void);
static void update_p2m_frame_list_list(void);

#endif

// #define PORT_DEBUG 4
// #define EARLY_DEBUG_EVENT

/* callback function type */
typedef void (*iterate_func_t)(unsigned int, unsigned int,
    unsigned int, void *);

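/*
 * Scan Xen's two-level event bitmap.  The L1 selector word (*pendingl1,
 * fetched and cleared atomically) tells which words of @pendingl2 hold
 * pending bits.  Bits masked in @mask (if given) or not routed to this
 * CPU (ci_evtmask) are skipped; the remaining ports are masked and
 * cleared from the pending bitmap, then handed one at a time to
 * @iterate_pending as port = (l1i << LONG_SHIFT) + l2i.
 */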
static inline void
evt_iterate_bits(volatile unsigned long *pendingl1,
    volatile unsigned long *pendingl2,
    volatile unsigned long *mask,
    iterate_func_t iterate_pending, void *iterate_args)
{

	KASSERT(pendingl1 != NULL);
	KASSERT(pendingl2 != NULL);

	unsigned long l1, l2;
	unsigned int l1i, l2i, port;

	l1 = xen_atomic_xchg(pendingl1, 0);
	while ((l1i = xen_ffs(l1)) != 0) {
		l1i--;
		l1 &= ~(1UL << l1i);

		l2 = pendingl2[l1i] & (mask != NULL ? ~mask[l1i] : -1UL);
		l2 &= curcpu()->ci_evtmask[l1i];

		if (mask != NULL) xen_atomic_setbits_l(&mask[l1i], l2);
		xen_atomic_clearbits_l(&pendingl2[l1i], l2);

		while ((l2i = xen_ffs(l2)) != 0) {
			l2i--;
			l2 &= ~(1UL << l2i);

			port = (l1i << LONG_SHIFT) + l2i;

			iterate_pending(port, l1i, l2i, iterate_args);
		}
	}
}

/*
 * Set per-cpu "pending" information for outstanding events that
 * cannot be processed now.
 */

static inline void
evt_set_pending(unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{

	KASSERT(args != NULL);

	int *ret = args;

	if (evtsource[port]) {
		hypervisor_set_ipending(evtsource[port]->ev_imask, l1i, l2i);
		evtsource[port]->ev_evcnt.ev_count++;
		if (*ret == 0 && curcpu()->ci_ilevel <
		    evtsource[port]->ev_maxlevel)
			*ret = 1;
	}
#ifdef DOM0OPS
	else {
		/* set pending event */
		xenevt_setipending(l1i, l2i);
	}
#endif
}

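/*
 * stipending() is called from the low-level spl/interrupt-return code
 * (hence, presumably, the name: "STI pending").  It records events that
 * became pending while the virtual interrupt flag was clear, via
 * evt_set_pending(), and returns nonzero if one of them unblocks an IPL
 * above the current one, i.e. the caller has interrupt work to do.
 */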
int stipending(void);
int
stipending(void)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int ret;

	ret = 0;
	ci = curcpu();
	vci = ci->ci_vcpu;

#if 0
	if (HYPERVISOR_shared_info->events)
		printf("stipending events %08lx mask %08lx ilevel %d\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * we're only called after STIC, so we know that we'll have to
	 * STI at the end
	 */

	while (vci->evtchn_upcall_pending) {
		x86_disable_intr();

		vci->evtchn_upcall_pending = 0;

		evt_iterate_bits(&vci->evtchn_pending_sel,
		    s->evtchn_pending, s->evtchn_mask,
		    evt_set_pending, &ret);

		x86_enable_intr();
	}

	return (ret);
}

/* Iterate through pending events and call the event handler */

static inline void
evt_do_hypervisor_callback(unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{
	KASSERT(args != NULL);

	struct cpu_info *ci = curcpu();
	struct intrframe *regs = args;

#ifdef PORT_DEBUG
	if (port == PORT_DEBUG)
		printf("do_hypervisor_callback event %d\n", port);
#endif
	if (evtsource[port]) {
		ci->ci_idepth++;
		evtchn_do_event(port, regs);
		ci->ci_idepth--;
	}
#ifdef DOM0OPS
	else {
		if (ci->ci_ilevel < IPL_HIGH) {
			/* fast path */
			int oipl = ci->ci_ilevel;
			ci->ci_ilevel = IPL_HIGH;
			ci->ci_idepth++;
			xenevt_event(port);
			ci->ci_idepth--;
			ci->ci_ilevel = oipl;
		} else {
			/* set pending event */
			xenevt_setipending(l1i, l2i);
		}
	}
#endif
}

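/*
 * Entry point for Xen event-channel upcalls.  Drains
 * vci->evtchn_upcall_pending and dispatches every pending port through
 * evt_do_hypervisor_callback() until no more events are posted.
 */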
void
do_hypervisor_callback(struct intrframe *regs)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int level __diagused;

	ci = curcpu();
	vci = ci->ci_vcpu;
	level = ci->ci_ilevel;

	/* Save trapframe for clock handler */
	KASSERT(regs != NULL);
	ci->ci_xen_clockf_usermode = USERMODE(regs->_INTRFRAME_CS);
	ci->ci_xen_clockf_pc = regs->_INTRFRAME_IP;

	// DDD printf("do_hypervisor_callback\n");

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	while (vci->evtchn_upcall_pending) {
		vci->evtchn_upcall_pending = 0;

		evt_iterate_bits(&vci->evtchn_pending_sel,
		    s->evtchn_pending, s->evtchn_mask,
		    evt_do_hypervisor_callback, regs);
	}

#ifdef DIAGNOSTIC
	if (level != ci->ci_ilevel)
		printf("hypervisor done %08x level %d/%d ipending %08x\n",
		    (uint)vci->evtchn_pending_sel,
		    level, ci->ci_ilevel, ci->ci_ipending);
#endif
}

void
hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
{
	KASSERT(ci != NULL);

	volatile shared_info_t *s = HYPERVISOR_shared_info;
	volatile struct vcpu_info *vci = ci->ci_vcpu;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_send_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_pending[0], ev);

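	/*
	 * Only set the local vCPU's selector and upcall-pending bits:
	 * the callback is forced locally below, while a remote vCPU is
	 * kicked with XEN_IPI_HVCB and rescans its pending bits itself.
	 */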
	if (__predict_false(ci == curcpu())) {
		xen_atomic_set_bit(&vci->evtchn_pending_sel,
		    ev >> LONG_SHIFT);
		xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
	}

	xen_atomic_clear_bit(&s->evtchn_mask[0], ev);

	if (__predict_true(ci == curcpu())) {
		hypervisor_force_callback();
	} else {
		if (__predict_false(xen_send_ipi(ci, XEN_IPI_HVCB))) {
			panic("xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
			    (int) ci->ci_cpuid);
		}
	}
}

void
hypervisor_unmask_event(unsigned int ev)
{

	KASSERT(ev > 0 && ev < NR_EVENT_CHANNELS);

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_unmask_event %d\n", ev);
#endif

	/*
	 * Xen unmasks the evtchn_mask[0]:ev bit for us, and re-raises
	 * the upcall if the port was already pending.
	 */
	evtchn_op_t op;
	op.cmd = EVTCHNOP_unmask;
	op.u.unmask.port = ev;
	if (HYPERVISOR_event_channel_op(&op) != 0)
		panic("Failed to unmask event %d\n", ev);

	return;
}

void
hypervisor_mask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_mask_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_mask[0], ev);
}

void
hypervisor_clear_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_clear_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
}

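/*
 * Per-port callback for hypervisor_enable_sir(): re-enable one event
 * that had been left masked while its IPL was blocked.
 */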
static inline void
evt_enable_event(unsigned int port, unsigned int l1i,
    unsigned int l2i, void *args)
{
	KASSERT(args == NULL);
	hypervisor_unmask_event(port);
#if NPCI > 0 || NISA > 0
	hypervisor_ack_pirq_event(port);
#endif /* NPCI > 0 || NISA > 0 */
}

void
hypervisor_enable_sir(unsigned int sir)
{
	struct cpu_info *ci = curcpu();

	/*
	 * Enable all events for this SIR.  As we only set an event in
	 * ipl_evt_mask for its lowest IPL, and pending IPLs are processed
	 * high to low, we know that all callbacks for this event have
	 * been processed.
	 */

	evt_iterate_bits(&ci->ci_isources[sir]->ipl_evt_mask1,
	    ci->ci_isources[sir]->ipl_evt_mask2, NULL,
	    evt_enable_event, NULL);

}

void
hypervisor_set_ipending(uint32_t imask, int l1, int l2)
{

	/* This function is not re-entrant */
	KASSERT(x86_read_psl() != 0);

	int sir;
	struct cpu_info *ci = curcpu();

	/* set pending bit for the appropriate IPLs */
	ci->ci_ipending |= imask;

	/*
	 * And set the event pending bit for the lowest IPL.  As IPLs are
	 * handled from high to low, this ensures that all callbacks will
	 * have been called when we ack the event.
	 */
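	/*
	 * ipl_evt_mask1/ipl_evt_mask2 mirror the layout of Xen's
	 * evtchn_pending_sel/evtchn_pending bitmaps, so that
	 * hypervisor_enable_sir() can walk them with evt_iterate_bits().
	 */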
	sir = ffs(imask);
	KASSERT(sir > SIR_XENIPL_VM);
	sir--;
	KASSERT(sir <= SIR_XENIPL_HIGH);
	KASSERT(ci->ci_isources[sir] != NULL);
	ci->ci_isources[sir]->ipl_evt_mask1 |= 1UL << l1;
	ci->ci_isources[sir]->ipl_evt_mask2[l1] |= 1UL << l2;
	if (__predict_false(ci != curcpu())) {
		if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
			panic("hypervisor_set_ipending: "
			    "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
			    (int) ci->ci_cpuid);
		}
	}
}

void
hypervisor_machdep_attach(void)
{
#ifdef XENPV
	/* dom0 does not require the arch-dependent P2M translation table */
	if (!xendomain_is_dom0()) {
		build_p2m_frame_list_list();
		sysctl_xen_suspend_setup();
	}
#endif
}

void
hypervisor_machdep_resume(void)
{
#ifdef XENPV
	/* dom0 does not require the arch-dependent P2M translation table */
	if (!xendomain_is_dom0())
		update_p2m_frame_list_list();
#endif
}

#ifdef XENPV
/*
 * Generate the p2m_frame_list_list table,
 * needed for guest save/restore
 */
static void
build_p2m_frame_list_list(void)
{
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;
	/*
	 * The p2m list is composed of three levels of indirection,
	 * each layer containing MFNs pointing to lower level pages.
	 * The indirection is used to convert a given PFN to its MFN:
	 * each level-N page can point to @fpp level-(N-1) pages.
	 * For example, for x86 32bit, we have:
	 * - PAGE_SIZE: 4096 bytes
	 * - fpp: 1024 (one L3 page can address 1024 L2 pages)
	 * An L1 page contains the list of MFNs we are looking for.
	 */
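	/*
	 * On x86_64, xen_pfn_t is 8 bytes, so fpp is 512: one L3 page
	 * addresses 512 L2 pages.
	 */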
	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(xen_pfn_t);

	/* we only need one L3 page */
	l3_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map, PAGE_SIZE,
	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
	if (l3_p2m_page == NULL)
		panic("could not allocate memory for l3_p2m_page");

	/*
	 * Determine how many L2 pages we need for the mapping.
	 * Each L2 can map a total of @fpp L1 pages.
	 */
	l2_p2m_page_size = howmany(max_pfn, fpp);

	l2_p2m_page = (vaddr_t *)uvm_km_alloc(kernel_map,
	    l2_p2m_page_size * PAGE_SIZE,
	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
	if (l2_p2m_page == NULL)
		panic("could not allocate memory for l2_p2m_page");

	/* We now have L3 and L2 pages ready, update L1 mapping */
	update_p2m_frame_list_list();

}

/*
 * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
 */
static void
update_p2m_frame_list_list(void)
{
	int i;
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;

	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(xen_pfn_t);

	for (i = 0; i < l2_p2m_page_size; i++) {
		/*
		 * Each time we start a new L2 page,
		 * store its MFN in the L3 page
		 */
		if ((i % fpp) == 0) {
			l3_p2m_page[i/fpp] = vtomfn(
			    (vaddr_t)&l2_p2m_page[i]);
		}
		/*
		 * We use a shortcut: since the
		 * @xpmap_phys_to_machine_mapping array already contains
		 * the PFN-to-MFN mapping, we just set the l2_p2m_page
		 * MFN pointer to the MFN of the corresponding frame of
		 * @xpmap_phys_to_machine_mapping.
		 */
		l2_p2m_page[i] = vtomfn((vaddr_t)
		    &xpmap_phys_to_machine_mapping[i*fpp]);
	}

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
	    vtomfn((vaddr_t)l3_p2m_page);
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

}
#endif /* XENPV */