/* $NetBSD: hypervisor_machdep.c,v 1.11 2008/10/21 15:46:32 cegger Exp $ */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/******************************************************************************
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.11 2008/10/21 15:46:32 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenpmap.h>

#include "opt_xen.h"

#ifdef XEN3
/*
 * arch-dependent p2m frame lists list (L3 and L2)
 * used by Xen for save/restore mappings
 */
static unsigned long * l3_p2m_page;
static unsigned long * l2_p2m_page;
static int l2_p2m_page_size; /* size of the L2 list, in pages */

static void build_p2m_frame_list_list(void);
static void update_p2m_frame_list_list(void);
#endif /* XEN3 */

// #define PORT_DEBUG 4
// #define EARLY_DEBUG_EVENT

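/*
 * Scan the shared info page for pending, unmasked event channels and
 * record each one as pending at its IPLs.  Returns non-zero when a
 * pending event requires service above the current interrupt level,
 * i.e. when the caller must process interrupts before returning.
 */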
int stipending(void);
int
stipending(void)
{
	unsigned long l1;
	unsigned long l2;
	unsigned int l1i, l2i, port;
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int ret;

	ret = 0;
	ci = curcpu();
	vci = ci->ci_vcpu;

#if 0
	if (HYPERVISOR_shared_info->events)
		printf("stipending events %08lx mask %08lx ilevel %d\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * we're only called after STIC, so we know that we'll have to
	 * STI at the end
	 */
	while (vci->evtchn_upcall_pending) {
		cli();
		vci->evtchn_upcall_pending = 0;
		/* NB. No need for a barrier here -- XCHG is a barrier
		 * on x86. */
#ifdef XEN3
		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
#else
		l1 = xen_atomic_xchg(&s->evtchn_pending_sel, 0);
#endif
		while ((l1i = xen_ffs(l1)) != 0) {
			l1i--;
			l1 &= ~(1UL << l1i);

			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
			/*
			 * mask and clear event. More efficient than calling
			 * hypervisor_mask/clear_event for each event.
			 */
			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);
			while ((l2i = xen_ffs(l2)) != 0) {
				l2i--;
				l2 &= ~(1UL << l2i);

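				/*
				 * Event channel numbers are encoded in the
				 * two-level bitmap as
				 * (word index << LONG_SHIFT) + bit index:
				 * l1i selects a word of evtchn_pending[],
				 * l2i a bit within that word.
				 */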
				port = (l1i << LONG_SHIFT) + l2i;
				if (evtsource[port]) {
					hypervisor_set_ipending(
					    evtsource[port]->ev_imask,
					    l1i, l2i);
					evtsource[port]->ev_evcnt.ev_count++;
					if (ret == 0 && ci->ci_ilevel <
					    evtsource[port]->ev_maxlevel)
						ret = 1;
				}
#ifdef DOM0OPS
				else {
					/* set pending event */
					xenevt_setipending(l1i, l2i);
				}
#endif
			}
		}
		sti();
	}

#if 0
	if (ci->ci_ipending & 0x1)
		printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
		    ci->ci_ipending);
#endif

	return (ret);
}

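/*
 * Entry point for the Xen event upcall: drain all pending, unmasked
 * event channels and dispatch each one to its handler via
 * call_evtchn_do_event() (or, under DOM0OPS, to xenevt for ports
 * without a kernel event source).
 */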
void
do_hypervisor_callback(struct intrframe *regs)
{
	unsigned long l1;
	unsigned long l2;
	unsigned int l1i, l2i, port;
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int level;

	ci = curcpu();
	vci = ci->ci_vcpu;
	level = ci->ci_ilevel;

	// DDD printf("do_hypervisor_callback\n");

#ifdef EARLY_DEBUG_EVENT
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	while (vci->evtchn_upcall_pending) {
		vci->evtchn_upcall_pending = 0;
		/* NB. No need for a barrier here -- XCHG is a barrier
		 * on x86. */
#ifdef XEN3
		l1 = xen_atomic_xchg(&vci->evtchn_pending_sel, 0);
#else
		l1 = xen_atomic_xchg(&s->evtchn_pending_sel, 0);
#endif
		while ((l1i = xen_ffs(l1)) != 0) {
			l1i--;
			l1 &= ~(1UL << l1i);

			l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
			/*
			 * Mask and clear the pending events.
			 * Doing it here for all events that will be processed
			 * avoids a race with stipending() (which can be called
			 * through evtchn_do_event->splx) that could cause an
			 * event to be both processed and marked pending.
			 */
			xen_atomic_setbits_l(&s->evtchn_mask[l1i], l2);
			xen_atomic_clearbits_l(&s->evtchn_pending[l1i], l2);

			while ((l2i = xen_ffs(l2)) != 0) {
				l2i--;
				l2 &= ~(1UL << l2i);

				port = (l1i << LONG_SHIFT) + l2i;
#ifdef PORT_DEBUG
				if (port == PORT_DEBUG)
					printf("do_hypervisor_callback event %d\n", port);
#endif
				if (evtsource[port])
					call_evtchn_do_event(port, regs);
#ifdef DOM0OPS
				else {
					if (ci->ci_ilevel < IPL_HIGH) {
						/* fast path */
						int oipl = ci->ci_ilevel;
						ci->ci_ilevel = IPL_HIGH;
						call_xenevt_event(port);
						ci->ci_ilevel = oipl;
					} else {
						/* set pending event */
						xenevt_setipending(l1i, l2i);
					}
				}
#endif
			}
		}
	}

#ifdef DIAGNOSTIC
	if (level != ci->ci_ilevel)
		printf("hypervisor done %08x level %d/%d ipending %08x\n",
#ifdef XEN3
		    (uint)vci->evtchn_pending_sel,
#else
		    (uint)HYPERVISOR_shared_info->evtchn_pending_sel,
#endif
		    level, ci->ci_ilevel, ci->ci_ipending);
#endif
}

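/*
 * Unmask an event channel.  If the event was delivered while masked, the
 * "interrupt edge" was lost (see the comment below), so re-assert the
 * per-vcpu pending bits by hand and force a callback if needed.
 */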
void
hypervisor_unmask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	volatile struct vcpu_info *vci = curcpu()->ci_vcpu;

#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_unmask_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_mask[0], ev);
	/*
	 * The following is basically the equivalent of
	 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the
	 * interrupt edge' if the channel is masked.
	 */
	if (xen_atomic_test_bit(&s->evtchn_pending[0], ev) &&
#ifdef XEN3
	    !xen_atomic_test_and_set_bit(&vci->evtchn_pending_sel, ev>>LONG_SHIFT)) {
#else
	    !xen_atomic_test_and_set_bit(&s->evtchn_pending_sel, ev>>LONG_SHIFT)) {
#endif
		xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
		if (!vci->evtchn_upcall_mask)
			hypervisor_force_callback();
	}
}

void
hypervisor_mask_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_mask_event %d\n", ev);
#endif

	xen_atomic_set_bit(&s->evtchn_mask[0], ev);
}

void
hypervisor_clear_event(unsigned int ev)
{
	volatile shared_info_t *s = HYPERVISOR_shared_info;
#ifdef PORT_DEBUG
	if (ev == PORT_DEBUG)
		printf("hypervisor_clear_event %d\n", ev);
#endif

	xen_atomic_clear_bit(&s->evtchn_pending[0], ev);
}

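/*
 * Re-enable the events that hypervisor_set_ipending() recorded for this
 * IPL, now that their handlers have run.
 */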
void
hypervisor_enable_ipl(unsigned int ipl)
{
	u_long l1, l2;
	int l1i, l2i;
	struct cpu_info *ci = curcpu();

	/*
	 * Enable all events for this IPL. As we only set an event in
	 * ipl_evt_mask for its lowest IPL, and pending IPLs are processed
	 * high to low, we know that all callbacks for this event have
	 * been processed.
	 */

	l1 = ci->ci_isources[ipl]->ipl_evt_mask1;
	ci->ci_isources[ipl]->ipl_evt_mask1 = 0;
	while ((l1i = xen_ffs(l1)) != 0) {
		l1i--;
		l1 &= ~(1UL << l1i);
		l2 = ci->ci_isources[ipl]->ipl_evt_mask2[l1i];
		ci->ci_isources[ipl]->ipl_evt_mask2[l1i] = 0;
		while ((l2i = xen_ffs(l2)) != 0) {
			int evtch;

			l2i--;
			l2 &= ~(1UL << l2i);

			evtch = (l1i << LONG_SHIFT) + l2i;
			hypervisor_enable_event(evtch);
		}
	}
}

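/*
 * Record event bit (l1, l2) as pending at the IPLs in iplmask; the event
 * bit itself is only recorded at the lowest of those IPLs, to be
 * re-enabled later by hypervisor_enable_ipl().
 */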
void
hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
{
	int ipl;
	struct cpu_info *ci = curcpu();

	/* set pending bit for the appropriate IPLs */
	ci->ci_ipending |= iplmask;

	/*
	 * And set the event pending bit for the lowest IPL. As IPLs are
	 * handled from high to low, this ensures that all callbacks will
	 * have been called when we ack the event.
	 */
	ipl = ffs(iplmask);
	KASSERT(ipl > 0);
	ipl--;
	ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
	ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
}

void
hypervisor_machdep_attach(void) {

#ifdef XEN3
	/* dom0 does not require the arch-dependent P2M translation table */
	if (!xendomain_is_dom0()) {
		build_p2m_frame_list_list();
	}
#endif

}

#ifdef XEN3
/*
 * Generate the p2m_frame_list_list table,
 * needed for guest save/restore
 */
static void
build_p2m_frame_list_list(void) {

	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;
	/*
	 * The p2m list is composed of three levels of indirection,
	 * each layer containing MFNs pointing to lower level pages.
	 * The indirection is used to convert a given PFN to its MFN.
	 * Each level-N page can point to @fpp level-(N-1) pages.
	 * For example, for x86 32bit, we have:
	 * - PAGE_SIZE: 4096 bytes
	 * - fpp: 1024 (one L3 page can address 1024 L2 pages)
	 * An L1 page contains the list of MFNs we are looking for.
	 */
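	/*
	 * Worked example (hypothetical 1GB x86 32bit guest): max_pfn is
	 * 262144 and fpp is 4096 / 4 = 1024, so howmany(262144, 1024) = 256
	 * L2 entries are needed, each covering the MFNs of 1024 PFNs; the
	 * single L3 page then records the MFN of each page of L2 entries.
	 */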
	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(paddr_t);

	/* we only need one L3 page */
	l3_p2m_page = kmem_alloc(PAGE_SIZE, KM_NOSLEEP);
	if (l3_p2m_page == NULL)
		panic("could not allocate memory for l3_p2m_page");

	/*
	 * Determine how many L2 pages we need for the mapping.
	 * Each L2 can map a total of @fpp L1 pages.
	 */
	l2_p2m_page_size = howmany(max_pfn, fpp);

	l2_p2m_page = kmem_alloc(l2_p2m_page_size * PAGE_SIZE, KM_NOSLEEP);
	if (l2_p2m_page == NULL)
		panic("could not allocate memory for l2_p2m_page");

	/* We now have L3 and L2 pages ready, update L1 mapping */
	update_p2m_frame_list_list();

}

/*
 * Update the L1 p2m_frame_list_list mapping (during guest boot or resume)
 */
static void
update_p2m_frame_list_list(void) {

	int i;
	int fpp; /* number of page (frame) pointers per page */
	unsigned long max_pfn;

	max_pfn = xen_start_info.nr_pages;
	fpp = PAGE_SIZE / sizeof(paddr_t);

	for (i = 0; i < l2_p2m_page_size; i++) {
		/*
		 * Each time we start a new L2 page,
		 * store its MFN in the L3 page
		 */
		if ((i % fpp) == 0) {
			l3_p2m_page[i/fpp] = vtomfn(
				(vaddr_t)&l2_p2m_page[i]);
		}
		/*
		 * We use a shortcut here: since the
		 * @xpmap_phys_to_machine_mapping array already contains
		 * the PFN-to-MFN mapping, we just set the l2_p2m_page
		 * MFN pointer to the MFN of the corresponding frame of
		 * @xpmap_phys_to_machine_mapping.
		 */
		l2_p2m_page[i] = vtomfn((vaddr_t)
			&xpmap_phys_to_machine_mapping[i*fpp]);
	}

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
	    vtomfn((vaddr_t)l3_p2m_page);
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

}
#endif /* XEN3 */