/*	$NetBSD: x86_xpmap.c,v 1.12 2008/11/13 18:44:51 cegger Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.12 2008/11/13 18:44:51 cegger Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3, so "kernel" pages must carry PG_u. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#ifdef XEN3
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)
#else
#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count))
#endif
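
/*
 * Note: the wrapper above papers over the hypercall ABI change between
 * Xen 2 and Xen 3: on Xen 3 every MMU update request carries an explicit
 * domain id (DOMID_SELF when we update our own mappings).
 */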

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

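/*
 * Map the pages backing an LDT read-only and tell Xen to use them as
 * the LDT. Xen will not activate descriptor-table pages that the guest
 * can still write to, hence the PG_RW clearing below.
 */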
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %p %d %p\n", (void *)base,
		    entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	xpq_flush_queue();
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;
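
/*
 * The xpq_* helpers below batch MMU updates in xpq_queue and hand them
 * to Xen in a single HYPERVISOR_mmu_update hypercall, amortizing the
 * guest/hypervisor transition cost over many page-table writes. The
 * queue is flushed explicitly (xpq_flush_queue) or implicitly when it
 * fills up (xpq_increment_idx).
 */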

void
xpq_flush_queue(void)
{
	int i, ok;

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val));
	if (xpq_idx != 0 &&
	    HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok) < 0) {
		printf("xpq_flush_queue: %d entries\n", xpq_idx);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			    (uint64_t)xpq_queue[i].ptr,
			    (uint64_t)xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed\n");
	}
	xpq_idx = 0;
}
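
/*
 * Typical caller pattern (a sketch only; real callers such as the pmap
 * also deal with locking and MP concerns):
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(ma, npte);		queue one or more updates
 *	xpq_flush_queue();			then push them to Xen at once
 *	splx(s);
 */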

static inline void
xpq_increment_idx(void)
{

	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

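/*
 * Queue an update of the machine-to-physical translation table, so that
 * the (pseudo-)physical address pa becomes the translation of machine
 * address ma.
 */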
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

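/*
 * Queue a page-table entry update: write val at machine address ptr.
 * ptr must point into a page-table page and be PTE-aligned (hence the
 * KASSERT below).
 */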
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

#ifdef XEN3
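/*
 * On Xen 3 the extended MMU commands (base-pointer switch, pin/unpin,
 * TLB flush, ...) are separate mmuext_op hypercalls rather than entries
 * in the update queue, so each helper below flushes the queue first to
 * preserve ordering with any pending PTE updates.
 */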
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

void
xpq_queue_pin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;

#if defined(__x86_64__)
	op.cmd = MMUEXT_PIN_L4_TABLE;
#else
	op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

#ifdef PAE
static void
xpq_queue_pin_l3_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l3_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;

	op.cmd = MMUEXT_PIN_L3_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_l3_table");
}
#endif

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm();
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_flush_cache");
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

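/*
 * Perform a single, unbatched PTE update on behalf of another domain;
 * used by privileged (e.g. dom0) code that maps foreign pages.
 */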
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}
#else /* XEN3 */
void
xpq_queue_pt_switch(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_pt_switch: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_NEW_BASEPTR;
	xpq_increment_idx();
}

void
xpq_queue_pin_table(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_pin_table: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_PIN_L2_TABLE;
	xpq_increment_idx();
}

void
xpq_queue_unpin_table(paddr_t pa)
{

	XENPRINTK2(("xpq_queue_unpin_table: %p %p\n", (void *)pa, (void *)pa));
	xpq_queue[xpq_idx].ptr = pa | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_UNPIN_TABLE;
	xpq_increment_idx();
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND | va;
	xpq_queue[xpq_idx].val = MMUEXT_SET_LDT | (entries << MMUEXT_CMD_SHIFT);
	xpq_increment_idx();
}

void
xpq_queue_tlb_flush(void)
{

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_TLB_FLUSH;
	xpq_increment_idx();
}

void
xpq_flush_cache(void)
{
	int s = splvm();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	xpq_queue[xpq_idx].ptr = MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_FLUSH_CACHE;
	xpq_increment_idx();
	xpq_flush_queue();
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{

	XENPRINTK2(("xpq_queue_invlpg %p\n", (void *)va));
	xpq_queue[xpq_idx].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
	xpq_queue[xpq_idx].val = MMUEXT_INVLPG;
	xpq_increment_idx();
}

int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t xpq_up[3];

	xpq_up[0].ptr = MMU_EXTENDED_COMMAND;
	xpq_up[0].val = MMUEXT_SET_FOREIGNDOM | (dom << 16);
	xpq_up[1].ptr = ptr;
	xpq_up[1].val = val;
	if (HYPERVISOR_mmu_update_self(xpq_up, 2, NULL) < 0)
		return EFAULT;
	return (0);
}
#endif /* XEN3 */

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		sprintf(XBUF, "%" PRIx64 " %08" PRIx64,
		    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
		if (++i < xpq_idx)
			sprintf(XBUF + strlen(XBUF), " %" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
		if (++i < xpq_idx)
			sprintf(XBUF + strlen(XBUF), " %" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
		if (++i < xpq_idx)
			sprintf(XBUF + strlen(XBUF), " %" PRIx64 " %08" PRIx64,
			    (uint64_t)xpq_queue[i].ptr, (uint64_t)xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif
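
/*
 * Rough sizing (a sketch, not a guarantee): NKL2_KIMG_ENTRIES L2 slots
 * cover the kernel image, plus one slot of slack; when L2_SLOT_KERNBASE
 * is nonzero the kernel is also visible at its KERNBASE alias, which
 * presumably needs its own set of L2 slots, hence the doubled count.
 */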

/*
 * Construct and switch to new page tables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen page tables.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif
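
/*
 * PAE page accounting (sketch): 4 contiguous L2 pages (mapped by
 * L3[0..3]), plus the shadow of the kernel's L2 (L3[3]), plus the L3
 * page itself, for l2_4_count == 6.
 */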

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * install bootstrap pages. We may need more L2 pages here than
	 * the final tables will need, as the bootstrap tables are
	 * installed above the final tables in the address space.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);
	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new set of page tables and switch to them.
 * old_count is the number of old tables (including the PGD, PDTPE and PDE),
 * new_count is the number of new tables (PTE only).
 * We assume the areas don't overlap.
 */
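/*
 * The bootstrap dance (sketch, from the two calls in
 * xen_pmap_bootstrap() above):
 *	1. build "bootstrap" tables above the Xen-provided tables and
 *	   switch to them (final == 0);
 *	2. rebuild the definitive tables in the space the Xen tables
 *	   occupied and switch again (final == 1);
 *	3. reclaim the bootstrap area.
 * Both passes run through this function.
 */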

static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(0x%lx, 0x%lx, %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console 0x%lx ", xen_start_info.console.domU.mfn));
	__PRINTK(("xenstore 0x%lx\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset (bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset (pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L4[0x%x]\n",
	    pdtpe, (uint64_t)addr, (uint64_t)bt_pgd[pl4_pi(KERNTEXTOFF)],
	    pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64 " -> L3[0x%x]\n",
	    pde, (int64_t)addr, (int64_t)pdtpe[pl3_pi(KERNTEXTOFF)],
	    pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries; it will
		 * add them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
		    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * i,
		    (int64_t)addr, (int64_t)pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
	    " -> L3[0x%x]\n", (vaddr_t)pde + PAGE_SIZE * 4,
	    (int64_t)addr, (int64_t)pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    HYPERVISOR_shared_info, (int64_t)pte[pl1_pi(page)]));
			}
#ifdef XEN3
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console.domU.mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    xencons_interface, (int64_t)pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va 0x%lx pte 0x%" PRIx64 "\n",
				    xenstore_interface, (int64_t)pte[pl1_pi(page)]));
			}
#endif /* XEN3 */
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va 0x%lx pa 0x%lx "
				    "entry 0x%" PRIx64 " -> L1[0x%x]\n",
				    page, page - KERNBASE,
				    (int64_t)pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va 0x%lx pa 0x%" PRIx64 " entry 0x%" PRIx64
		    " -> L2[0x%x]\n", pte, (int64_t)addr,
		    (int64_t)pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * we need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i, (long)addr,
		    (int64_t)pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
		xpq_queue_pin_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va 0x%lx pa 0x%" PRIx64
	    " entry 0x%" PRIx64 "\n", new_pgd, (int64_t)new_pgd - KERNBASE,
	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD\n"));
#ifdef PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif
#ifdef __i386__
	/* Save phys. addr of PDP, for libkvm. */
	PDPpaddr = (long)pde;
#ifdef PAE
	/* also save the address of the L3 page */
	pmap_l3pd = pdtpe;
	pmap_l3paddr = (new_pgd - KERNBASE);
#endif /* PAE */
#endif /* i386 */
	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry 0x%" PRIx64 "\n",
	    (int64_t)bt_pgd[PDIR_SLOT_PTE]));
#ifdef PAE
	if (final) {
		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim the space taken by the old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde 0x%" PRIx64 " addr 0x%" PRIx64 " pte 0x%lx\n",
	    (int64_t)pde[pl2_pi(page)], (int64_t)addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr 0x%" PRIx64 " pte 0x%lx *pte 0x%" PRIx64 "\n",
		    (int64_t)addr, (long)pte, (int64_t)*pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/* Our PTEs are contiguous, so it's safe to just "++" here. */
		pte++;
	}
	xpq_flush_queue();
}

/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
}

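/*
 * On amd64 Xen keeps separate kernel and user base pointers; install
 * the user-mode PGD. The page argument is a (pseudo-)physical address,
 * translated to a machine frame via the phys-to-machine map.
 */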
#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
		    " directory %lx", page);
	splx(s);
}
#endif /* __x86_64__ */