/*	$NetBSD: x86_xpmap.c,v 1.35 2011/11/06 11:40:47 cherry Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.35 2011/11/06 11:40:47 cherry Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/simplelock.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

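/*
 * Install the new LDT. Xen requires pages backing descriptor tables
 * to be mapped read-only, so revoke PG_RW on each page of the LDT
 * before queueing the MMUEXT_SET_LDT operation.
 */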
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

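/*
 * The MMU update queue: pending mmu_update_t requests are batched
 * per-CPU and handed to the hypervisor in a single hypercall by
 * xpq_flush_queue().
 */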
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

extern struct cpu_info * (*xpq_cpu)(void);

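/*
 * Flush this CPU's pending MMU updates to the hypervisor. On partial
 * failure, skip past the entries that succeeded and retry the rest.
 */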
void
xpq_flush_queue(void)
{
	int i, ok = 0, ret;

	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

retry:
	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		printf("xpq_flush_queue: %d entries (%d successful)\n",
		    xpq_idx, ok);

		if (ok != 0) {
			xpq_queue += ok;
			xpq_idx -= ok;
			ok = 0;
			goto retry;
		}

		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			    xpq_queue[i].ptr, xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx_array[xpq_cpu()->ci_cpuid] = 0;
}

static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[xpq_cpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

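/*
 * Queue an update of the machine-to-physical (M2P) translation:
 * make machine address "ma" translate back to physical address "pa".
 */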
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

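/*
 * Queue an update of the page-table entry at machine address "ptr"
 * to the new value "val".
 */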
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
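
/*
 * A typical caller batches several updates and pays for a single
 * hypercall; an illustrative sketch only ("ma", "pa" and "npte" are
 * hypothetical values):
 *
 *	xpq_queue_pte_update(ma, npte);
 *	xpq_queue_machphys_update(ma, pa);
 *	xpq_flush_queue();
 *
 * The queue is also flushed implicitly by xpq_increment_idx() when
 * it fills up.
 */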

void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

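/*
 * Pin the page table at physical address "pa". "lvl" is the
 * MMUEXT_PIN_L<n>_TABLE command to use, i.e. the table level minus one.
 */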
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

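/* Unpin the page table at physical address "pa". */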
void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

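/*
 * Tell the hypervisor where this vCPU's LDT lives. "va" must be
 * page aligned.
 */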
void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

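/* Flush the local TLB. This is a synchronous call. */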
void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

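/* Flush the CPU cache via the hypervisor. This is a synchronous call. */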
void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm(), err;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
		panic("xpq_flush_cache, err %d", err);
	}
	splx(s); /* XXX: removeme */
}

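/* Invalidate a single VA in the local TLB. This is a synchronous call. */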
void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

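/* This is a synchronous call. */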
void
xen_mcast_invlpg(vaddr_t va, uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}
}

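/* This is a synchronous call. */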
void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, cpumask);
	}
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}
}

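/*
 * Perform a single, synchronous PTE update on behalf of domain "dom".
 * Returns 0 on success or EFAULT if the hypervisor rejects the update.
 */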
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

478
479 #ifdef XENDEBUG
480 void
481 xpq_debug_dump(void)
482 {
483 int i;
484
485 mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
486 int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
487
488 XENPRINTK2(("idx: %d\n", xpq_idx));
489 for (i = 0; i < xpq_idx; i++) {
490 snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
491 xpq_queue[i].ptr, xpq_queue[i].val);
492 if (++i < xpq_idx)
493 snprintf(XBUF + strlen(XBUF),
494 sizeof(XBUF) - strlen(XBUF),
495 "%" PRIx64 " %08" PRIx64,
496 xpq_queue[i].ptr, xpq_queue[i].val);
497 if (++i < xpq_idx)
498 snprintf(XBUF + strlen(XBUF),
499 sizeof(XBUF) - strlen(XBUF),
500 "%" PRIx64 " %08" PRIx64,
501 xpq_queue[i].ptr, xpq_queue[i].val);
502 if (++i < xpq_idx)
503 snprintf(XBUF + strlen(XBUF),
504 sizeof(XBUF) - strlen(XBUF),
505 "%" PRIx64 " %08" PRIx64,
506 xpq_queue[i].ptr, xpq_queue[i].val);
507 XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
508 }
509 }
510 #endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs ? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables. Returns the first vaddr
 * available for use once the Xen bootstrap pagetables have been
 * reclaimed.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of the Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
	    (xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need: first, everything mapped
	 * before the Xen bootstrap tables.
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
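	/*
	 * The page-table pages themselves live in the mapped VA range, so
	 * grow "count" until the range covered by "count" L2 entries also
	 * covers the tables needed to map it.
	 */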
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * One more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install bootstrap pages. We may need more L2 pages than the
	 * final table will, as the bootstrap tables are installed after
	 * the final ones.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
		    ((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
	    xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is # of old tables (including PGD, PDTPE and PDE),
 * new_count is # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * Size of the R/W area after the kernel text:
	 *     xencons_interface (if present)
	 *     xenstore_interface (if present)
	 *     table pages (new_count + l2_4_count entries)
	 * Extra mappings (only when final is true):
	 *     UAREA
	 *     dummy user PGD (x86_64 only) / gdt page (i386 only)
	 *     HYPERVISOR_shared_info
	 *     ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * We always set atdevbase, as it's used by init386 to find the first
	 * available VA. map_end is updated only if we are dom0, so
	 * atdevbase -> atdevbase + IOM_SIZE will be mapped only in
	 * this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset (bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset (pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* Our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * Enter the L2 pages in the L3. The real L2 kernel PD will be
	 * the last one (so that pde[L2_SLOT_KERN] always points to the
	 * shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries; it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i ++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi (cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)new_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim space taken by old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		    "*pte %#" PRIxPADDR "\n",
		    addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assumes vaddr = paddr + KERNBASE.
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
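/*
 * Install a new user-mode page directory. On amd64, Xen maintains
 * separate kernel and user base pointers; this sets the user one.
 */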
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */
