Lines Matching refs:page
126 /* Xen requires the start_info struct to be page aligned */
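A minimal sketch of how that alignment constraint is typically expressed in C; the union layout and names below are illustrative assumptions, not necessarily the declaration this file uses:

union start_info_union {
	struct start_info start_info;	/* the struct Xen hands us at boot */
	uint8_t padding[PAGE_SIZE];	/* pad the union out to a full page */
} start_info_union __aligned(PAGE_SIZE);	/* page-aligned, as Xen requires */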
466 * For PAE, we need an L3 page, a single contiguous L2 "superpage" of 4 pages
467 * (all of them mapped by the L3 page), and a shadow page for L3[3].
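Read literally, the PAE comment implies six bootstrap page-table pages in total. A small illustrative tally (these macro names are invented, not taken from the file):

#define PAE_L3_PAGES		1	/* one L3 page (only 4 entries used) */
#define PAE_L2_SUPERPAGE	4	/* 4 contiguous L2 pages, all mapped by the L3 */
#define PAE_L3_SHADOW		1	/* shadow page for L3[3], the kernel slot */
#define PAE_PT_BOOT_PAGES	(PAE_L3_PAGES + PAE_L2_SUPERPAGE + PAE_L3_SHADOW)	/* = 6 */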
473 * Xen locore: get rid of the Xen bootstrap tables. Build and switch to new page tables.
478 * | KERNEL IMAGE | BOOTSTRAP TABLES | PROC0 UAREA | DUMMY PAGE | HYPER. SHARED
482 * INFO | EARLY ZERO PAGE | ISA I/O MEM |
485 * DUMMY PAGE is either a PGD for amd64 or a GDT for i386.
487 * (HYPER. SHARED INFO + EARLY ZERO PAGE + ISA I/O MEM) have no physical addresses.
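A hedged sketch of that layout as consecutive virtual-address ranges; every name and size below is a placeholder, and the last three regions get virtual addresses only (their backing comes from Xen or from remapping, not from kernel RAM set aside here):

static void
layout_sketch(vaddr_t va)	/* va = end of KERNEL IMAGE (hypothetical) */
{
	vaddr_t bootstrap_tables = va;	va += bootstrap_tables_sz;	/* placeholder size */
	vaddr_t proc0_uarea      = va;	va += USPACE;
	vaddr_t dummy_page       = va;	va += PAGE_SIZE;	/* PGD (amd64) or GDT (i386) */
	vaddr_t shared_info      = va;	va += PAGE_SIZE;	/* VA only; remapped to Xen's MFN */
	vaddr_t early_zero_page  = va;	va += PAGE_SIZE;	/* VA only */
	vaddr_t isa_io_mem       = va;	va += IOM_SIZE;		/* VA only; backed by IOM_BEGIN */
}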
555 * One more L2 page: we'll allocate several pages after kva_start
573 * Xen space we'll reclaim may not be enough for our new page tables,
597 /* Zero out PROC0 UAREA and DUMMY PAGE. */
619 vaddr_t page, avail, map_end;
636 * dummy user PGD (x86_64 only) / GDT page (i386 only)
675 * Create our page tables.
742 page = KERNTEXTOFF;
744 vaddr_t cur_page = page;
750 while (pl2_pi(page) == pl2_pi(cur_page)) {
751 if (page >= map_end) {
753 pte[pl1_pi(page)] = 0;
754 page += PAGE_SIZE;
757 pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
758 if (page == (vaddr_t)HYPERVISOR_shared_info) {
759 pte[pl1_pi(page)] = xen_start_info.shared_info;
761 if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
763 xencons_interface = (void *)page;
764 pte[pl1_pi(page)] = xen_start_info.console_mfn;
765 pte[pl1_pi(page)] <<= PAGE_SHIFT;
767 if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
769 xenstore_interface = (void *)page;
770 pte[pl1_pi(page)] = xen_start_info.store_mfn;
771 pte[pl1_pi(page)] <<= PAGE_SHIFT;
774 if (page >= (vaddr_t)atdevbase &&
775 page < (vaddr_t)atdevbase + IOM_SIZE) {
776 pte[pl1_pi(page)] =
777 IOM_BEGIN + (page - (vaddr_t)atdevbase);
778 pte[pl1_pi(page)] |= xpmap_pg_nx;
782 pte[pl1_pi(page)] |= PTE_P;
783 if (page < (vaddr_t)&__rodata_start) {
785 } else if (page >= (vaddr_t)&__rodata_start &&
786 page < (vaddr_t)&__data_start) {
788 pte[pl1_pi(page)] |= xpmap_pg_nx;
789 } else if (page >= old_pgd &&
790 page < old_pgd + (old_count * PAGE_SIZE)) {
791 /* Map the old page tables R (read-only). */
792 pte[pl1_pi(page)] |= xpmap_pg_nx;
793 } else if (page >= new_pgd &&
794 page < new_pgd + ((new_count + PDIRSZ) * PAGE_SIZE)) {
795 /* Map the new page tables R (read-only). */
796 pte[pl1_pi(page)] |= xpmap_pg_nx;
798 } else if (page == (vaddr_t)tmpgdt) {
801 * this page to uvm after making it writable.
803 pte[pl1_pi(page)] = 0;
804 page += PAGE_SIZE;
807 } else if (page >= (vaddr_t)&__data_start &&
808 page < (vaddr_t)&__kernel_end) {
810 pte[pl1_pi(page)] |= PTE_W | xpmap_pg_nx;
812 /* Map the page RW (read-write). */
813 pte[pl1_pi(page)] |= PTE_W | xpmap_pg_nx;
816 page += PAGE_SIZE;
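The hits from source lines 742-816 all come from one loop that fills the bootstrap PTEs. A hedged summary of the protection policy they imply, written as a stand-alone helper; prot_for() is invented for illustration, old_pgd/new_pgd/old_count/new_count are assumed in scope, and the tmpgdt and ISA I/O special cases are omitted:

static pt_entry_t
prot_for(vaddr_t page)
{
	pt_entry_t flags = PTE_P;		/* every mapped page is present */

	if (page < (vaddr_t)&__rodata_start) {
		/* kernel text: read-only and executable (neither PTE_W nor NX) */
	} else if (page < (vaddr_t)&__data_start) {
		flags |= xpmap_pg_nx;		/* rodata: read-only, no-exec */
	} else if ((page >= old_pgd &&
	    page < old_pgd + old_count * PAGE_SIZE) ||
	    (page >= new_pgd &&
	    page < new_pgd + (new_count + PDIRSZ) * PAGE_SIZE)) {
		flags |= xpmap_pg_nx;		/* old/new page tables: read-only, no-exec */
	} else {
		flags |= PTE_W | xpmap_pg_nx;	/* data, bss, uarea, etc.: read-write, no-exec */
	}
	return flags;
}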
826 /* Install recursive page tables mapping */
891 /* Save the address of the real per-cpu L4 page. */
895 /* Save the address of the L3 page. */
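What the recursive mapping amounts to, in a hedged one-line sketch for the non-PAE case (the slot macro is the usual NetBSD PDIR_SLOT_PTE; the variable name new_pgd_va is an assumption):

/* Point one top-level slot back at the top-level table itself, so the
 * page-table pages become reachable through a fixed VA window. */
new_pgd_va[PDIR_SLOT_PTE] =
    xpmap_ptom_masked((paddr_t)new_pgd - KERNBASE) | PTE_P | xpmap_pg_nx;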
916 page = old_pgd;
917 addr = xpmap_mtop((paddr_t)L2[pl2_pi(page)] & PTE_4KFRAME);
919 pte += pl1_pi(page);
920 while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
923 page += PAGE_SIZE;
929 while (page < old_pgd + (old_count * PAGE_SIZE)) {
932 page += PAGE_SIZE;
939 * Mark a page read-only, assuming vaddr = paddr + KERNBASE.
942 xen_bt_set_readonly(vaddr_t page)
946 entry = xpmap_ptom_masked(page - KERNBASE);
949 HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
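The matched lines already cover most of this helper; a hedged reconstruction of the whole function (the exact flag set is an assumption; the key point is that PTE_W is left out, so the hypervisor installs the mapping read-only):

static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PTE_P | xpmap_pg_nx;	/* present, no-exec, and notably no PTE_W */

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}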
954 xen_set_user_pgd(paddr_t page)
962 op.arg1.mfn = xpmap_ptom_masked(page) >> PAGE_SHIFT;
964 panic("xen_set_user_pgd: failed to install new user page"
965 " directory %#" PRIxPADDR, page);