Home | History | Annotate | Line # | Download | only in x86
xen_pmap.c revision 1.6.2.3
      1  1.6.2.3    yamt /*	$NetBSD: xen_pmap.c,v 1.6.2.3 2012/05/23 10:07:52 yamt Exp $	*/
      2      1.2   chuck 
      3      1.1  dyoung /*
      4      1.1  dyoung  * Copyright (c) 2007 Manuel Bouyer.
      5      1.1  dyoung  *
      6      1.1  dyoung  * Redistribution and use in source and binary forms, with or without
      7      1.1  dyoung  * modification, are permitted provided that the following conditions
      8      1.1  dyoung  * are met:
      9      1.1  dyoung  * 1. Redistributions of source code must retain the above copyright
     10      1.1  dyoung  *    notice, this list of conditions and the following disclaimer.
     11      1.1  dyoung  * 2. Redistributions in binary form must reproduce the above copyright
     12      1.1  dyoung  *    notice, this list of conditions and the following disclaimer in the
     13      1.1  dyoung  *    documentation and/or other materials provided with the distribution.
     14      1.1  dyoung  *
     15      1.1  dyoung  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16      1.1  dyoung  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17      1.1  dyoung  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18      1.1  dyoung  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19      1.1  dyoung  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20      1.1  dyoung  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21      1.1  dyoung  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22      1.1  dyoung  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23      1.1  dyoung  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24      1.1  dyoung  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25      1.1  dyoung  *
     26      1.1  dyoung  */
     27      1.1  dyoung 
     28      1.1  dyoung /*
     29      1.1  dyoung  * Copyright (c) 2006 Mathieu Ropert <mro (at) adviseo.fr>
     30      1.1  dyoung  *
     31      1.1  dyoung  * Permission to use, copy, modify, and distribute this software for any
     32      1.1  dyoung  * purpose with or without fee is hereby granted, provided that the above
     33      1.1  dyoung  * copyright notice and this permission notice appear in all copies.
     34      1.1  dyoung  *
     35      1.1  dyoung  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     36      1.1  dyoung  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     37      1.1  dyoung  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     38      1.1  dyoung  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     39      1.1  dyoung  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     40      1.1  dyoung  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     41      1.1  dyoung  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     42      1.1  dyoung  */
     43      1.1  dyoung 
     44      1.1  dyoung /*
     45      1.1  dyoung  * Copyright (c) 1997 Charles D. Cranor and Washington University.
     46      1.1  dyoung  * All rights reserved.
     47      1.1  dyoung  *
     48      1.1  dyoung  * Redistribution and use in source and binary forms, with or without
     49      1.1  dyoung  * modification, are permitted provided that the following conditions
     50      1.1  dyoung  * are met:
     51      1.1  dyoung  * 1. Redistributions of source code must retain the above copyright
     52      1.1  dyoung  *    notice, this list of conditions and the following disclaimer.
     53      1.1  dyoung  * 2. Redistributions in binary form must reproduce the above copyright
     54      1.1  dyoung  *    notice, this list of conditions and the following disclaimer in the
     55      1.1  dyoung  *    documentation and/or other materials provided with the distribution.
     56      1.1  dyoung  *
     57      1.1  dyoung  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     58      1.1  dyoung  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     59      1.1  dyoung  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     60      1.1  dyoung  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     61      1.1  dyoung  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     62      1.1  dyoung  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     63      1.1  dyoung  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     64      1.1  dyoung  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     65      1.1  dyoung  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     66      1.1  dyoung  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     67      1.1  dyoung  */
     68      1.1  dyoung 
     69      1.1  dyoung /*
     70      1.1  dyoung  * Copyright 2001 (c) Wasabi Systems, Inc.
     71      1.1  dyoung  * All rights reserved.
     72      1.1  dyoung  *
     73      1.1  dyoung  * Written by Frank van der Linden for Wasabi Systems, Inc.
     74      1.1  dyoung  *
     75      1.1  dyoung  * Redistribution and use in source and binary forms, with or without
     76      1.1  dyoung  * modification, are permitted provided that the following conditions
     77      1.1  dyoung  * are met:
     78      1.1  dyoung  * 1. Redistributions of source code must retain the above copyright
     79      1.1  dyoung  *    notice, this list of conditions and the following disclaimer.
     80      1.1  dyoung  * 2. Redistributions in binary form must reproduce the above copyright
     81      1.1  dyoung  *    notice, this list of conditions and the following disclaimer in the
     82      1.1  dyoung  *    documentation and/or other materials provided with the distribution.
     83      1.1  dyoung  * 3. All advertising materials mentioning features or use of this software
     84      1.1  dyoung  *    must display the following acknowledgement:
     85      1.1  dyoung  *      This product includes software developed for the NetBSD Project by
     86      1.1  dyoung  *      Wasabi Systems, Inc.
     87      1.1  dyoung  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     88      1.1  dyoung  *    or promote products derived from this software without specific prior
     89      1.1  dyoung  *    written permission.
     90      1.1  dyoung  *
     91      1.1  dyoung  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     92      1.1  dyoung  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     93      1.1  dyoung  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     94      1.1  dyoung  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     95      1.1  dyoung  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     96      1.1  dyoung  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     97      1.1  dyoung  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     98      1.1  dyoung  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     99      1.1  dyoung  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    100      1.1  dyoung  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    101      1.1  dyoung  * POSSIBILITY OF SUCH DAMAGE.
    102      1.1  dyoung  */
    103      1.1  dyoung 
    104      1.1  dyoung #include <sys/cdefs.h>
    105  1.6.2.3    yamt __KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.6.2.3 2012/05/23 10:07:52 yamt Exp $");
    106      1.1  dyoung 
    107      1.1  dyoung #include "opt_user_ldt.h"
    108      1.1  dyoung #include "opt_lockdebug.h"
    109      1.1  dyoung #include "opt_multiprocessor.h"
    110      1.1  dyoung #include "opt_xen.h"
    111      1.1  dyoung #if !defined(__x86_64__)
    112      1.1  dyoung #include "opt_kstack_dr0.h"
    113      1.1  dyoung #endif /* !defined(__x86_64__) */
    114      1.1  dyoung 
    115      1.1  dyoung #include <sys/param.h>
    116      1.1  dyoung #include <sys/systm.h>
    117      1.1  dyoung #include <sys/proc.h>
    118      1.1  dyoung #include <sys/pool.h>
    119      1.1  dyoung #include <sys/kernel.h>
    120      1.1  dyoung #include <sys/atomic.h>
    121      1.1  dyoung #include <sys/cpu.h>
    122      1.1  dyoung #include <sys/intr.h>
    123      1.1  dyoung #include <sys/xcall.h>
    124      1.1  dyoung 
    125      1.1  dyoung #include <uvm/uvm.h>
    126      1.1  dyoung 
    127      1.1  dyoung #include <dev/isa/isareg.h>
    128      1.1  dyoung 
    129      1.1  dyoung #include <machine/specialreg.h>
    130      1.1  dyoung #include <machine/gdt.h>
    131      1.1  dyoung #include <machine/isa_machdep.h>
    132      1.1  dyoung #include <machine/cpuvar.h>
    133      1.1  dyoung 
    134      1.1  dyoung #include <x86/pmap.h>
    135      1.1  dyoung #include <x86/pmap_pv.h>
    136      1.1  dyoung 
    137      1.1  dyoung #include <x86/i82489reg.h>
    138      1.1  dyoung #include <x86/i82489var.h>
    139      1.1  dyoung 
    140  1.6.2.2    yamt #include <xen/xen-public/xen.h>
    141      1.1  dyoung #include <xen/hypervisor.h>
    142  1.6.2.2    yamt #include <xen/xenpmap.h>
    143      1.1  dyoung 
    144      1.3   rmind #define COUNT(x)	/* nothing */
    145      1.3   rmind 
    146      1.3   rmind extern pd_entry_t * const normal_pdes[];
    147      1.3   rmind 
    148      1.1  dyoung extern paddr_t pmap_pa_start; /* PA of first physical page for this domain */
    149      1.1  dyoung extern paddr_t pmap_pa_end;   /* PA of last physical page for this domain */
    150      1.1  dyoung 
    151      1.1  dyoung int
    152      1.1  dyoung pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
    153      1.1  dyoung {
    154      1.1  dyoung         paddr_t ma;
    155      1.1  dyoung 
    156      1.1  dyoung 	if (__predict_false(pa < pmap_pa_start || pmap_pa_end <= pa)) {
    157      1.1  dyoung 		ma = pa; /* XXX hack */
    158      1.1  dyoung 	} else {
    159      1.1  dyoung 		ma = xpmap_ptom(pa);
    160      1.1  dyoung 	}
    161      1.1  dyoung 
    162      1.1  dyoung 	return pmap_enter_ma(pmap, va, ma, pa, prot, flags, DOMID_SELF);
    163      1.1  dyoung }
    164      1.1  dyoung 
    165      1.1  dyoung /*
    166      1.1  dyoung  * pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
    167      1.1  dyoung  *
    168      1.1  dyoung  * => no need to lock anything, assume va is already allocated
    169      1.1  dyoung  * => should be faster than normal pmap enter function
    170      1.1  dyoung  * => we expect a MACHINE address
    171      1.1  dyoung  */
    172      1.1  dyoung 
    173      1.1  dyoung void
    174      1.1  dyoung pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
    175      1.1  dyoung {
    176      1.1  dyoung 	pt_entry_t *pte, opte, npte;
    177      1.1  dyoung 
    178      1.1  dyoung 	if (va < VM_MIN_KERNEL_ADDRESS)
    179      1.1  dyoung 		pte = vtopte(va);
    180      1.1  dyoung 	else
    181      1.1  dyoung 		pte = kvtopte(va);
    182      1.1  dyoung 
    183      1.1  dyoung 	npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
    184      1.1  dyoung 	     PG_V | PG_k;
    185      1.1  dyoung 	if (flags & PMAP_NOCACHE)
    186      1.1  dyoung 		npte |= PG_N;
    187      1.1  dyoung 
    188      1.1  dyoung 	if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
    189      1.1  dyoung 		npte |= PG_NX;
    190      1.1  dyoung 
    191      1.1  dyoung 	opte = pmap_pte_testset (pte, npte); /* zap! */
    192      1.1  dyoung 
    193      1.1  dyoung 	if (pmap_valid_entry(opte)) {
    194      1.1  dyoung #if defined(MULTIPROCESSOR)
    195      1.1  dyoung 		kpreempt_disable();
    196      1.3   rmind 		pmap_tlb_shootdown(pmap_kernel(), va, opte, TLBSHOOT_KENTER);
    197      1.1  dyoung 		kpreempt_enable();
    198      1.1  dyoung #else
    199      1.1  dyoung 		/* Don't bother deferring in the single CPU case. */
    200      1.1  dyoung 		pmap_update_pg(va);
    201      1.1  dyoung #endif
    202      1.1  dyoung 	}
    203      1.1  dyoung }
    204      1.1  dyoung 
    205      1.1  dyoung /*
    206      1.1  dyoung  * pmap_extract_ma: extract a MA for the given VA
    207      1.1  dyoung  */
    208      1.1  dyoung 
    209      1.1  dyoung bool
    210      1.1  dyoung pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
    211      1.1  dyoung {
    212      1.1  dyoung 	pt_entry_t *ptes, pte;
    213      1.1  dyoung 	pd_entry_t pde;
    214      1.1  dyoung 	pd_entry_t * const *pdes;
    215      1.1  dyoung 	struct pmap *pmap2;
    216      1.5     jym 
    217      1.1  dyoung 	kpreempt_disable();
    218      1.1  dyoung 	pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
    219      1.1  dyoung 	if (!pmap_pdes_valid(va, pdes, &pde)) {
    220      1.1  dyoung 		pmap_unmap_ptes(pmap, pmap2);
    221      1.1  dyoung 		kpreempt_enable();
    222      1.1  dyoung 		return false;
    223      1.1  dyoung 	}
    224      1.5     jym 
    225      1.1  dyoung 	pte = ptes[pl1_i(va)];
    226      1.1  dyoung 	pmap_unmap_ptes(pmap, pmap2);
    227      1.1  dyoung 	kpreempt_enable();
    228      1.5     jym 
    229      1.1  dyoung 	if (__predict_true((pte & PG_V) != 0)) {
    230      1.1  dyoung 		if (pap != NULL)
    231      1.1  dyoung 			*pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
    232      1.1  dyoung 		return true;
    233      1.1  dyoung 	}
    234      1.5     jym 
    235      1.1  dyoung 	return false;
    236      1.1  dyoung }
    237      1.6     jym 
/*
 * pmap_xen_suspend: pmap handler invoked before a Xen save/suspend.
 *
 * => on PAE, clears the recursive PDIR_SLOT_PTE entries that Xen's
 *    save code cannot cope with (see pmap_unmap_recursive_entries())
 * => flushes the pending hypervisor update queue so the page tables
 *    are consistent when the domain is suspended
 */
void
pmap_xen_suspend(void)
{
#ifdef PAE
	pmap_unmap_recursive_entries();
#endif

	xpq_flush_queue();
}
    250      1.6     jym 
/*
 * pmap_xen_resume: pmap handler invoked after a Xen restore/resume.
 *
 * => on PAE, re-establishes the recursive PDIR_SLOT_PTE entries that
 *    were cleared by pmap_xen_suspend()
 * => flushes the hypervisor update queue so the entries take effect
 */
void
pmap_xen_resume(void)
{
#ifdef PAE
	pmap_map_recursive_entries();
#endif

	xpq_flush_queue();
}
    260      1.6     jym 
    261      1.6     jym #ifdef PAE
    262      1.6     jym /*
    263      1.6     jym  * NetBSD uses L2 shadow pages to support PAE with Xen. However, Xen does not
    264      1.6     jym  * handle them correctly during save/restore, leading to incorrect page
    265      1.6     jym  * tracking and pinning during restore.
    266      1.6     jym  * For save/restore to succeed, two functions are introduced:
    267      1.6     jym  * - pmap_map_recursive_entries(), used by resume code to set the recursive
    268      1.6     jym  *   mapping entries to their correct value
    269      1.6     jym  * - pmap_unmap_recursive_entries(), used by suspend code to clear all
    270      1.6     jym  *   PDIR_SLOT_PTE entries
    271      1.6     jym  */
    272      1.6     jym void
    273      1.6     jym pmap_map_recursive_entries(void)
    274      1.6     jym {
    275      1.6     jym 	int i;
    276      1.6     jym 	struct pmap *pm;
    277      1.6     jym 
    278      1.6     jym 	mutex_enter(&pmaps_lock);
    279      1.6     jym 	LIST_FOREACH(pm, &pmaps, pm_list) {
    280      1.6     jym 		for (i = 0; i < PDP_SIZE; i++) {
    281      1.6     jym 			xpq_queue_pte_update(
    282      1.6     jym 			    xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_PTE + i)),
    283      1.6     jym 			    xpmap_ptom((pm)->pm_pdirpa[i]) | PG_V);
    284      1.6     jym 		}
    285      1.6     jym 	}
    286      1.6     jym 	mutex_exit(&pmaps_lock);
    287      1.6     jym 
    288      1.6     jym 	for (i = 0; i < PDP_SIZE; i++) {
    289      1.6     jym 		xpq_queue_pte_update(
    290      1.6     jym 		    xpmap_ptom(pmap_pdirpa(pmap_kernel(), PDIR_SLOT_PTE + i)),
    291      1.6     jym 		    xpmap_ptom(pmap_kernel()->pm_pdirpa[i]) | PG_V);
    292      1.6     jym 	}
    293      1.6     jym }
    294      1.6     jym 
    295  1.6.2.2    yamt /*
    296  1.6.2.2    yamt  * Unmap recursive entries found in pmaps. Required during Xen
    297  1.6.2.2    yamt  * save/restore operations, as Xen does not handle recursive mappings
    298  1.6.2.2    yamt  * properly.
    299  1.6.2.2    yamt  */
    300      1.6     jym void
    301      1.6     jym pmap_unmap_recursive_entries(void)
    302      1.6     jym {
    303      1.6     jym 	int i;
    304      1.6     jym 	struct pmap *pm;
    305      1.6     jym 
    306  1.6.2.2    yamt 	/*
    307  1.6.2.2    yamt 	 * Invalidate pmap_pdp_cache as it contains L2-pinned objects with
    308  1.6.2.2    yamt 	 * recursive entries.
    309  1.6.2.2    yamt 	 * XXX jym@ : find a way to drain per-CPU caches to. pool_cache_inv
    310  1.6.2.2    yamt 	 * does not do that.
    311  1.6.2.2    yamt 	 */
    312  1.6.2.2    yamt 	pool_cache_invalidate(&pmap_pdp_cache);
    313      1.6     jym 
    314      1.6     jym 	mutex_enter(&pmaps_lock);
    315      1.6     jym 	LIST_FOREACH(pm, &pmaps, pm_list) {
    316      1.6     jym 		for (i = 0; i < PDP_SIZE; i++) {
    317      1.6     jym 			xpq_queue_pte_update(
    318      1.6     jym 			    xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_PTE + i)), 0);
    319      1.6     jym 		}
    320      1.6     jym 	}
    321      1.6     jym 	mutex_exit(&pmaps_lock);
    322      1.6     jym 
    323      1.6     jym 	/* do it for pmap_kernel() too! */
    324  1.6.2.2    yamt 	for (i = 0; i < PDP_SIZE; i++) {
    325      1.6     jym 		xpq_queue_pte_update(
    326      1.6     jym 		    xpmap_ptom(pmap_pdirpa(pmap_kernel(), PDIR_SLOT_PTE + i)),
    327      1.6     jym 		    0);
    328  1.6.2.2    yamt 	}
    329  1.6.2.2    yamt }
    330  1.6.2.2    yamt #endif /* PAE */
    331      1.6     jym 
    332  1.6.2.2    yamt #if defined(PAE) || defined(__x86_64__)
    333  1.6.2.2    yamt 
    334  1.6.2.2    yamt static __inline void
    335  1.6.2.2    yamt pmap_kpm_setpte(struct cpu_info *ci, struct pmap *pmap, int index)
    336  1.6.2.2    yamt {
    337  1.6.2.2    yamt 	KASSERT(mutex_owned(pmap->pm_lock));
    338  1.6.2.2    yamt 	KASSERT(mutex_owned(&ci->ci_kpm_mtx));
    339  1.6.2.2    yamt 	if (pmap == pmap_kernel()) {
    340  1.6.2.2    yamt 		KASSERT(index >= PDIR_SLOT_KERN);
    341  1.6.2.2    yamt 	}
    342  1.6.2.2    yamt #ifdef PAE
    343  1.6.2.2    yamt 	xpq_queue_pte_update(
    344  1.6.2.2    yamt 		xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
    345  1.6.2.2    yamt 		pmap->pm_pdir[index]);
    346  1.6.2.2    yamt #elif defined(__x86_64__)
    347  1.6.2.2    yamt 	xpq_queue_pte_update(
    348  1.6.2.2    yamt 		xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
    349  1.6.2.2    yamt 		pmap->pm_pdir[index]);
    350  1.6.2.2    yamt #endif /* PAE */
    351      1.6     jym 	xpq_flush_queue();
    352  1.6.2.2    yamt }
    353  1.6.2.2    yamt 
/*
 * xen_kpm_sync: synchronise the shadow pdir entry at 'index' with 'pmap'
 * on all cpus on which the pmap is loaded.
 *
 * => pmap must not be NULL; kernel preemption must be disabled by caller
 * => for each CPU, the membership test is repeated under ci_kpm_mtx
 *    (check, lock, re-check) so a concurrent load/unload of the pmap
 *    cannot race the shadow update
 */
void
xen_kpm_sync(struct pmap *pmap, int index)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(pmap != NULL);
	KASSERT(kpreempt_disabled());

	/* push out any cached PTE writes before touching the shadows */
	pmap_pte_flush();

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == NULL) {
			continue;
		}
		cpuid_t cid = cpu_index(ci);
		/* unlocked fast-path skip: pmap not in use on this CPU */
		if (pmap != pmap_kernel() &&
		    !kcpuset_isset(pmap->pm_xen_ptp_cpus, cid))
			continue;

		/* take the lock and check again */
		mutex_enter(&ci->ci_kpm_mtx);
		if (pmap == pmap_kernel() ||
		    kcpuset_isset(pmap->pm_xen_ptp_cpus, cid)) {
			pmap_kpm_setpte(ci, pmap, index);
		}
		mutex_exit(&ci->ci_kpm_mtx);
	}
}
    387  1.6.2.2    yamt 
    388  1.6.2.2    yamt #endif /* PAE || __x86_64__ */
    389