/*	$NetBSD: booke_pmap.c,v 1.10.6.2 2012/04/05 21:33:17 mrg Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __PMAP_PRIVATE

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.10.6.2 2012/04/05 21:33:17 mrg Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif

CTASSERT(sizeof(struct pmap_segtab) == NBPG);

struct pmap_segtab pmap_kernel_segtab;

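/*
 * Synchronize the instruction cache with the data cache for the range
 * [va, va + len) of p's address space: write back any dirty data cache
 * lines and invalidate the corresponding instruction cache lines so
 * that newly written instructions are fetched correctly.
 */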
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		/* process at most one page per iteration */
		const vaddr_t segeva = min(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry) + off, segeva - va);
		icache_inv(pte_to_paddr(pt_entry) + off, segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

void
pmap_md_page_syncicache(struct vm_page *pg, __cpuset_t onproc)
{
	/*
	 * If onproc is empty, we could do a
	 * pmap_page_protect(pg, VM_PROT_NONE) and remove all
	 * mappings of the page and clear its execness.  Then
	 * the next time the page is faulted, it will get icache
	 * synched.  But this is easier. :)
	 */
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

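/*
 * These helpers implement the 1:1 direct map: a physical address and
 * its kernel virtual address are numerically identical, and any
 * address outside the [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS)
 * window is considered direct-mapped.
 */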
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

#ifdef PMAP_MINIMALTLB
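/*
 * Look up the kernel PTE for va in segment table stp.  Returns NULL
 * if the segment containing va has no PTE page.
 */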
static pt_entry_t *
kvtopte(const struct pmap_segtab *stp, vaddr_t va)
{
	pt_entry_t * const ptep = stp->seg_tab[va >> SEGSHIFT];
	if (ptep == NULL)
		return NULL;
	return &ptep[(va & SEGOFSET) >> PAGE_SHIFT];
}

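/*
 * Fill the kernel PTEs covering [sva, eva): since the kernel runs 1:1,
 * each page's PTE is its own virtual address or'ed with the protection
 * bits in pt_entry; a pt_entry of 0 invalidates the range instead.
 * Returns the first address beyond the filled range.
 */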
vaddr_t
pmap_kvptefill(vaddr_t sva, vaddr_t eva, pt_entry_t pt_entry)
{
	const struct pmap_segtab * const stp = pmap_kernel()->pm_segtab;
	KASSERT(sva == trunc_page(sva));
	pt_entry_t *ptep = kvtopte(stp, sva);
	KASSERT(ptep != NULL);
	for (; sva < eva; sva += NBPG) {
		*ptep++ = pt_entry ? (sva | pt_entry) : 0;
	}
	return sva;
}
#endif

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	endkernel is the first unused address past the loaded kernel
 *	image and must be page aligned.
 */
vaddr_t
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	phys_ram_seg_t *avail, size_t cnt)
{
	struct pmap_segtab * const stp = &pmap_kernel_segtab;

	/*
	 * Initialize the kernel segment table.
	 */
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;

	KASSERT(endkernel == trunc_page(endkernel));

	/*
	 * Compute the number of pages kmem_arena will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

	vsize_t kv_nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
#ifdef SYSVSHM
	    + NBPG * shminfo.shmall
#endif
	    + NBPG * nkmempages) >> SEGSHIFT;

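	/*
	 * Worked example (assuming 4 KiB pages and 4 MiB segments,
	 * i.e. 1024 four-byte PTEs per segment-table page): a 512 MiB
	 * KVA estimate rounds up to 128 segments, so 128 PTE pages
	 * get reserved below.
	 */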
	/*
	 * Initialize `FYI' variables.  Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
	const size_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
		- pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (kv_nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		kv_nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + kv_nsegtabs * NBSEG;
	}

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	const vaddr_t kv_segtabs = avail[0].start;
	KASSERT(kv_segtabs == endkernel);
	KASSERT(avail[0].size >= NBPG * kv_nsegtabs);
	printf(" kv_nsegtabs=%#"PRIxVSIZE, kv_nsegtabs);
	printf(" kv_segtabs=%#"PRIxVADDR, kv_segtabs);
	avail[0].start += NBPG * kv_nsegtabs;
	avail[0].size -= NBPG * kv_nsegtabs;
	endkernel += NBPG * kv_nsegtabs;

	/*
	 * Initialize the kernel's two-level page table.  This only wastes
	 * an extra page for the segment table and allows the user/kernel
	 * access to be common.
	 */
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	pt_entry_t *ptep = (void *)kv_segtabs;
	memset(ptep, 0, NBPG * kv_nsegtabs);
	for (size_t i = 0; i < kv_nsegtabs; i++, ptep += NPTEPG) {
		*ptp++ = ptep;
	}

#ifdef PMAP_MINIMALTLB
	const vsize_t dm_nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	const vaddr_t dm_segtabs = avail[0].start;
	printf(" dm_nsegtabs=%#"PRIxVSIZE, dm_nsegtabs);
	printf(" dm_segtabs=%#"PRIxVADDR, dm_segtabs);
	KASSERT(dm_segtabs == endkernel);
	KASSERT(avail[0].size >= NBPG * dm_nsegtabs);
	avail[0].start += NBPG * dm_nsegtabs;
	avail[0].size -= NBPG * dm_nsegtabs;
	endkernel += NBPG * dm_nsegtabs;

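	/*
	 * Point the low seg_tab entries at the PTE pages just
	 * allocated, so the direct map of physical memory is backed
	 * by page tables.
	 */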
	ptp = stp->seg_tab;
	ptep = (void *)dm_segtabs;
	memset(ptep, 0, NBPG * dm_nsegtabs);
	for (size_t i = 0; i < dm_nsegtabs; i++, ptp++, ptep += NPTEPG) {
		*ptp = ptep;
	}

	/*
	 * Set the initial protections on the kernel image: text is
	 * read-only and executable, .rodata is read-only, and .data/.bss
	 * (plus the page-table pages) are read-write.
	 */
	extern uint32_t _fdata[], _etext[];
	vaddr_t va;

	/* Now make everything before the kernel inaccessible. */
	va = pmap_kvptefill(NBPG, startkernel, 0);

	/* Kernel text is read-only & executable */
	va = pmap_kvptefill(va, round_page((vaddr_t)_etext),
	    PTE_M | PTE_xR | PTE_xX);

	/* Kernel .rodata is read-only */
	va = pmap_kvptefill(va, trunc_page((vaddr_t)_fdata), PTE_M | PTE_xR);

	/* Kernel .data/.bss + page tables are read-write */
	va = pmap_kvptefill(va, round_page(endkernel), PTE_M | PTE_xR | PTE_xW);

	/* The message buffer is read-write */
	(void) pmap_kvptefill(msgbuf_paddr, msgbuf_paddr+round_page(MSGBUFSIZE),
	    PTE_M | PTE_xR | PTE_xW);
#endif

	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(0);

	return endkernel;
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

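/*
 * Map/unmap a pool page at its direct-mapped address.  When
 * PMAP_MINIMALTLB is configured the direct map is not permanently
 * resident, so the covering kernel PTEs must be filled on map and
 * invalidated (along with any TLB entries) on unmap.
 */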
vaddr_t
pmap_md_map_poolpage(paddr_t pa, vsize_t size)
{
	const vaddr_t sva = (vaddr_t) pa;
#ifdef PMAP_MINIMALTLB
	const vaddr_t eva = sva + size;
	pmap_kvptefill(sva, eva, PTE_M | PTE_xR | PTE_xW);
#endif
	return sva;
}

void
pmap_md_unmap_poolpage(vaddr_t va, vsize_t size)
{
#ifdef PMAP_MINIMALTLB
	struct pmap * const pm = pmap_kernel();
	const vaddr_t eva = va + size;
	pmap_kvptefill(va, eva, 0);
	for (; va < eva; va += NBPG) {
		pmap_tlb_invalidate_addr(pm, va);
	}
	pmap_update(pm);
#endif
}

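/*
 * Zero a physical page through its direct-mapped kernel address.
 */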
void
pmap_zero_page(paddr_t pa)
{
	vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
	dcache_zero_page(va);

	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))));
	pmap_md_unmap_poolpage(va, NBPG);
}

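/*
 * Copy a page a cacheline at a time: prefetch the next source line,
 * establish each destination line with dcba so it isn't fetched from
 * memory, and move 32 bytes per lmw/stmw pair.
 */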
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const vaddr_t src_page = pmap_md_map_poolpage(src, NBPG);
	const vaddr_t dst_page = pmap_md_map_poolpage(dst, NBPG);
	vaddr_t src_va = src_page;
	vaddr_t dst_va = dst_page;
	const vaddr_t end = src_va + PAGE_SIZE;

	while (src_va < end) {
		__asm(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* don't fetch dst cacheline */
		    :: "b"(src_va), "b"(dst_va), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src_va += 32, dst_va += 32, i += 32) {
			/* 8 registers * 4 bytes = 32 bytes per pair */
			__asm(
				"lmw	24,0(%0)" "\n\t"
				"stmw	24,0(%1)"
			    :: "b"(src_va), "b"(dst_va)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "r31");
		}
	}
	pmap_md_unmap_poolpage(src_page, NBPG);
	pmap_md_unmap_poolpage(dst_page, NBPG);

	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst))));
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}

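/*
 * TLB walk callback: verify that a TLB entry belonging to this pmap's
 * current ASID agrees with the page tables, after masking off the
 * software-managed bits (PTE_UNSYNCED, PTE_UNMODIFIED, PTE_WIRED)
 * that are not exposed to the TLB.
 */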
bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	pmap_t pm = ctx;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, curcpu()->ci_tlb_info);

	if (asid != pai->pai_asid)
		return true;

	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
	KASSERT(ptep != NULL);
	pt_entry_t xpte = *ptep;
	xpte &= ~((xpte & (PTE_UNSYNCED|PTE_UNMODIFIED)) << 1);
	xpte ^= xpte & (PTE_UNSYNCED|PTE_UNMODIFIED|PTE_WIRED);

	KASSERTMSG(pte == xpte,
	    "pm=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#x) != real pte (%#x/%#x)",
	    pm, va, asid, pte, xpte, *ptep);

	return true;
}

#ifdef MULTIPROCESSOR
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	/* nothing */
}
#endif /* MULTIPROCESSOR */