/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _PMAP_PRIVATE

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.2.2.1 2011/03/05 15:09:58 bouyer Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>

/*
 * Initialize the kernel pmap.
 */
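/*
 * struct pmap ends with the per-CPU pm_pai[] array of TLB/ASID state, so
 * a MULTIPROCESSOR kernel only needs the first MAXCPUS entries; offsetof()
 * yields exactly that truncated size.
 */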
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif

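/* A pmap_segtab must occupy exactly one hardware page. */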
CTASSERT(sizeof(struct pmap_segtab) == NBPG);

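/*
 * pmap_procwr: synchronize the instruction cache with recently written
 * instructions in the given process's address space (e.g. after the
 * debugger or a signal trampoline writes code).
 */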
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;	/* page offset of the first address */

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		/* Process at most one page per iteration. */
		const vaddr_t segeva = min(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		/*
		 * Allow preemption across the (possibly slow) cache ops:
		 * write back the data cache, then invalidate the matching
		 * instruction cache lines, starting at va's offset within
		 * the page.
		 */
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry) + off, segeva - va);
		icache_inv(pte_to_paddr(pt_entry) + off, segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

void
pmap_md_page_syncicache(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

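/*
 * BookE kernels run with RAM mapped virtual == physical, so converting
 * between a direct-mapped VA and a PA is a simple cast.
 */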
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

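/*
 * Every address outside the managed kernel VA range
 * [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) is direct mapped.
 */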
bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	startkernel/endkernel delimit the kernel image; avail describes
 *	the cnt physical RAM segments available for use.
 */
void
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	const phys_ram_seg_t *avail, size_t cnt)
{
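	/*
	 * Hand each physical RAM segment to UVM, echoing the arguments
	 * to the console for boot-time diagnostics.
	 */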
	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */

	/*
	 * Compute the number of pages kmem_map will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get the size of the buffer cache and set an upper limit. */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

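	/*
	 * Estimate how much kernel VA the system will need: VM_PHYS_SIZE
	 * plus space for the UBC windows, the buffer cache, exec argument
	 * strings, the pager map, a u-area per process, SysV shared
	 * memory, and kmem_map pages; then convert that byte total into
	 * a count of NBSEG-sized segment tables.
	 */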
	vsize_t nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
#ifdef SYSVSHM
	    + NBPG * shminfo.shmall
#endif
	    + NBPG * nkmempages) / NBSEG;

	/*
	 * Initialize `FYI' variables.  Note we're relying on the fact
	 * that the VM_PSTRAT_BSEARCH physseg strategy keeps the
	 * vm_physmem[] array sorted for us.  Must do this before
	 * uvm_pageboot_alloc() can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
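	/*
	 * Clamp the estimate to the number of segments that actually fit
	 * between VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS.
	 */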
	const vsize_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
		- pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + nsegtabs * NBSEG;
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	vaddr_t segtabs =
	    uvm_pageboot_alloc(NBPG * nsegtabs + sizeof(struct pmap_segtab));

	/*
	 * Initialize the kernel's two-level page table.  This only wastes
	 * an extra page for the segment table and allows the user/kernel
	 * access to be common.
	 */
	struct pmap_segtab * const stp = (void *)segtabs;
	segtabs += round_page(sizeof(struct pmap_segtab));
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG) {
		*ptp++ = (void *)segtabs;
	}
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;
	printf(" kern_segtab=%p", stp);

#if 0
	nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	segtabs = uvm_pageboot_alloc(NBPG * nsegtabs);
	ptp = stp->seg_tab;
	pt_entry_t pt_entry = PTE_M|PTE_xX|PTE_xR;
	pt_entry_t *ptep = (void *)segtabs;
	printf("%s: allocated %lu page table pages for mapping %u pages\n",
	    __func__, nsegtabs, physmem);
	for (u_int i = 0; i < nsegtabs; i++, segtabs += NBPG, ptp++) {
		*ptp = ptep;
		for (u_int j = 0; j < NPTEPG; j++, ptep++) {
			*ptep = pt_entry;
			pt_entry += NBPG;
		}
		printf(" [%u]=%p (%#x)", i, *ptp, **ptp);
		pt_entry |= PTE_xW;
		pt_entry &= ~PTE_xX;
	}

	/*
	 * Now make everything before the kernel inaccessible,
	 * one page at a time.
	 */
	for (u_int i = 0; i < startkernel; i += NBPG) {
		stp->seg_tab[i >> SEGSHIFT][(i & SEGOFSET) >> PAGE_SHIFT] = 0;
	}
#endif

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

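	/* Switch to the kernel's address-space ID. */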
	tlb_set_asid(0);
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

void
pmap_zero_page(paddr_t pa)
{
//	printf("%s(%#lx): calling dcache_zero_page(%#lx)\n", __func__, pa, pa);
	dcache_zero_page(pa);
}

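/*
 * Copy a page a cache line at a time through the direct mapping, so the
 * physical addresses can be used as virtual addresses directly.
 */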
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	const paddr_t end = src + PAGE_SIZE;

	while (src < end) {
		__asm(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cache line */
			"dcba	0,%1"	"\n\t" 	/* don't fetch dst cache line */
		    :: "b"(src), "b"(dst), "b"(line_size));
		/*
		 * Copy one cache line in 32-byte chunks: lmw/stmw move
		 * registers r24-r31 (8 words = 32 bytes) per iteration.
		 */
		for (u_int i = 0;
		     i < line_size;
		     src += 32, dst += 32, i += 32) {
			__asm(
				"lmw	24,0(%0)" "\n\t"
				"stmw	24,0(%1)"
			    :: "b"(src), "b"(dst)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "r31");
		}
	}
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

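/*
 * A VA is a device (I/O) address if it is direct mapped (i.e. outside
 * the managed kernel VA range) but lies beyond the end of physical RAM.
 */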
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}
    300      1.2    matt 
    301