/*	$NetBSD: pmap.h,v 1.92 2009/08/19 23:54:33 thorpej Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#endif
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
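
/*
 * To make the "4096 1MB regions" statement concrete: each region is
 * selected by the top 12 bits of a virtual address.  A minimal sketch
 * of the lookup (illustrative only; the helper name is hypothetical
 * and the real code lives in the pmap implementation):
 */
#if 0	/* example, not compiled */
static inline pd_entry_t
l1_slot_example(pd_entry_t *l1_table, vaddr_t va)
{
	/* 4GB / 1MB = 4096 slots; va >> L1_S_SHIFT (20) picks one. */
	return (l1_table[va >> L1_S_SHIFT]);
}
#endif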

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
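
/*
 * Worked example of the constants above, with L1_S_SHIFT == 20:
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256 l2_dtable slots, each
 * covering 16MB.  A sketch of how a virtual address would decompose
 * into an l2_dtable slot and a bucket within it (hypothetical helper;
 * the real equivalents are macros in the pmap implementation):
 */
#if 0	/* example, not compiled */
static inline void
l2_indices_example(vaddr_t va, u_int *slot, u_int *bucket)
{
	u_int l1idx = va >> L1_S_SHIFT;		/* which 1MB region */

	*slot = l1idx >> L2_BUCKET_LOG2;	/* which l2_dtable */
	*bucket = l1idx & (L2_BUCKET_SIZE - 1);	/* which bucket in it */
}
#endif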

/*
 * Tell MI code that the cache is virtually indexed.
 * ARMv6 caches are physically tagged (VIPT); all earlier ones are
 * virtually tagged (VIVT).
 */
#if ARM_MMU_V6 > 0
#define PMAP_CACHE_VIPT
#else
#define PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
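
/*
 * A minimal sketch of how the occupancy words are meant to be used
 * (the helper is hypothetical): if a pmap's data-cache byte is zero,
 * nothing belonging to it can be in the data cache, so a cache clean
 * may be skipped.  Writing PMAP_CACHE_STATE_ALL to cs_all defeats
 * that optimization and forces the cacheops to run.
 */
#if 0	/* example, not compiled */
static inline bool
pmap_cache_maybe_dirty_example(const struct pmap *pm)
{
	return (pm->pm_cstate.cs_cache_d != 0);
}
#endif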

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
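
/*
 * Example of such a table (all addresses are made up; real tables live
 * in board-specific startup code).  By convention the table ends with
 * an all-zero entry and is handed to pmap_devmap_bootstrap(), declared
 * later in this file; PTE_NOCACHE is defined below.
 */
#if 0	/* example, not compiled */
static const struct pmap_devmap example_devmap[] = {
	{
		0xfd000000,			/* pd_va */
		0x80000000,			/* pd_pa: device registers */
		0x00100000,			/* pd_size: one 1MB section */
		VM_PROT_READ | VM_PROT_WRITE,	/* pd_prot */
		PTE_NOCACHE,			/* pd_cache: device memory */
	},
	{ 0, 0, 0, 0, 0 }			/* terminator */
};
#endif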

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	bool			pm_remove_all;
	bool			pm_activated;
	struct l1_ttable	*pm_l1;
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
	union pmap_cache_state	pm_cstate;
	struct uvm_object	pm_obj;
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelpages;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in the individual
 * pv_entry structures for each page.  They live in the same "namespace"
 * so that we can clear multiple attributes at a time.
 *
 * Note that the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)
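
/*
 * Because these flags share one namespace, several attributes can be
 * tested or cleared in a single operation, e.g. (illustrative only):
 *
 *	pg->mdpage.pvh_attrs &= ~(PVF_MOD|PVF_REF);
 */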

/*
 * Commonly referenced structures
 */
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_remove(pmap,sva,eva)	pmap_do_remove((pmap),(sva),(eva),0)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if ARM_MMU_V6 > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * Used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can play fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode, and ARMv6
 * needs PTE syncs as well.  So, on these, we must do PTE syncs.  If
 * only such CPUs are configured, then evaluate this at compile time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
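
/*
 * Typical usage (illustrative only): after storing a new PTE value,
 * write the cache line back so the MMU's hardware table walker sees
 * the update:
 *
 *	*ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
 *	PTE_SYNC(ptep);
 */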

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
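
/*
 * Worked example (assuming a port with KERNEL_BASE == 0xc0000000 and
 * the usual 16KB L1 table): 0xc0000000 >> 20 = 3072 user slots, so
 * KERNEL_PD_SIZE = 16384 - 3072 * 4 = 4096 bytes, i.e. the top 1024
 * section entries belong to the kernel.
 */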

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
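
/*
 * Putting the pieces together: a minimal sketch (not the pmap's actual
 * code path) of how a small-page PTE for a cacheable, writable kernel
 * mapping of physical page `pa' could be composed from the macros and
 * mode variables above:
 *
 *	pt_entry_t npte = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 */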

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
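
/*
 * For example, a bootstrap mapper such as pmap_map_chunk() can use
 * these to pick the largest mapping that fits (illustrative sketch):
 *
 *	if (L1_S_MAPPABLE_P(va, pa, resid)) {
 *		... map a 1MB section ...
 *	} else if (L2_L_MAPPABLE_P(va, pa, resid)) {
 *		... map a 64KB large page ...
 *	} else {
 *		... map 4KB small pages ...
 *	}
 */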

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */