/*	$NetBSD: pmap.h,v 1.2 1999/09/14 10:22:35 tsubai Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _SH3_PMAP_H_
#define _SH3_PMAP_H_

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of the MMU terminology and hardware
 * interface; both it and the commentary below are inherited from the
 * i386 pmap, so the concrete slot numbers and VAs that follow describe
 * the i386 layout (KERNBASE == 0xf0000000).  the actual sh3 slots are
 * given by the PDSLOT_* defines further down.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 *
 * PDE#s	VA range		usage
 * 0->959	0x0 -> 0xefc00000	user address space; note that the
 *					max user address is 0xefbfe000.
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but are no longer used.
 * 959		0xefc00000->		recursive mapping of PDP (used for
 *			0xf0000000	linear mapping of PTPs)
 * 960->1023	0xf0000000->		kernel address space (constant
 *			0xffc00000	across all pmaps/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *			<end>		(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
 * address space).
 *
 * all pmaps' PDs must have the same values in slots 960->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3 (in the
 * inherited i386 terms; the sh3 keeps this pointer in the MMU's TTB
 * register).  this pmap will have all its PTEs mapped into memory at
 * the recursive mapping point (slot #959 as shown above).  when the
 * pmap code wants to find the PTE for a virtual address, all it has
 * to do is the following:
 *
 * address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xefc00000 + (VA / 4096) * 4
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (i.e. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
 *   | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
 *   |    |
 *   |    |
 *   | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
 *   | 960| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#959's VA (0xeffbf000) is
 * defined as "PDP_BASE".  within that mapping there are two defines:
 *   "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
 *      which points back to itself.
 *   "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
 *      establishes the recursive mapping of the alternate pmap.
 *      to set the alternate PDP, one just has to put the correct
 *	PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */
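
/*
 * for illustration (a sketch, assuming NBPG == 4096): the PTE lookup
 * above is exactly what the vtopte() macro below computes:
 *
 *	pt_entry_t *pte = PTE_BASE + (va >> PGSHIFT);
 *			  == (pt_entry_t *)(PDSLOT_PTE * NBPD) + sh3_btop(va)
 */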

/*
 * the following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((u_int)0x33f)	/* PTDPTDI for recursive PDP map */
#define PDSLOT_KERN	((u_int)0x340)	/* KPTDI start of kernel space */
#define PDSLOT_APTE	((u_int)0x37f)	/* alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *)  (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *)  (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)
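
/*
 * for illustration (a sketch, assuming NBPD == 4MB and NBPG == 4096):
 * plugging in PDSLOT_PTE == 0x33f gives
 *
 *	PTE_BASE == (pt_entry_t *)0xcfc00000
 *	PDP_BASE == (pd_entry_t *)0xcff3f000
 *	PDP_PDE  == (pd_entry_t *)0xcff3fcfc
 */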

/*
 * XXXCDC: tmp xlate from old names:
 * PTDPTDI -> PDSLOT_PTE
 * KPTDI -> PDSLOT_KERN
 * APTDPTDI -> PDSLOT_APTE
 */

/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		8	/* 32MB to start */
#endif
#define NKPTP_MIN	8	/* smallest value we allow */
#define NKPTP_MAX	63	/* (PDSLOT_APTE - PDSLOT_KERN) */
				/* largest value (the last slot is
				   reserved for the APTE mapping) */

/*
 * various address macros
 *
 *  vtopte: return a pointer to the PTE mapping a VA
 *  kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
 *  ptetov: given a pointer to a PTE, return the VA that it maps
 *  vtophys: translate a VA to the PA mapped to it
 *
 * plus alternative versions of the above
 */

#define vtopte(VA)	(PTE_BASE + sh3_btop(VA))
#define kvtopte(VA)	vtopte(VA)
#define ptetov(PT)	(sh3_ptob(PT - PTE_BASE))
#define avtopte(VA)	(APTE_BASE + sh3_btop(VA))
#define ptetoav(PT)	(sh3_ptob(PT - APTE_BASE))
#define avtophys(VA)	((*avtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))
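
/*
 * usage sketch (illustrative, assuming a 4KB page size): translating
 * a VA to its PTE and back, and building the PA by hand the way
 * avtophys() does:
 *
 *	pt_entry_t *pte = vtopte(va);
 *	vaddr_t     pva = ptetov(pte);		(truncated-to-page va)
 *	paddr_t     pa  = (*pte & PG_FRAME) | ((unsigned)va & ~PG_FRAME);
 */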

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define	pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define	ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)
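
/*
 * e.g. (a sketch, assuming the usual 4KB-page masks from pte.h:
 * PD_MASK == 0xffc00000, PT_MASK == 0x003ff000): for VA 0xd0401234,
 * pdei(VA) == 0x341 and ptei(VA) == 0x001.
 */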

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */
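
/*
 * e.g. (a sketch, with NBPG == 4096 and NBPD == 4MB): the PTP with
 * index 0x341 lives at byte offset ptp_i2o(0x341) == 0x341000 in the
 * PTE space and maps the 4MB starting at ptp_i2v(0x341) == 0xd0400000.
 */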

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_PVLIST	PG_AVAIL1	/* mapping has entry on pvlist */

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */
};
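
/*
 * locking sketch (illustrative, assuming the simple_lock interface of
 * this era): fields marked "lck by object lock" above are accessed as
 *
 *	simple_lock(&pmap->pm_lock);
 *	pmap->pm_stats.resident_count++;
 *	simple_unlock(&pmap->pm_lock);
 */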

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT (i386 legacy) */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry;

struct pv_head {
	simple_lock_data_t pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};
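
/*
 * traversal sketch (illustrative): walking every <PMAP,VA> mapping of
 * a managed page, with the list's lock held:
 *
 *	struct pv_entry *pve;
 *	simple_lock(&pvh->pvh_lock);
 *	for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next)
 *		... use pve->pv_pmap and pve->pv_va ...
 *	simple_unlock(&pvh->pvh_lock);
 */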

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
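
/*
 * e.g. (a sketch, for a 32-bit sh3 with NBPG == 4096): pv_page_info
 * and pv_entry are 16 bytes each there, so PVE_PER_PVPAGE works out
 * to (4096 - 16) / 16 == 255 entries per pv_page.
 */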

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used
 * to flush the TLB.  if we have more than PMAP_RR_MAX then we stop
 * recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};
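
/*
 * usage sketch (illustrative): after a remove, replay the recorded
 * VAs through pmap_update_pg(); if the record overflowed, fall back
 * to a full flush:
 *
 *	if (prr.prr_npages <= PMAP_RR_MAX) {
 *		while (prr.prr_npages > 0)
 *			pmap_update_pg(prr.prr_vas[--prr.prr_npages]);
 *	} else
 *		pmap_update();		(full TLB flush)
 */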

/*
 * pmap_transfer_location: used to pass the current location in the
 * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
 * a pmap_copy].
 */

struct pmap_transfer_location {
	vaddr_t addr;			/* the address (page-aligned) */
	pt_entry_t *pte;		/* the PTE that maps address */
	struct vm_page *ptp;		/* the PTP that the PTE lives in */
};

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_update()			tlbflush()

#define pmap_clear_modify(pg)		pmap_change_attrs(pg, 0, PG_M)
#define pmap_clear_reference(pg)	pmap_change_attrs(pg, 0, PG_U)
#define pmap_copy(DP,SP,D,L,S)		pmap_transfer(DP,SP,D,L,S, FALSE)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#ifdef notyet
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#else
#define pmap_is_referenced(pg)		1
#endif
#define pmap_move(DP,SP,D,L,S)		pmap_transfer(DP,SP,D,L,S, TRUE)
#define pmap_phys_address(ppn)		sh3_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */
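
/*
 * e.g. (illustrative): the modified/referenced macros all funnel into
 * pmap_change_attrs/pmap_test_attrs:
 *
 *	(void) pmap_clear_modify(pg);	(clears PG_M on all mappings)
 *	if (pmap_is_modified(pg))	(retests PG_M)
 *		...
 */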

/*
 * prototypes
 */

void		pmap_activate __P((struct proc *));
void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
void		pmap_deactivate __P((struct proc *));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove  __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, int));
void		pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
				   vsize_t, vaddr_t, boolean_t));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t,vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB
 *
 * this works by writing to the sh3's memory-mapped TLB address array
 * with the "A" (associate) bit set (the 0x80 in the address below):
 * the hardware compares the VPN we supply (index bits 16-12 in the
 * address, the remaining bits in the data) against the TLB, and since
 * the data we write has the V (valid) bit clear, a matching entry is
 * invalidated.
 */

__inline static void
pmap_update_pg(va)
	vaddr_t va;
{
	u_int32_t *addr, data;

	addr = (void *)(0xf2000080 | (va & 0x0001f000)); /* VPN bits 16-12 */
	data =         (0x00000000 | (va & 0xfffe0c00)); /* VPN bits 31-17, 11-10 */

	*addr = data;
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
	pmap_update_pg(va);
	pmap_update_pg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_change_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(pg, prot)
	struct vm_page *pg;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_change_attrs(pg, PG_RO, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
paddr_t vtophys __P((vaddr_t));
void pmap_emulate_reference __P((struct proc *, vaddr_t, int, int));

/* XXX */
#define SH3_PHYS_TO_P1SEG(pa)	(((pa) & 0x1fffffff) | SH3_P1SEG_BASE)
#define PG_U 0		/* referenced bit */

#endif /* _KERNEL */
#endif /* _SH3_PMAP_H_ */