/*	$NetBSD: pmap.h,v 1.1 1999/09/13 10:31:20 itojun Exp $	*/

/*
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.4 (Berkeley) 5/12/91
 */

/*
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of large, sparse address
 * spaces, and to reduce the per-process memory cost.
 *
 * from hp300:	@(#)pmap.h	7.2 (Berkeley) 12/16/90
 */
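
/*
 * Concretely (the constants are defined below; values assume 4KB pages
 * and a 1024-entry page directory): PTD[PTDPTDI] holds the physical
 * address of the page directory page itself, with the valid/protection
 * bits set.  That single entry makes the 4MB of virtual space starting
 * at (PTDPTDI << PDSHIFT) map every page-table page, and the page
 * directory itself shows up inside that window as the PTD[] array
 * declared below.
 */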

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#endif

#ifdef PMAP_NEW			/* redirect */
#include <sh3/pmap.new.h>	/* defines _SH3_PMAP_H_ */
#endif

#ifndef	_SH3_PMAP_H_
#define	_SH3_PMAP_H_

#include <machine/cpufunc.h>
#include <machine/pte.h>

/*
 * SH3 page table entry and page table directory
 * T.Horiuchi 06/02/1998
 */

/*
 * One page directory, shared between
 * kernel and user modes.
 */
/*
 *	kernel virtual address start = 0xd0000000
 *	Page Table Area virtual address start = 0xcfc00000
 */
#define	PTDPTDI		0x33f		/* ptd entry that points to ptd! */
#define	KPTDI		0x340		/* start of kernel virtual pde's */
#define	NKPDE_BASE	4		/* min. # of kernel PDEs */
#define	NKPDE_MAX	63		/* max. # of kernel PDEs */
#define	NKPDE_SCALE	1		/* # of kernel PDEs to add per meg. */
#define	APTDPTDI	0x37f		/* start of alternate page directory */

#define UPT_MIN_ADDRESS	(PTDPTDI<<PDSHIFT)
#define UPT_MAX_ADDRESS	(UPT_MIN_ADDRESS + (PTDPTDI<<PGSHIFT))
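
/*
 * Worked out (illustrative; assumes PDSHIFT == 22 and PGSHIFT == 12 from
 * <machine/pte.h>, i.e. 4KB pages and 4MB per page-directory entry):
 *
 *	UPT_MIN_ADDRESS  == 0x33f << 22 == 0xcfc00000  (Page Table Area)
 *	KPTDI << PDSHIFT == 0x340 << 22 == 0xd0000000  (kernel VA start)
 *	UPT_MAX_ADDRESS  == 0xcfc00000 + (0x33f << 12) == 0xcff3f000
 *	APTDPTDI << PDSHIFT == 0x37f << 22 == 0xdfc00000  (alternate area)
 *
 * which matches the addresses quoted in the comment above.
 */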

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t	PTmap[], APTmap[], Upte;
extern pd_entry_t	PTD[], APTD[], PTDpde, APTDpde, Upde;
extern pt_entry_t	*Sysmap;

extern int	PTDpaddr;	/* physical address of kernel PTD */

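/*
 * pmap_testbit() and pmap_changebit() act on every PTE that maps the
 * given physical page (via the pv lists below).  Judging from the inline
 * wrappers at the end of this file, pmap_changebit(pa, set, mask) ANDs
 * each such PTE with `mask' and then ORs in `set'; e.g.
 * pmap_changebit(pa, 0, ~PG_M) clears the modified bit.
 */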
void pmap_bootstrap __P((vm_offset_t start));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, int));
#endif

/*
 * Macros to convert a virtual address to a page table entry and to a
 * physical address.  Likewise for the alternate address space.
 * Note: these work recursively, so vtopte() of a PTE address gives
 * the corresponding PDE that in turn maps it.
 */
#define	vtopte(va)	(PTmap + sh3_btop(va))
#define	kvtopte(va)	vtopte(va)
#define	ptetov(pt)	(sh3_ptob(pt - PTmap))

#define	avtopte(va)	(APTmap + sh3_btop(va))
#define	ptetoav(pt)	(sh3_ptob(pt - APTmap))
#define	avtophys(va) \
	((*avtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))
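
/*
 * Illustrative sketch (not part of the original interface): the same
 * frame/offset split that avtophys() performs, applied to the current
 * address space through vtopte().  The real vtophys() is only declared
 * below; its implementation lives in pmap.c.
 */
#if 0	/* example only */
static __inline vm_offset_t
example_vtophys(vm_offset_t va)
{
	return ((*vtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME));
}
#endif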

/*
 * macros to generate page directory/table indices
 */
#define	pdei(va)	(((va) & PD_MASK) >> PDSHIFT)
#define	ptei(va)	(((va) & PT_MASK) >> PGSHIFT)
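
/*
 * Worked example (illustrative; assumes PD_MASK == 0xffc00000,
 * PT_MASK == 0x003ff000, PDSHIFT == 22 and PGSHIFT == 12 in
 * <machine/pte.h>):
 *
 *	va == 0xd0012345
 *	pdei(va) == 0x340	(== KPTDI, the first kernel PDE)
 *	ptei(va) == 0x012
 *	page offset == 0x345
 */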

/*
 * Pmap stuff
 */
typedef struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	boolean_t		pm_pdchanged;	/* pdir changed */
	short			pm_dref;	/* page directory ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
	vm_offset_t		pm_va_top;	/* VA top address */
} *pmap_t;

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry; the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
};
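
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * walking every mapping of a managed physical page.  pv_table[] is
 * declared below, under #ifdef _KERNEL; the index shown here is the
 * simple sh3_btop(pa) form, whereas the real pmap derives the index
 * from the start of managed memory.
 */
#if 0	/* example only */
static __inline int
example_count_mappings(vm_offset_t pa)
{
	struct pv_entry *pv;
	int n = 0;

	for (pv = &pv_table[sh3_btop(pa)]; pv != NULL; pv = pv->pv_next)
		if (pv->pv_pmap != NULL)	/* head may be an empty entry */
			n++;
	return (n);
}
#endif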

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	340

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
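
/*
 * Worked out (illustrative; assumes NBPG == 4096 and 32-bit pointers, so
 * sizeof(struct pv_page_info) == 16 and sizeof(struct pv_entry) == 12):
 *
 *	NPVPPG == (4096 - 16) / 12 == 340
 *
 * i.e. sizeof(struct pv_page) == 16 + 340 * 12 == 4096, so each pv_page
 * fills exactly one hardware page.
 */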

#ifdef	_KERNEL
extern int		nkpde;		/* number of kernel page dir. ents */
extern struct pmap	kernel_pmap_store;
struct pv_entry		*pv_table;	/* array of entries, one per page */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_update()			tlbflush()
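
/*
 * Illustrative usage of the macros above (hypothetical code, sketch
 * only):
 */
#if 0	/* example only */
static void
example_after_kernel_pte_change(void)
{
	long resident;

	/* ... kernel PTEs were just modified ... */
	pmap_update();			/* tlbflush(): drop stale TLB entries */
	resident = pmap_resident_count(pmap_kernel());
	(void)resident;			/* e.g. report or assert on it */
}
#endif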

vm_offset_t reserve_dumppages __P((vm_offset_t));

static __inline void
pmap_clear_modify(vm_offset_t pa)
{
	pmap_changebit(pa, 0, ~PG_M);
}

static __inline void
pmap_clear_reference(vm_offset_t pa)
{
#ifdef	TODO
	pmap_changebit(pa, 0, ~PG_U);
#endif
}

static __inline void
pmap_copy_on_write(vm_offset_t pa)
{
	pmap_changebit(pa, PG_RO, ~PG_RW);
}

static __inline boolean_t
pmap_is_modified(vm_offset_t pa)
{
	return pmap_testbit(pa, PG_M);
}

static __inline boolean_t
pmap_is_referenced(vm_offset_t pa)
{
#ifdef	TODO
	return pmap_testbit(pa, PG_U);
#else
	return 1;
#endif
}

static __inline vm_offset_t
pmap_phys_address(int ppn)
{
	return sh3_ptob(ppn);
}

void pmap_activate __P((struct proc *));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
paddr_t vtophys __P((vaddr_t));
void pmap_emulate_reference __P((struct proc *, vaddr_t, int, int));

#endif /* _KERNEL */

#endif /* _SH3_PMAP_H_ */