/*	$NetBSD: pmap.h,v 1.72 2008/01/04 21:56:36 joerg Exp $	   */

/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */


#ifndef PMAP_H
#define PMAP_H

#include <sys/simplelock.h>

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Some constants to make life easier.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define LTOHPN		(1 << LTOHPS)
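
/*
 * Example (assuming the usual values PGSHIFT == 12, i.e. 4 KB MI pages,
 * and VAX_PGSHIFT == 9, i.e. 512-byte hardware pages):
 *
 *	LTOHPS == 3		shift from MI pages to hardware pages
 *	LTOHPN == 8		hardware pages backing one MI page
 *
 *	hwpages = mipages << LTOHPS;	so 2 MI pages -> 16 VAX pages
 */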

/*
 * Link struct used if more than one process shares a pmap (as with vfork).
 * This is rarely used.
 */
struct pm_share {
	struct pm_share	*ps_next;
	struct pcb	*ps_pcb;
};

/*
 * Pmap structure.
 */

typedef struct pmap {
	struct pte	*pm_p1ap;	/* Base of allocated P1 PTE space */
	int		 pm_count;	/* reference count */
	struct pm_share	*pm_share;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics	 pm_stats;	/* Some statistics */
} *pmap_t;

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a struct pv_entry; the list heads
 * are kept in pv_table.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* address for this physical page */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* write/modified bits */
};

extern	struct pv_entry *pv_table;
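
/*
 * Example of how a page's list is found (a sketch; pa is a hypothetical
 * physical address): the head entry lives in pv_table, indexed by the MI
 * page frame number, and further mappings hang off pv_next.  The inline
 * functions below use the same indexing.
 *
 *	struct pv_entry *pv = pv_table + (pa >> PGSHIFT);
 */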

/* Mapping macros used when allocating the system page table (SPT) */
#define MAPVIRT(ptr, count)				\
	ptr = virtual_avail;		\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)			\
	ptr = avail_start + KERNBASE;	\
	avail_start += (count) * VAX_NBPG;
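
/*
 * Usage sketch (hypothetical names, meant for bootstrap-time allocation):
 * MAPPHYS hands back a KERNBASE-mapped chunk of physical memory and
 * advances avail_start; MAPVIRT only reserves kernel virtual space and
 * advances virtual_avail.  Counts are in VAX (512-byte) pages, and the
 * perm argument is currently unused by the macro.
 *
 *	MAPPHYS(scratch, 4, VM_PROT_READ|VM_PROT_WRITE);
 *	MAPVIRT(scratchva, 2);
 */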

#ifdef	_KERNEL

extern	struct pmap kernel_pmap_store;

#define pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */


/*
 * Real nice (fast) routines to get the virtual address of a physical page
 * (and vice versa).
 */
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)
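
/*
 * Example (assuming the VAX KERNBASE of 0x80000000, as also used by the
 * copy/zero macros below): physical pages are permanently visible in
 * system space, so the translation is a constant OR/AND.
 *
 *	PMAP_MAP_POOLPAGE(0x00123000)   == 0x80123000
 *	PMAP_UNMAP_POOLPAGE(0x80123000) == 0x00123000
 */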

#define PMAP_STEAL_MEMORY

/*
 * This is by far the most used pmap routine. Make it inline.
 */
__inline static bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	int	*pte, sva;

	if (va & KERNBASE) {		/* System (kernel) space address */
		paddr_t pa;

		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (true);
		return (false);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {		/* P0 (program) region */
		if (sva > (pmap->pm_p0lr & ~AST_MASK))
			goto fail;
		pte = (int *)pmap->pm_p0br;
	} else {			/* P1 (stack) region */
		if (sva < pmap->pm_p1lr)
			goto fail;
		pte = (int *)pmap->pm_p1br;
	}
	if (kvtopte(&pte[sva])->pg_pfn && pte[sva]) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (true);
	}
  fail:
	if (pap)
		*pap = 0;
	return (false);
}
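
/*
 * Typical use (a sketch; va and pa are hypothetical locals): the caller
 * passes a pointer for the physical address and checks the return value.
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa))
 *		...va is mapped, pa holds the physical address...
 *	else
 *		...no valid mapping for va...
 */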

bool pmap_clear_modify_long(struct pv_entry *);
bool pmap_clear_reference_long(struct pv_entry *);
bool pmap_is_modified_long(struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);

__inline static bool
pmap_is_referenced(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	return rv;
}

__inline static bool
pmap_clear_reference(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	pv->pv_attr &= ~PG_V;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_reference_long(pv);
	return rv;
}

__inline static bool
pmap_clear_modify(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_M) != 0;

	pv->pv_attr &= ~PG_M;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_modify_long(pv);
	return rv;
}

__inline static bool
pmap_is_modified(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	if (pv->pv_attr & PG_M)
		return true;
	else
		return pmap_is_modified_long(pv);
}

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		pmap_page_protect_long(pv, prot);
}

__inline static void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	/*
	 * Call the long version only if the pmap has some P0/P1 PTE
	 * space set up or the range is in system space; an empty pmap
	 * with a user-space range has nothing to protect.
	 */
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* Routines that are best defined as macros */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define pmap_update(pmap)		/* nothing (yet) */
#define pmap_collect(pmap)		/* No need so far */
#define pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_reference(pmap)		(pmap)->pm_count++

/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst)			\
	__asm("addl3 $0x80000000,%0,%%r0;"		\
		"addl3 $0x80000000,%1,%%r1;"		\
		"movc3 $4096,(%%r0),(%%r1)"		\
	    :: "r"(src), "r"(dst)			\
	    : "r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)				\
	__asm("addl3 $0x80000000,%0,%%r0;"		\
		"movc5 $0,(%%r0),$0,$4096,(%%r0)"	\
	    :: "r"(phys)				\
	    : "r0","r1","r2","r3","r4","r5");
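
/*
 * Rough C equivalents of the above (a sketch only; the real macros use
 * MOVC3/MOVC5 through the KERNBASE mapping of the physical pages):
 *
 *	pmap_copy_page(src, dst):
 *		memcpy((void *)(dst + KERNBASE), (void *)(src + KERNBASE), 4096);
 *	pmap_zero_page(phys):
 *		memset((void *)(phys + KERNBASE), 0, 4096);
 */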

/* Prototypes */
void	pmap_bootstrap(void);
vaddr_t pmap_map(vaddr_t, vaddr_t, vaddr_t, int);

#endif /* PMAP_H */