/*	$NetBSD: pmap.h,v 1.34.2.3 2006/03/01 09:28:03 yamt Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#ifndef _LOCORE
#include <machine/pte.h>
#include <sys/queue.h>
#include <uvm/uvm_object.h>
#endif

/*
 * This scheme uses multi-level page tables: two levels while running
 * in 32-bit mode and three levels in 64-bit mode.
 *
 * While we're still in 32-bit mode we do the following:
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 512 32-bit pointers in the pmap for	 9 bits
 *							-------
 * total:						32 bits
 *
 * In 64-bit mode the Spitfire and Blackbird CPUs support only
 * 44-bit virtual addresses.  All addresses between
 * 0x0000 07ff ffff ffff and 0xffff f800 0000 0000 are in the
 * "VA hole" and trap, so we don't have to track them.  However,
 * we do need to keep them in mind during PT walking.  If they
 * ever change the size of the address "hole" we need to rework
 * all the page table handling.
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 1024 64-bit pointers in an 8K page for	10 bits
 * 3rd level: 1024 64-bit pointers in the segmap for	10 bits
 *							-------
 * total:						43 bits
 *
 * Of course, this means for 32-bit spaces we always have a (practically)
 * wasted page for the segmap (only one entry used) and half a page wasted
 * for the page directory.  We still have need of one extra bit 8^(.
 */

#define HOLESHIFT	(43)

#define PTSZ	(PAGE_SIZE/8)			/* 64-bit entries per page table page */
#define PDSZ	(PTSZ)				/* entries per page directory page */
#define STSZ	(PTSZ)				/* entries per segment table (pseg) */

#define PTSHIFT		(13)
#define	PDSHIFT		(10+PTSHIFT)
#define STSHIFT		(10+PDSHIFT)

#define PTMASK		(PTSZ-1)
#define PDMASK		(PDSZ-1)
#define STMASK		(STSZ-1)
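
/*
 * Illustrative sketch (disabled, not part of the pmap interface): one
 * way the HOLESHIFT constant above can be used to test whether an
 * address falls inside the untranslatable 44-bit "VA hole" described
 * in the comment above.  The helper name is hypothetical; the real
 * checks live in pmap.c and locore.
 */
#if 0	/* example only -- never compiled */
static inline int
va_in_hole(vaddr_t va)
{
	/*
	 * Valid low addresses have every bit above HOLESHIFT clear;
	 * valid high addresses have them all set.  Anything else is
	 * in the hole and will trap if touched.
	 */
	uint64_t high = (uint64_t)va >> HOLESHIFT;

	return high != 0 && high != (uint64_t)-1 >> HOLESHIFT;
}
#endif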

#ifndef _LOCORE

/*
 * Support for big page sizes.  This maps a given page size to the
 * corresponding TTE page-size bits.
 */
struct page_size_map {
	uint64_t mask;
	uint64_t code;
#ifdef DEBUG
	uint64_t use;
#endif
};
extern struct page_size_map page_size_map[];
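
/*
 * Illustrative sketch (disabled): one plausible way a caller could walk
 * page_size_map to pick the largest page size that both the VA and PA
 * alignment permit.  The helper and the exact selection policy are
 * assumptions here; the authoritative table and its users live in pmap.c.
 */
#if 0	/* example only -- never compiled */
static inline uint64_t
choose_pagesize_code(vaddr_t va, paddr_t pa, vsize_t len)
{
	const struct page_size_map *psm;

	/*
	 * Assumes the entries run from largest to smallest mask and
	 * that the array is terminated by a zero mask.
	 */
	for (psm = page_size_map; psm->mask != 0; psm++)
		if (((va | pa) & psm->mask) == 0 && len > psm->mask)
			return psm->code;	/* page-size bits for the TTE */
	return 0;				/* fall back to 8K pages */
}
#endif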

/*
 * Pmap stuff
 */

#define va_to_seg(v)	(int)((((paddr_t)(v))>>STSHIFT)&STMASK)
#define va_to_dir(v)	(int)((((paddr_t)(v))>>PDSHIFT)&PDMASK)
#define va_to_pte(v)	(int)((((paddr_t)(v))>>PTSHIFT)&PTMASK)

struct pmap {
	struct uvm_object pm_obj;
#define pm_lock pm_obj.vmobjlock
#define pm_refs pm_obj.uo_refs
	LIST_ENTRY(pmap) pm_list;		/* pmap_ctxlist */

	struct pmap_statistics pm_stats;

	int pm_ctx;		/* Current context */

	/*
	 * This contains 64-bit pointers to pages that contain
	 * 1024 64-bit pointers to page tables.  All addresses
	 * are physical.
	 *
	 * !!! Only touch this through pseg_get() and pseg_set() !!!
	 */
	paddr_t pm_physaddr;	/* physical address of pm_segs */
	int64_t *pm_segs;
};
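
/*
 * Illustrative sketch (disabled): the conceptual three-level walk that
 * pseg_get() performs over pm_segs using the va_to_*() macros above.
 * The real tables hold physical addresses and are read with
 * physical-ASI loads, so the plain dereferences below only show the
 * index arithmetic; as the comment above says, real code must go
 * through pseg_get()/pseg_set().
 */
#if 0	/* example only -- never compiled */
static inline int64_t
pseg_lookup_sketch(struct pmap *pm, vaddr_t va)
{
	int64_t *pdir, *ptbl;

	/* top level: segment table (pm_segs) entry -> page directory page */
	pdir = (int64_t *)(uintptr_t)pm->pm_segs[va_to_seg(va)];
	if (pdir == NULL)
		return 0;

	/* middle level: page directory entry -> page table page */
	ptbl = (int64_t *)(uintptr_t)pdir[va_to_dir(va)];
	if (ptbl == NULL)
		return 0;

	/* leaf: the 64-bit TTE itself (0 means no mapping) */
	return ptbl[va_to_pte(va)];
}
#endif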

/*
 * This comes from the PROM and is used to map prom entries.
 */
struct prom_map {
	uint64_t	vstart;
	uint64_t	vsize;
	uint64_t	tte;
};
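
/*
 * Illustrative sketch (disabled): checking whether a VA falls inside
 * one of the PROM's translations.  The array and count declared here
 * are assumptions for the example only and are not part of this
 * header; the real bookkeeping is done in pmap.c during bootstrap.
 */
#if 0	/* example only -- never compiled */
extern struct prom_map *prom_map;	/* hypothetical declarations */
extern int prom_map_size;

static inline int
va_is_prom_mapped(vaddr_t va)
{
	int i;

	for (i = 0; i < prom_map_size; i++)
		if (va >= prom_map[i].vstart &&
		    va < prom_map[i].vstart + prom_map[i].vsize)
			return 1;	/* prom_map[i].tte maps this VA */
	return 0;
}
#endif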

#define PMAP_NC		0x001	/* Set the E bit in the page */
#define PMAP_NVC	0x002	/* Don't enable the virtual cache */
#define PMAP_LITTLE	0x004	/* Map in little endian mode */
/* Large page size hints --
   we really should use another param to pmap_enter() */
#define PMAP_8K		0x000
#define PMAP_64K	0x008	/* Use 64K page */
#define PMAP_512K	0x010
#define PMAP_4M		0x018
#define PMAP_SZ_TO_TTE(x)	(((x)&0x018)<<58)
/* If these bits differ between two VAs that map the same PA,
   the mappings alias in the D$ */
#define VA_ALIAS_MASK   (1 << 13)
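
/*
 * Illustrative sketch (disabled): how the hint bits above are meant to
 * be consumed.  PMAP_SZ_TO_TTE() moves a PMAP_8K/64K/512K/4M hint into
 * the TTE's page-size field; VA_ALIAS_MASK compares the colour bit of
 * two VAs mapping the same PA to decide whether they can alias in the
 * D$.  The helper names are hypothetical.
 */
#if 0	/* example only -- never compiled */
static inline uint64_t
hint_to_tte_size(int flags)
{
	/* Cast first so the shift by 58 happens in 64-bit arithmetic. */
	return PMAP_SZ_TO_TTE((uint64_t)flags);
}

static inline int
vas_alias_in_dcache(vaddr_t va1, vaddr_t va2)
{
	/* Two VAs for the same PA alias iff they differ in these bits. */
	return ((va1 ^ va2) & VA_ALIAS_MASK) != 0;
}
#endif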

typedef	struct pmap *pmap_t;

#ifdef	_KERNEL
extern struct pmap kernel_pmap_;
#define	pmap_kernel()	(&kernel_pmap_)

#ifdef PMAP_COUNT_DEBUG
/* diagnostic versions if PMAP_COUNT_DEBUG option is used */
int pmap_count_res(struct pmap *);
int pmap_count_wired(struct pmap *);
#define	pmap_resident_count(pm)		pmap_count_res((pm))
#define	pmap_wired_count(pm)		pmap_count_wired((pm))
#else
#define	pmap_resident_count(pm)		((pm)->pm_stats.resident_count)
#define	pmap_wired_count(pm)		((pm)->pm_stats.wired_count)
#endif

#define	pmap_phys_address(x)		(x)

void pmap_activate_pmap(struct pmap *);
void pmap_update(struct pmap *);
void pmap_bootstrap(u_long, u_long);
/* make sure all page mappings are modulo 16K to prevent d$ aliasing */
#define	PMAP_PREFER(pa, va, sz, td)	(*(va)+=(((*(va))^(pa))&(1<<(PGSHIFT))))
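
/*
 * Illustrative sketch (disabled): what PMAP_PREFER() does with the
 * numbers.  Whenever the candidate VA disagrees with the PA in the
 * PGSHIFT bit, the macro bumps the VA by one 8K page so the two agree
 * afterwards and the mapping lands in the matching D$ colour.  The
 * helper and the sample addresses are made up for the example.
 */
#if 0	/* example only -- never compiled */
static inline vaddr_t
pmap_prefer_example(void)
{
	paddr_t pa = 0x40002000;	/* bit 13 set   */
	vaddr_t va = 0x00010000;	/* bit 13 clear */

	/* (va ^ pa) & (1 << 13) == 0x2000, so va becomes 0x00012000. */
	PMAP_PREFER(pa, &va, 8192, 0);
	return va;
}
#endif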

#define	PMAP_GROWKERNEL         /* turn on pmap_growkernel interface */
#define PMAP_NEED_PROCWR

void pmap_procwr(struct proc *, vaddr_t, size_t);

/* SPARC specific? */
int             pmap_dumpsize(void);
int             pmap_dumpmmu(int (*)(dev_t, daddr_t, caddr_t, size_t),
                                 daddr_t);
int		pmap_pa_exists(paddr_t);
void		switchexit(struct lwp *, int);
void		pmap_kprotect(vaddr_t, vm_prot_t);

/* SPARC64 specific */
int	ctx_alloc(struct pmap *);
void	ctx_free(struct pmap *);

/* Installed physical memory, as discovered during bootstrap. */
extern int phys_installed_size;
extern struct mem_region *phys_installed;

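/*
 * Illustrative sketch (disabled): the kind of scan pmap_pa_exists()
 * can make over the installed-memory list above.  The mem_region
 * start/size field names are assumptions here; the structure is
 * defined elsewhere.
 */
#if 0	/* example only -- never compiled */
static inline int
pa_is_installed(paddr_t pa)
{
	int i;

	for (i = 0; i < phys_installed_size; i++)
		if (pa >= phys_installed[i].start &&
		    pa < phys_installed[i].start + phys_installed[i].size)
			return 1;
	return 0;
}
#endif
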
#endif	/* _KERNEL */

#endif	/* _LOCORE */
#endif	/* _MACHINE_PMAP_H_ */