/*	$NetBSD: pmap.h,v 1.10 1996/11/06 15:33:56 leo Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t 		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stchanged;	/* ST changed */
	int			pm_stfree;	/* 040: free lev2 blocks */
	u_int			*pm_stpa;	/* 040: ST phys. address */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	long			pm_ptpages;	/* more stats: PT pages */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap *pmap_t;
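
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might take an extra reference on a pmap.  The name
 * pmap_reference_sketch() and the exact locking discipline are
 * assumptions for illustration; the real pmap_reference() lives in
 * pmap.c.
 */
#if 0	/* sketch only, not compiled */
static void
pmap_reference_sketch(pmap_t pmap)
{
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;		/* one more user of this pmap */
		simple_unlock(&pmap->pm_lock);
	}
}
#endif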

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and for user processes.  A value of 16 implies that only
 * the initial "segment table" page is used.  WARNING: don't change
 * MAXUL2SIZE unless you can allocate physically contiguous pages for the
 * ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define MAXUL2SIZE	16
#define l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
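
/*
 * Illustrative sketch (not in the original header): allocating and
 * freeing a level 2 block through the pm_stfree bitmap with the
 * l2tobm()/bmtol2() macros above, assuming set bits denote free blocks
 * as the pm_stfree field comment suggests.  The helper names are
 * hypothetical; the real allocation logic is in pmap.c.
 */
#if 0	/* sketch only, not compiled */
static int
st_alloc_block_sketch(struct pmap *pm)
{
	int blk;

	if (pm->pm_stfree == 0)
		return (-1);			/* no free level 2 blocks */
	blk = bmtol2(pm->pm_stfree);		/* lowest set bit -> block # */
	pm->pm_stfree &= ~l2tobm(blk);		/* mark block in use */
	return (blk);
}

static void
st_free_block_sketch(struct pmap *pm, int blk)
{
	pm->pm_stfree |= l2tobm(blk);		/* mark block free again */
}
#endif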

/*
 * Macros for speed
 */
#define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
	if ((pmapp) != NULL && (pmapp)->pm_stchanged) { \
		(pcbp)->pcb_ustp = \
		    atari_btop((vm_offset_t)(pmapp)->pm_stpa); \
		if (iscurproc) \
			loadustp((pcbp)->pcb_ustp); \
		(pmapp)->pm_stchanged = FALSE; \
	}

#define PMAP_DEACTIVATE(pmapp, pcbp)
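
/*
 * Illustrative sketch (not in the original header): how PMAP_ACTIVATE
 * might be wrapped when a process's address space is made active.  The
 * wrapper name is hypothetical; the point is that the user segment table
 * pointer is only reloaded (loadustp()) when the segment table changed
 * and the process is the current one.
 */
#if 0	/* sketch only, not compiled */
static void
pmap_activate_sketch(pmap_t pmap, struct pcb *pcbp, int iscurproc)
{
	/*
	 * Records the new user segment table pointer in the pcb and,
	 * for the current process, loads it into the MMU as well.
	 */
	PMAP_ACTIVATE(pmap, pcbp, iscurproc);
}
#endif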

/*
 * Description of the memory segments.  Built in atari_init/start_c().
 * This gives a better separation between the machine-dependent code and
 * the pmap module.
 */
#define	NMEM_SEGS	8
struct memseg {
	vm_offset_t	start;		/* PA of first page in segment	*/
	vm_offset_t	end;		/* PA of last  page in segment	*/
	int		first_page;	/* relative page# of 'start'	*/
};
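
/*
 * Illustrative sketch (not in the original header): how start_c() might
 * describe two physical memory segments.  The addresses, sizes, and the
 * helper name are made up for illustration; the real tables are filled
 * in from the bootloader's memory list in atari_init.c.
 */
#if 0	/* sketch only, not compiled */
static void
fill_segs_sketch(struct memseg *segs)
{
	/* ST-RAM: 4 MB starting at physical address 0 */
	segs[0].start      = 0x00000000;
	segs[0].end        = 0x00400000;
	segs[0].first_page = 0;

	/* TT-RAM: 16 MB starting at 0x01000000 */
	segs[1].start      = 0x01000000;
	segs[1].end        = 0x02000000;
	segs[1].first_page = atop(0x00400000);	/* pages numbered contiguously */

	/* Remaining entries (up to NMEM_SEGS) are left zeroed. */
}
#endif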

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	u_int		*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define PV_PTPAGE	0x02	/* entry maps a page table page */

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	340

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
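
/*
 * Illustrative check (not in the original header): assuming NBPG == 8192
 * and 32-bit pointers on this m68k configuration, sizeof(struct
 * pv_page_info) is 16 and sizeof(struct pv_entry) is 24, so
 * (8192 - 16) / 24 == 340 == NPVPPG and one struct pv_page (8176 bytes)
 * just fits in a page.
 */
#if 0	/* sketch only, not compiled */
typedef int npvppg_fits_in_page[(sizeof(struct pv_page) <= NBPG) ? 1 : -1];
#endif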

#ifdef	_KERNEL
/*
 * Memory segment descriptors.
 *  - boot_segs
 *	describes the segments obtained from the bootcode.
 *  - usable_segs
 *	describes the segments available after system requirements are
 *	subtracted (reserved pages, etc.).
 */
struct memseg	boot_segs[NMEM_SEGS];
struct memseg	usable_segs[NMEM_SEGS];

pv_entry_t	pv_table;	/* array of entries, one per page */
u_int		*Sysmap;
char		*vmmap;		/* map for mem, dumps, etc. */
struct pmap	kernel_pmap_store;

#ifdef MACHINE_NONCONTIG
#define	pa_index(pa)			pmap_page_index(pa)
#else
#define pa_index(pa)			atop(pa - vm_first_phys)
#endif /* MACHINE_NONCONTIG */

#define pa_to_pvh(pa)			(&pv_table[pa_index(pa)])
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
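
/*
 * Illustrative sketch (not in the original header): walking the list of
 * virtual mappings of a managed physical page via pa_to_pvh().  The
 * function name is hypothetical; a header entry with a NULL pv_pmap
 * means the page currently has no mappings.
 */
#if 0	/* sketch only, not compiled */
static int
count_mappings_sketch(vm_offset_t pa)
{
	pv_entry_t pv;
	int n = 0;

	for (pv = pa_to_pvh(pa); pv != NULL; pv = pv->pv_next)
		if (pv->pv_pmap != NULL)	/* skip an unused header entry */
			n++;
	return (n);
}
#endif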

void	pmap_bootstrap __P((vm_offset_t, u_int, u_int));
void	pmap_changebit __P((vm_offset_t, int, boolean_t));
#endif	/* _KERNEL */

#endif	/* !_MACHINE_PMAP_H_ */