/*	$NetBSD: pmap.h,v 1.1 1998/06/09 07:53:05 dbj Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/cpu.h>
#include <machine/pte.h>

#if defined(M68040) && 0
/* @@@ Why is this not always NBSEG ? -- jewell@mit.edu */
#define HP_SEG_SIZE	(mmutype == MMU_68040 ? 0x40000 : NBSEG)
#else
#define HP_SEG_SIZE	NBSEG
#endif

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are
 * allowed for the kernel and for users, respectively.  A value of 8
 * implies that only the initial "segment table" page is used.  WARNING:
 * don't change MAXUL2SIZE unless you can allocate physically contiguous
 * pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define MAXUL2SIZE	8
#define l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
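
/*
 * Illustrative sketch (hypothetical fragment; the real allocator lives in
 * pmap.c): bmtol2() finds the lowest set (i.e. free) bit in pm_stfree and
 * l2tobm() converts an index back into a mask so the block can be marked
 * in use.  Note that bmtol2(0) is -1, signalling no free level 2 blocks.
 *
 *	int ix = bmtol2(pmap->pm_stfree);
 *	if (ix == -1)
 *		panic("pmap: out of level 2 blocks");
 *	pmap->pm_stfree &= ~l2tobm(ix);
 */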

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((vm_offset_t)(pmap)->pm_stpa));	\
}
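
/*
 * A sketch of the intended use (hypothetical caller; the real call sites
 * are in pmap.c and the context switch path): load the user segment table
 * root (pm_stpa) into the MMU only when the pmap belongs to the process
 * actually being run.
 *
 *	PMAP_ACTIVATE(p->p_vmspace->vm_map.pmap, p == curproc);
 */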

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry; the list heads make up
 * the pv_table array.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
};

#define	PV_CI		0x01	/* header: all entries are cache inhibited */
#define PV_PTPAGE	0x02	/* header: entry maps a page table page */

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	170
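
/*
 * Worked out under the assumption that NBPG is 4096 and that pointers,
 * ints and vm_offset_t are all 4 bytes, giving
 * sizeof(struct pv_page_info) == 16 and sizeof(struct pv_entry) == 24:
 *
 *	(4096 - 16) / 24 == 170
 */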

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};

#ifdef	_KERNEL

extern struct pmap	kernel_pmap_store;
extern vm_offset_t	vm_first_phys, vm_num_phys;

#define pmap_kernel()	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
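
/*
 * Sketch of the intended use (hypothetical fragment, not from pmap.c):
 * these predicates let pmap routines skip MMU work for address spaces
 * the hardware is not currently using, e.g. flushing user ATC entries
 * only when the victim pmap is the active one:
 *
 *	if (active_pmap(pmap))
 *		TBIAU();
 */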

extern struct pv_entry	*pv_table;	/* array of entries, one per page */

#ifndef MACHINE_NONCONTIG
#define pmap_page_index(pa)		atop(pa - vm_first_phys)
#endif
#define pa_to_pvh(pa)			(&pv_table[pmap_page_index(pa)])
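
/*
 * Sketch (hypothetical loop, assuming the hp300 convention that the
 * header pv_entry of an unmapped page has a NULL pv_pmap): visiting
 * every mapping of a managed physical address pa.
 *
 *	struct pv_entry *pv;
 *
 *	for (pv = pa_to_pvh(pa); pv != NULL && pv->pv_pmap != NULL;
 *	     pv = pv->pv_next) {
 *		... examine pv->pv_pmap and pv->pv_va ...
 *	}
 */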

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

extern pt_entry_t	*Sysmap;
extern char		*vmmap;		/* map for mem, dumps, etc. */
#endif /* _KERNEL */

#endif /* !_MACHINE_PMAP_H_ */