/*	$NetBSD: pmap.h,v 1.23.4.1 2001/10/01 12:40:57 fvdl Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/pte.h>

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	struct simplelock	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;
/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define	MAXUL2SIZE	8
#define	l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
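
/*
 * Illustrative sketch, not part of the original header: assuming a set
 * bit in pm_stfree marks a free level 2 block, the lowest free block
 * can be found and claimed roughly as follows (with pm_lock held).
 * bmtol2() yields -1 when no bit is set:
 *
 *	int blk = bmtol2(pmap->pm_stfree);
 *	if (blk >= 0)
 *		pmap->pm_stfree &= ~l2tobm(blk);
 *
 * Releasing the block is the inverse: pm_stfree |= l2tobm(blk).  See
 * pmap.c for the real allocation logic.
 */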

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));		\
}
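
/*
 * Usage sketch (hedged; the call below is illustrative, not code from
 * this file): the machine-dependent context-switch path is expected to
 * activate the incoming process's address space with something like
 *
 *	PMAP_ACTIVATE(p->p_vmspace->vm_map.pmap, p == curproc);
 *
 * When "loadhw" is true the macro loads the MMU's user segment table
 * root with the physical address in pm_stpa; otherwise it is a no-op.
 */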

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry; the heads of these
 * lists form the pv_table array.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
};

#define	PV_CI		0x01	/* header: all entries are cache inhibited */
#define	PV_PTPAGE	0x02	/* header: entry maps a page table page */

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 *	((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	170

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
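
/*
 * Illustrative compile-time check, hypothetical and not in the original
 * header: since NPVPPG is a hand-precomputed constant, a stale value
 * after NBPG or the structures change can be caught with the classic
 * negative-array-size trick:
 *
 *	typedef char npvppg_check[sizeof(struct pv_page) <= NBPG ? 1 : -1];
 */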

#ifdef	_KERNEL

extern struct pmap	kernel_pmap_store;

#define	pmap_kernel()	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
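
/*
 * Illustrative use, not code from this file: pmap_remove() and friends
 * only need to flush MMU translations for a mapping that is live on
 * the CPU, e.g.
 *
 *	if (active_pmap(pmap))
 *		TBIS(va);
 *
 * where TBIS() is the m68k single-entry TLB invalidate macro; see
 * pmap.c for the real usage.
 */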

extern void	_pmap_set_page_cacheable __P((struct pmap *, vaddr_t));
extern void	_pmap_set_page_cacheinhibit __P((struct pmap *, vaddr_t));
extern int	_pmap_page_is_cacheable __P((struct pmap *, vaddr_t));

extern struct pv_entry *pv_table;	/* array of entries, one per page */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update(pmap)		/* nothing (yet) */

extern pt_entry_t	*Sysmap;
extern char		*vmmap;		/* map for mem, dumps, etc. */

vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
void	pmap_procwr __P((struct proc *, vaddr_t, size_t));
#define	PMAP_NEED_PROCWR

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t	pmap_zero_page_uncached __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_zero_page_uncached((pa))
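
/*
 * Sketch of the intended use (hedged; the loop below is illustrative,
 * not code from this file): the MI idle loop can pre-zero free pages
 * with the uncached routine and presumably stop early when there is
 * other work to do, e.g.
 *
 *	while (idle && (pa = next_free_page()) != 0)
 *		if (!PMAP_PAGEIDLEZERO(pa))
 *			break;
 *
 * next_free_page() here is a hypothetical placeholder.  The boolean_t
 * return (rather than void) is what lets the caller notice an aborted
 * zeroing pass.
 */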

#endif	/* _KERNEL */

#endif	/* !_MACHINE_PMAP_H_ */