/*	$NetBSD: pmap.h,v 1.22 2001/05/26 16:32:43 chs Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/pte.h>

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	struct simplelock	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define	MAXUL2SIZE	8
#define	l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
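
/*
 * Illustrative sketch (not part of the original header), assuming the
 * usual pmap.c convention that a set bit in pm_stfree marks a free
 * level 2 block: bmtol2() finds the lowest free block and l2tobm()
 * turns a block number back into its mask bit.
 *
 *	int blk = bmtol2(pmap->pm_stfree);	(lowest free block, -1 if none)
 *	if (blk != -1)
 *		pmap->pm_stfree &= ~l2tobm(blk);	(mark the block in use)
 */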

/*
 * Macros for speed.  PMAP_ACTIVATE() loads the hardware user segment
 * table pointer from `pmap' when `loadhw' is non-zero.
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));		\
}

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry; the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
};

#define	PV_CI		0x01	/* header: all entries are cache inhibited */
#define	PV_PTPAGE	0x02	/* header: entry maps a page table page */
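
/*
 * Illustrative sketch (not part of the original header): given a
 * hypothetical pa_to_pvh() helper that returns the pv_table header for
 * a physical address, visiting every mapping of a managed page would
 * look roughly like
 *
 *	struct pv_entry *pv = pa_to_pvh(pa);
 *
 *	if (pv->pv_pmap != NULL)
 *		for (; pv != NULL; pv = pv->pv_next)
 *			(operate on pv->pv_va in pv->pv_pmap)
 */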

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 *	((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	170
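
/*
 * A worked instance of the formula above, assuming ILP32 pointers and
 * NBPG == 4096 on this port: sizeof(struct pv_page_info) == 16,
 * sizeof(struct pv_entry) == 24, and (4096 - 16) / 24 == 170.
 */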

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};

#ifdef _KERNEL

extern struct pmap	kernel_pmap_store;

#define	pmap_kernel()	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)

extern void	_pmap_set_page_cacheable __P((struct pmap *, vaddr_t));
extern void	_pmap_set_page_cacheinhibit __P((struct pmap *, vaddr_t));
extern int	_pmap_page_is_cacheable __P((struct pmap *, vaddr_t));

extern struct pv_entry	*pv_table;	/* array of entries, one per page */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update()			/* nothing (yet) */

extern pt_entry_t	*Sysmap;
extern char		*vmmap;		/* map for mem, dumps, etc. */

vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
void	pmap_procwr __P((struct proc *, vaddr_t, size_t));
#define	PMAP_NEED_PROCWR

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t	pmap_zero_page_uncached __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_zero_page_uncached((pa))

#endif	/* _KERNEL */

#endif	/* !_MACHINE_PMAP_H_ */