/*	$NetBSD: pmap.h,v 1.58.4.6 2002/12/29 19:40:14 thorpej Exp $	*/

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/11/93
 */

#ifndef _SPARC_PMAP_H_
#define _SPARC_PMAP_H_

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

#include <machine/pte.h>

/*
 * Pmap structure.
 *
 * The pmap structure really comes in two variants, one---a single
 * instance---for kernel virtual memory and the other---up to nproc
 * instances---for user virtual memory.  Unfortunately, we have to mash
 * both into the same structure.  Fortunately, they are almost the same.
 *
 * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
 * some of this is not actually used).  Kernel space, including DVMA
 * space (for now?), is mapped identically into all user contexts.
 * There is no point in duplicating this mapping in each user process,
 * so kernel mappings do not appear in the user structures.
 *
 * User space begins at 0x00000000 and runs through 0x1fffffff,
 * then has a `hole', then resumes at 0xe0000000 and runs until it
 * hits the kernel space at 0xf8000000.  This can be mapped
 * contiguously by ignoring the top two bits and pretending the
 * space goes from 0 to 0x37ffffff.  Typically the lower range is
 * used for text+data and the upper for stack, but the code here
 * makes no such distinction.
 *
 * Since each virtual segment covers 256 kbytes, the user space
 * requires 3584 segments, while the kernel (including DVMA) requires
 * only 512 segments.
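 *
 * (Illustrative arithmetic, from the layout above: 512 MB + 384 MB =
 * 896 MB of user space, and 896 MB / 256 kB = 3584 segments; the
 * kernel's 128 MB / 256 kB = 512 segments.)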
 *
 *
 ** FOR THE SUN4/SUN4C
 *
 * The segment map entry for virtual segment vseg is offset in
 * pmap->pm_rsegmap by 0 if pmap is not the kernel pmap, or by
 * NUSEG if it is.  We keep a pointer called pmap->pm_segmap
 * pre-offset by this value.  pmap->pm_segmap thus contains the
 * values to be loaded into the user portion of the hardware segment
 * map so as to reach the proper PMEGs within the MMU.  The kernel
 * mappings are `set early' and are always valid in every context
 * (every change is always propagated immediately).
 *
 * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
 * taken away from context `c', the pmap for context c has its
 * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
 * map entry is also made invalid at the same time).  Thus
 * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
 * the corresponding PTEs are not actually in the MMU.  On the other
 * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
 * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
 * must be loaded in the MMU in order to reach those pages.
 * pm_npte[vseg] counts the number of valid pages in each vseg.
 *
 * XXX performance: faster to count valid bits?
 *
 * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
 * allocate a new virtual segment.  Since kernel mappings are never
 * `stolen' out of the MMU, we just keep all its PTEs there, and have
 * no software copies.  Its mmu entries are nonetheless kept on lists
 * so that the code that fiddles with mmu lists has something to fiddle.
 *
 ** FOR THE SUN4M/SUN4D
 *
 * On this architecture, the virtual-to-physical translation (page) tables
 * are *not* stored within the MMU as they are in the earlier Sun architect-
 * ures; instead, they are maintained entirely within physical memory (there
 * is a TLB cache to prevent the high performance hit from keeping all page
 * tables in core).  Thus there is no need to dynamically allocate PMEGs or
 * SMEGs; only contexts must be shared.
 *
 * We maintain two parallel sets of tables: one is the actual MMU-edible
 * hierarchy of page tables in allocated kernel memory; these tables refer
 * to each other by physical address pointers in SRMMU format (thus they
 * are not very useful to the kernel's management routines).  The other set
 * of tables is similar to those used for the Sun4/400's 3-level MMU; it
 * is a hierarchy of regmap and segmap structures which contain kernel virtual
 * pointers to each other.  These must (unfortunately) be kept in sync.
 *
 */
#define NKREG	((int)((-(unsigned)KERNBASE) / NBPRG))	/* i.e., 8 */
#define NUREG	(256 - NKREG)				/* i.e., 248 */
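
/*
 * A quick check of the arithmetic above (illustrative; assumes the usual
 * sun4m values KERNBASE == 0xf8000000 and NBPRG == 0x01000000, i.e. one
 * 16 MB region per region-table entry):
 *
 *	-(unsigned)0xf8000000 == 0x08000000	(128 MB of kernel space)
 *	0x08000000 / 0x01000000 == 8		(NKREG)
 *	256 - 8 == 248				(NUREG)
 */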

TAILQ_HEAD(mmuhd,mmuentry);

/*
 * data appearing in both user and kernel pmaps
 *
 * note: if we want the same binaries to work on the 4/4c and 4m, we have to
 *       include the fields for both to make sure that the struct kproc
 *       is the same size.
 */
struct pmap {
	union	ctxinfo *pm_ctx;	/* current context, if any */
	int	pm_ctxnum;		/* current context's number */
	struct simplelock pm_lock;	/* spinlock */
	int	pm_refcount;		/* just what it says */

	struct mmuhd	pm_reglist;	/* MMU regions on this pmap (4/4c) */
	struct mmuhd	pm_seglist;	/* MMU segments on this pmap (4/4c) */

	struct regmap	*pm_regmap;

	int		**pm_reg_ptps;	/* SRMMU-edible region tables for 4m */
	int		*pm_reg_ptps_pa;/* _Physical_ address of pm_reg_ptps */

	int		pm_gap_start;	/* Starting with this vreg there's */
	int		pm_gap_end;	/* no valid mapping until here */

	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

struct regmap {
	struct segmap	*rg_segmap;	/* points to NSEGRG segmaps */
	int		*rg_seg_ptps;	/* SRMMU-edible segment tables (NULL
					 * indicates invalid region) (4m) */
	smeg_t		rg_smeg;	/* the MMU region number (4c) */
	u_char		rg_nsegmap;	/* number of valid PMEGS */
};

struct segmap {
	int	*sg_pte;		/* points to NPTESG PTEs */
	pmeg_t	sg_pmeg;		/* the MMU segment number (4c) */
	u_char	sg_npte;		/* number of valid PTEs per seg */
};
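
/*
 * Illustrative sketch only (not a routine provided by this file): the
 * software copy of the PTE for virtual address va in pmap pm is reached
 * by walking the kernel-virtual regmap/segmap hierarchy described above,
 * using the address-dissection macros from <machine/pte.h>:
 *
 *	struct regmap *rp = &pm->pm_regmap[VA_VREG(va)];
 *	struct segmap *sp = &rp->rg_segmap[VA_VSEG(va)];
 *	int pte = sp->sg_pte[VA_SUN4M_VPG(va)];	(VA_VPG() on sun4/sun4c)
 *
 * A NULL rg_segmap or sg_pte means there is no valid mapping in that
 * region or segment; the hardware tables must be updated in step.
 */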

typedef struct pmap *pmap_t;

#if 0
struct kvm_cpustate {
	int		kvm_npmemarr;
	struct memarr	kvm_pmemarr[MA_SIZE];
	int		kvm_seginval;			/* [4,4c] */
	struct segmap	kvm_segmap_store[NKREG*NSEGRG];	/* [4,4c] */
}/*not yet used*/;
#endif

#ifdef _KERNEL

#define PMAP_NULL	((pmap_t)0)

extern struct pmap	kernel_pmap_store;

/*
 * Bounds on managed physical addresses.  Used by (MD) users
 * of uvm_pglistalloc() to provide search hints.
 */
extern paddr_t		vm_first_phys, vm_last_phys;
extern psize_t		vm_num_phys;

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The ordering below is important: the type and NC bits, masked
 * with PMAP_TNC_4 and shifted left by PG_TNC_SHIFT, must give
 * exactly the PG_NC and PG_TYPE bits in the PTE.
 */
#define PMAP_OBIO	1	/* tells pmap_enter to use PG_OBIO */
#define PMAP_VME16	2	/* etc */
#define PMAP_VME32	3	/* etc */
#define PMAP_NC		4	/* tells pmap_enter to set PG_NC */
#define PMAP_TNC_4	7	/* mask to get PG_TYPE & PG_NC */

#define PMAP_T2PTE_4(x)		(((x) & PMAP_TNC_4) << PG_TNC_SHIFT)
#define PMAP_IOENC_4(io)	(io)
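
/*
 * Illustrative example (hypothetical call site): a driver entering an
 * uncached OBIO device page tags the low-order bits of the physical
 * address; pmap_enter() then lifts them into the PTE via PMAP_T2PTE_4():
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_OBIO | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0);
 */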

/*
 * On a SRMMU machine, the iospace is encoded in bits [3-6] of the
 * physical address passed to pmap_enter().
 */
#define PMAP_TYPE_SRMMU		0x78	/* mask to get 4m page type */
#define PMAP_PTESHFT_SRMMU	25	/* left shift to put type in pte */
#define PMAP_SHFT_SRMMU		3	/* left shift to encode iospace */
#define PMAP_TNC_SRMMU		127	/* mask to get PG_TYPE & PG_NC */

/*#define PMAP_IOC      0x00800000   -* IO cacheable, NOT shifted */

#define PMAP_T2PTE_SRMMU(x)	(((x) & PMAP_TYPE_SRMMU) << PMAP_PTESHFT_SRMMU)
#define PMAP_IOENC_SRMMU(io)	((io) << PMAP_SHFT_SRMMU)

/* Encode IO space for pmap_enter() */
#define PMAP_IOENC(io)	(CPU_HAS_SRMMU ? PMAP_IOENC_SRMMU(io) \
				: PMAP_IOENC_4(io))
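
/*
 * For example (illustrative values): on an SRMMU machine PMAP_IOENC(2)
 * yields 2 << 3 == 0x10, placing iospace 2 in bits [3-6] of the physical
 * address; PMAP_T2PTE_SRMMU() later shifts those bits up by 25 into the
 * PTE's type field (bits [28-31]).  On a sun4/sun4c the value is passed
 * through unchanged.
 */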

int		pmap_dumpsize __P((void));
int		pmap_dumpmmu __P((int (*)__P((dev_t, daddr_t, caddr_t, size_t)),
				  daddr_t));

#define	pmap_kernel()	(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	pmap_count_ptes(pmap)

#define PMAP_PREFER(fo, ap)		pmap_prefer((fo), (ap))

#define PMAP_EXCLUDE_DECLS	/* tells MI pmap.h *not* to include decls */

/* FUNCTION DECLARATIONS FOR COMMON PMAP MODULE */

void		pmap_activate __P((struct lwp *));
void		pmap_deactivate __P((struct lwp *));
void		pmap_bootstrap __P((int nmmu, int nctx, int nregion));
int		pmap_count_ptes __P((struct pmap *));
void		pmap_prefer __P((vaddr_t, vaddr_t *));
int		pmap_pa_exists __P((paddr_t));
void		pmap_unwire __P((pmap_t, vaddr_t));
void		pmap_collect __P((pmap_t));
void		pmap_copy __P((pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t));
pmap_t		pmap_create __P((void));
void		pmap_destroy __P((pmap_t));
void		pmap_init __P((void));
vaddr_t		pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
paddr_t		pmap_phys_address __P((int));
void		pmap_reference __P((pmap_t));
void		pmap_remove __P((pmap_t, vaddr_t, vaddr_t));
#define		pmap_update(pmap)	/* nothing (yet) */
void		pmap_virtual_space __P((vaddr_t *, vaddr_t *));
void		pmap_redzone __P((void));
void		kvm_uncache __P((caddr_t, int));
struct user;
int		mmu_pagein __P((struct pmap *pm, vaddr_t, int));
void		pmap_writetext __P((unsigned char *, int));
void		pmap_globalize_boot_cpuinfo __P((struct cpu_info *));

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* SUN4/SUN4C SPECIFIC DECLARATIONS */

#if defined(SUN4) || defined(SUN4C)
boolean_t	pmap_clear_modify4_4c __P((struct vm_page *));
boolean_t	pmap_clear_reference4_4c __P((struct vm_page *));
void		pmap_copy_page4_4c __P((paddr_t, paddr_t));
int		pmap_enter4_4c __P((pmap_t, vaddr_t, paddr_t, vm_prot_t,
				    int));
boolean_t	pmap_extract4_4c __P((pmap_t, vaddr_t, paddr_t *));
boolean_t	pmap_is_modified4_4c __P((struct vm_page *));
boolean_t	pmap_is_referenced4_4c __P((struct vm_page *));
void		pmap_kenter_pa4_4c __P((vaddr_t, paddr_t, vm_prot_t));
void		pmap_kremove4_4c __P((vaddr_t, vsize_t));
void		pmap_page_protect4_4c __P((struct vm_page *, vm_prot_t));
void		pmap_protect4_4c __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
void		pmap_zero_page4_4c __P((paddr_t));
void		pmap_changeprot4_4c __P((pmap_t, vaddr_t, vm_prot_t, int));

#endif /* defined SUN4 || defined SUN4C */

/* SIMILAR DECLARATIONS FOR SUN4M/SUN4D MODULE */

#if defined(SUN4M) || defined(SUN4D)
boolean_t	pmap_clear_modify4m __P((struct vm_page *));
boolean_t	pmap_clear_reference4m __P((struct vm_page *));
void		pmap_copy_page4m __P((paddr_t, paddr_t));
void		pmap_copy_page_viking_mxcc(paddr_t, paddr_t);
void		pmap_copy_page_hypersparc(paddr_t, paddr_t);
int		pmap_enter4m __P((pmap_t, vaddr_t, paddr_t, vm_prot_t,
				  int));
boolean_t	pmap_extract4m __P((pmap_t, vaddr_t, paddr_t *));
boolean_t	pmap_is_modified4m __P((struct vm_page *));
boolean_t	pmap_is_referenced4m __P((struct vm_page *));
void		pmap_kenter_pa4m __P((vaddr_t, paddr_t, vm_prot_t));
void		pmap_kremove4m __P((vaddr_t, vsize_t));
void		pmap_page_protect4m __P((struct vm_page *, vm_prot_t));
void		pmap_protect4m __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
void		pmap_zero_page4m __P((paddr_t));
void		pmap_zero_page_viking_mxcc(paddr_t);
void		pmap_zero_page_hypersparc(paddr_t);
void		pmap_changeprot4m __P((pmap_t, vaddr_t, vm_prot_t, int));

#endif /* defined SUN4M || defined SUN4D */

#if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C))

#define		pmap_clear_modify	pmap_clear_modify4_4c
#define		pmap_clear_reference	pmap_clear_reference4_4c
#define		pmap_enter		pmap_enter4_4c
#define		pmap_extract		pmap_extract4_4c
#define		pmap_is_modified	pmap_is_modified4_4c
#define		pmap_is_referenced	pmap_is_referenced4_4c
#define		pmap_kenter_pa		pmap_kenter_pa4_4c
#define		pmap_kremove		pmap_kremove4_4c
#define		pmap_page_protect	pmap_page_protect4_4c
#define		pmap_protect		pmap_protect4_4c
#define		pmap_changeprot		pmap_changeprot4_4c

#elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C))

#define		pmap_clear_modify	pmap_clear_modify4m
#define		pmap_clear_reference	pmap_clear_reference4m
#define		pmap_enter		pmap_enter4m
#define		pmap_extract		pmap_extract4m
#define		pmap_is_modified	pmap_is_modified4m
#define		pmap_is_referenced	pmap_is_referenced4m
#define		pmap_kenter_pa		pmap_kenter_pa4m
#define		pmap_kremove		pmap_kremove4m
#define		pmap_page_protect	pmap_page_protect4m
#define		pmap_protect		pmap_protect4m
#define		pmap_changeprot		pmap_changeprot4m

#else	/* must use function pointers */

extern boolean_t	(*pmap_clear_modify_p) __P((struct vm_page *));
extern boolean_t	(*pmap_clear_reference_p) __P((struct vm_page *));
extern int		(*pmap_enter_p) __P((pmap_t, vaddr_t, paddr_t,
					     vm_prot_t, int));
extern boolean_t	(*pmap_extract_p) __P((pmap_t, vaddr_t, paddr_t *));
extern boolean_t	(*pmap_is_modified_p) __P((struct vm_page *));
extern boolean_t	(*pmap_is_referenced_p) __P((struct vm_page *));
extern void		(*pmap_kenter_pa_p) __P((vaddr_t, paddr_t, vm_prot_t));
extern void		(*pmap_kremove_p) __P((vaddr_t, vsize_t));
extern void		(*pmap_page_protect_p) __P((struct vm_page *,
						    vm_prot_t));
extern void		(*pmap_protect_p) __P((pmap_t, vaddr_t, vaddr_t,
					       vm_prot_t));
extern void		(*pmap_changeprot_p) __P((pmap_t, vaddr_t, vm_prot_t,
						  int));

#define		pmap_clear_modify	(*pmap_clear_modify_p)
#define		pmap_clear_reference	(*pmap_clear_reference_p)
#define		pmap_enter		(*pmap_enter_p)
#define		pmap_extract		(*pmap_extract_p)
#define		pmap_is_modified	(*pmap_is_modified_p)
#define		pmap_is_referenced	(*pmap_is_referenced_p)
#define		pmap_kenter_pa		(*pmap_kenter_pa_p)
#define		pmap_kremove		(*pmap_kremove_p)
#define		pmap_page_protect	(*pmap_page_protect_p)
#define		pmap_protect		(*pmap_protect_p)
#define		pmap_changeprot		(*pmap_changeprot_p)

#endif

/* pmap_{zero,copy}_page() may be assisted by specialized hardware */
#define		pmap_zero_page		(*cpuinfo.zero_page)
#define		pmap_copy_page		(*cpuinfo.copy_page)

#if defined(SUN4M) || defined(SUN4D)
/*
 * Macros which implement SRMMU TLB flushing/invalidation
 */
#define tlb_flush_page_real(va)    \
	sta(((vaddr_t)(va) & 0xfffff000) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)

#define tlb_flush_segment_real(va) \
	sta(((vaddr_t)(va) & 0xfffc0000) | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0)

#define tlb_flush_region_real(va)  \
	sta(((vaddr_t)(va) & 0xff000000) | ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)

#define tlb_flush_context_real()   sta(ASI_SRMMUFP_L0, ASI_SRMMUFP, 0)
#define tlb_flush_all_real()	   sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
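
/*
 * Illustrative use (hypothetical call site): after the pmap module
 * rewrites a level-3 page table entry, the stale TLB entry covering
 * that page must be invalidated by a page-level flush:
 *
 *	*ptep = newpte;			(store the new PTE)
 *	tlb_flush_page_real(va);	(drop the stale TLB entry)
 */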

#endif /* SUN4M || SUN4D */

#endif /* _KERNEL */

#endif /* _SPARC_PMAP_H_ */