/*	$NetBSD: pmap.h,v 1.64 2015/06/07 06:07:49 matt Exp $	*/
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Ralph Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)pmap.h 8.1 (Berkeley) 6/10/93
35 */
36
37 /*
38 * Copyright (c) 1987 Carnegie-Mellon University
39 *
40 * This code is derived from software contributed to Berkeley by
41 * Ralph Campbell.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * @(#)pmap.h 8.1 (Berkeley) 6/10/93
72 */
73
74 #ifndef _MIPS_PMAP_H_
75 #define _MIPS_PMAP_H_
76
77 #ifdef _KERNEL_OPT
78 #include "opt_multiprocessor.h"
79 #endif
80
81 #include <sys/evcnt.h>
82
83 #include <mips/cpuregs.h> /* for KSEG0 below */
84 //#include <mips/pte.h>
85
/*
 * The user address space is 2Gb (0x0 - 0x80000000).
 * User programs are laid out in memory as follows:
 *			address
 *	USRTEXT		0x00001000
 *	USRDATA		USRTEXT + text_size
 *	USRSTACK	0x7FFFFFFF
 *
 * The user address space is mapped using a two level structure where
 * virtual address bits 30..22 are used to index into a segment table which
 * points to a page worth of PTEs (a 4096-byte page can hold 1024 PTEs).
 * Bits 21..12 are then used to index a PTE which describes a page within
 * a segment.
 *
 * The wired entries in the TLB will contain the following:
 *	0-1	(UPAGES)	for curproc user struct and kernel stack.
 *
 * Note: The kernel doesn't use the same data structures as user programs.
 * All the PTE entries are stored in a single array in Sysmap which is
 * dynamically allocated at boot time.
 */
107
/*
 * Truncate/round a virtual address to a segment boundary.  A "segment"
 * is the region mapped by one page of PTEs; SEGOFSET/SEGSHIFT are
 * defined elsewhere (presumably <mips/vmparam.h> — not visible here).
 */
#define mips_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define mips_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)

/*
 * Number of entries in one segment-table level: NSEGPG per page on
 * LP64 kernels, otherwise one entry per segment of the 2GB (31-bit)
 * user address space.
 */
#ifdef _LP64
#define PMAP_SEGTABSIZE		NSEGPG
#else
#define PMAP_SEGTABSIZE		(1 << (31 - SEGSHIFT))
#endif
116
union pt_entry;		/* PTE type, defined in <mips/pte.h> (include commented out above) */

/*
 * One node of the user page-table tree: on LP64 kernels a node may hold
 * pointers to further segtab nodes (multi-level lookup); the leaf level
 * holds pointers to pages of PTEs.
 */
union segtab {
#ifdef _LP64
	union segtab	*seg_seg[PMAP_SEGTABSIZE];
#endif
	union pt_entry	*seg_tab[PMAP_SEGTABSIZE];
};
125
/*
 * Structure defining a TLB entry data set: the "hi" (VA/tag) word and
 * the two "lo" (PFN/attribute) words of a paired MIPS TLB entry.
 */
struct tlb {
	vaddr_t	tlb_hi;		/* should be 64 bits */
	uint32_t tlb_lo0;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_lo1;	/* XXX maybe 64 bits (only 32 really used) */
};
134
/*
 * Same layout as struct tlb plus a mask word (presumably the CP0
 * PageMask value for variable page sizes — TODO confirm).
 */
struct tlbmask {
	vaddr_t	tlb_hi;		/* should be 64 bits */
	uint32_t tlb_lo0;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_lo1;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_mask;
};
141
#ifdef _KERNEL
struct pmap;

/* Callback invoked by pmap_pte_process() for each PTE page in a VA range. */
typedef bool	(*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t,
	union pt_entry *, uintptr_t);

/* Find the PTE for a VA in a pmap's segtab (no allocation). */
union pt_entry *pmap_pte_lookup(struct pmap *, vaddr_t);
/* Like lookup, but allocate the intermediate segtab/PTE page as needed. */
union pt_entry *pmap_pte_reserve(struct pmap *, vaddr_t, int);
/* Apply `callback' over all PTE pages covering [sva, eva). */
void	pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
	uintptr_t);
/* Segment-table lifecycle for a pmap. */
void	pmap_segtab_activate(struct pmap *, struct lwp *);
void	pmap_segtab_init(struct pmap *);
void	pmap_segtab_destroy(struct pmap *);
extern kmutex_t pmap_segtab_lock;	/* guards segtab manipulation */
#endif /* _KERNEL */
155
/*
 * Per TLB (normally same as CPU) asid info
 */
struct pmap_asid_info {
	LIST_ENTRY(pmap_asid_info) pai_link;	/* on the tlbinfo's ti_pais list */
	uint32_t	pai_asid;	/* TLB address space tag */
};

/* Lock/unlock a struct pmap_tlb_info (ti_lock is a spin mutex). */
#define	TLBINFO_LOCK(ti)		mutex_spin_enter((ti)->ti_lock)
#define	TLBINFO_UNLOCK(ti)		mutex_spin_exit((ti)->ti_lock)
/* ASID 0 means "no ASID assigned"; any nonzero value is live. */
#define	PMAP_PAI_ASIDVALID_P(pai, ti)	((pai)->pai_asid != 0)
/* Map a pmap to its asid info for a given TLB, and back (container_of style). */
#define	PMAP_PAI(pmap, ti)		(&(pmap)->pm_pai[tlbinfo_index(ti)])
#define	PAI_PMAP(pai, ti)	\
	((pmap_t)((intptr_t)(pai)		\
	    - offsetof(struct pmap, pm_pai[tlbinfo_index(ti)])))
171
/*
 * Machine dependent pmap structure.
 *
 * pm_pai[] is a variable-length trailer: struct pmap_kernel below
 * extends it to one slot per CPU/TLB on MULTIPROCESSOR kernels
 * (see PAI_PMAP/PMAP_PAI above for how it is indexed).
 */
struct pmap {
#ifdef MULTIPROCESSOR
	volatile uint32_t	pm_active;	/* pmap was active on ... (presumably a CPU bitmask) */
	volatile uint32_t	pm_onproc;	/* pmap is active on ... (presumably a CPU bitmask) */
	volatile u_int		pm_shootdown_pending;	/* TLB shootdown requested for this pmap */
#endif
	union segtab		*pm_segtab;	/* pointers to pages of PTEs */
	u_int			pm_count;	/* pmap reference count */
	u_int			pm_flags;	/* PMAP_DEFERRED_ACTIVATE, ... */
#define	PMAP_DEFERRED_ACTIVATE	0x0001
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct pmap_asid_info	pm_pai[1];	/* per-TLB ASID state (variable length) */
};
188
/* Scope of a pending TLB invalidation (used for MP shootdowns). */
enum tlb_invalidate_op {
	TLBINV_NOBODY=0,	/* nothing pending */
	TLBINV_ONE=1,		/* one pmap's entries */
	TLBINV_ALLUSER=2,	/* all user entries */
	TLBINV_ALLKERNEL=3,	/* all kernel entries */
	TLBINV_ALL=4		/* everything */
};
196
/*
 * Per-TLB state: ASID allocation (hint, free count, bitmap), the list
 * of pmaps currently holding an ASID on this TLB, and — on MP kernels —
 * shootdown and icache-sync bookkeeping shared by the CPUs attached to
 * this TLB.  Uniprocessor kernels have exactly one (pmap_tlb0_info).
 */
struct pmap_tlb_info {
	char ti_name[8];		/* name for evcnt attachment */
	uint32_t ti_asid_hint;		/* probable next ASID to use */
	uint32_t ti_asids_free;		/* # of ASIDs free */
#define	tlbinfo_noasids_p(ti)	((ti)->ti_asids_free == 0)
	kmutex_t *ti_lock;		/* spin lock (see TLBINFO_LOCK) */
	u_int ti_wired;			/* # of wired TLB entries */
	uint32_t ti_asid_mask;		/* hardware ASID field mask */
	uint32_t ti_asid_max;		/* highest usable ASID */
	LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */
#ifdef MULTIPROCESSOR
	pmap_t ti_victim;		/* NOTE(review): presumably the pmap chosen to lose its ASID — confirm */
	uint32_t ti_synci_page_bitmap;	/* page indices needing a syncicache */
	uint32_t ti_cpu_mask;		/* bitmask of CPUs sharing this TLB */
	enum tlb_invalidate_op ti_tlbinvop;	/* scope of pending invalidation */
	u_int ti_index;			/* slot of this tlbinfo in pmap_tlbs[] */
#define	tlbinfo_index(ti)	((ti)->ti_index)
	/* Event counters for the deferred icache-sync machinery. */
	struct evcnt ti_evcnt_synci_asts;
	struct evcnt ti_evcnt_synci_all;
	struct evcnt ti_evcnt_synci_pages;
	struct evcnt ti_evcnt_synci_deferred;
	struct evcnt ti_evcnt_synci_desired;
	struct evcnt ti_evcnt_synci_duplicate;
#else
#define	tlbinfo_index(ti)	(0)	/* only one TLB without MULTIPROCESSOR */
#endif
	struct evcnt ti_evcnt_asid_reinits;	/* times the ASID space was recycled */
	u_long ti_asid_bitmap[256 / (sizeof(u_long) * 8)];	/* 1 bit per ASID, max 256 */
};
226
227 #ifdef _KERNEL
228
/*
 * Storage for the kernel pmap, extended with enough trailing
 * pmap_asid_info slots for every possible TLB: kernel_pmap.pm_pai[1]
 * plus MAXCPUS-1 extra slots on MULTIPROCESSOR kernels.
 */
struct pmap_kernel {
	struct pmap kernel_pmap;
#ifdef MULTIPROCESSOR
	struct pmap_asid_info kernel_pai[MAXCPUS-1];
#endif
};
235
extern struct pmap_kernel kernel_pmap_store;	/* the kernel pmap (+ extra pai slots) */
extern struct pmap_tlb_info pmap_tlb0_info;	/* tlbinfo for the boot/primary TLB */
#ifdef MULTIPROCESSOR
extern struct pmap_tlb_info *pmap_tlbs[MAXCPUS];	/* all attached tlbinfos */
extern u_int pmap_ntlbs;			/* # of valid entries in pmap_tlbs[] */
#endif
/* Managed physical memory range and end of usable kernel virtual space. */
extern paddr_t mips_avail_start;
extern paddr_t mips_avail_end;
extern vaddr_t mips_virtual_end;

/* Standard pmap statistics accessors. */
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

/* Page number -> physical address (mips_ptob converts pages to bytes). */
#define	pmap_phys_address(x)	mips_ptob(x)
250
/*
 * Bootstrap the system enough to run with virtual memory.
 */
void	pmap_bootstrap(void);

void	pmap_remove_all(pmap_t);
void	pmap_set_modified(paddr_t);
/*
 * Sync caches for a range a process has written (NetBSD's
 * PMAP_NEED_PROCWR hook — presumably icache/dcache coherency;
 * see the syncicache routines below).
 */
void	pmap_procwr(struct proc *, vaddr_t, size_t);
#define	PMAP_NEED_PROCWR

#ifdef MULTIPROCESSOR
/* TLB shootdown: service pending requests / notify other CPUs. */
void	pmap_tlb_shootdown_process(void);
bool	pmap_tlb_shootdown_bystanders(pmap_t pmap);
void	pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
/* Deferred cross-CPU icache synchronization (delivered via ASTs). */
void	pmap_tlb_syncicache_ast(struct cpu_info *);
void	pmap_tlb_syncicache_wanted(struct cpu_info *);
void	pmap_tlb_syncicache(vaddr_t, uint32_t);
#endif
/* tlbinfo setup and ASID lifecycle. */
void	pmap_tlb_info_init(struct pmap_tlb_info *);
void	pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *);
void	pmap_tlb_asid_acquire(pmap_t pmap, struct lwp *l);
void	pmap_tlb_asid_deactivate(pmap_t pmap);
void	pmap_tlb_asid_check(void);
void	pmap_tlb_asid_release_all(pmap_t pmap);
/* Update or drop the TLB entry for a single VA in a pmap. */
int	pmap_tlb_update_addr(pmap_t pmap, vaddr_t, uint32_t, bool);
void	pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t);
277
/*
 * pmap_prefer() helps reduce virtual-coherency exceptions in
 * the virtually-indexed cache on mips3 CPUs.
 */
#ifdef MIPS3_PLUS
#define	PMAP_PREFER(pa, va, sz, td)	pmap_prefer((pa), (va), (sz), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, vsize_t, int);
#endif /* MIPS3_PLUS */

#define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
vaddr_t mips_pmap_map_poolpage(paddr_t);
paddr_t mips_pmap_unmap_poolpage(vaddr_t);
struct vm_page *mips_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE(flags)	mips_pmap_alloc_poolpage(flags)
#define	PMAP_MAP_POOLPAGE(pa)		mips_pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)		mips_pmap_unmap_poolpage(va)
299
/*
 * Other hooks for the pool allocator.
 */
#ifdef _LP64
/* Direct-mapped VA -> PA: pool pages may live in KSEG0 or XKPHYS. */
#define	POOL_VTOPHYS(va)	(MIPS_KSEG0_P(va) \
				    ? MIPS_KSEG0_TO_PHYS(va) \
				    : MIPS_XKPHYS_TO_PHYS(va))
#else
/* 32-bit kernels only see KSEG0 pool pages. */
#define	POOL_VTOPHYS(va)	MIPS_KSEG0_TO_PHYS((vaddr_t)(va))
#endif

/*
 * Select CCA to use for unmanaged pages.
 */
#define	PMAP_CCA_FOR_PA(pa)	CCA_UNCACHED	/* uncached */

/*
 * Mapping attribute flags; presumably carried in the high bits of a
 * 64-bit paddr_t (only defined when paddr_t is 64 bits) — TODO confirm.
 */
#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
#define	PGC_NOCACHE	0x4000000000000000ULL
#define	PGC_PREFETCH	0x2000000000000000ULL
#endif
320
#define	__HAVE_VM_PAGE_MD	/* this pmap provides struct vm_page_md */

/*
 * pmap-specific data stored in the vm_page structure.
 */
/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
#define	PV_KENTER	0x001		/* NOTE(review): presumably a flag kept in the low bits of pv_va — confirm */
} *pv_entry_t;
337
/* Page attribute bits kept in vm_page_md::pvh_attrs. */
#define	PG_MD_UNCACHED		0x0001	/* page is mapped uncached */
#define	PG_MD_MODIFIED		0x0002	/* page has been modified */
#define	PG_MD_REFERENCED	0x0004	/* page has been recently referenced */
#define	PG_MD_POOLPAGE		0x0008	/* page is used as a poolpage */
#define	PG_MD_EXECPAGE		0x0010	/* page is exec mapped */

/* Predicates over the attribute bits above. */
#define	PG_MD_CACHED_P(md)	(((md)->pvh_attrs & PG_MD_UNCACHED) == 0)
#define	PG_MD_UNCACHED_P(md)	(((md)->pvh_attrs & PG_MD_UNCACHED) != 0)
#define	PG_MD_MODIFIED_P(md)	(((md)->pvh_attrs & PG_MD_MODIFIED) != 0)
#define	PG_MD_REFERENCED_P(md)	(((md)->pvh_attrs & PG_MD_REFERENCED) != 0)
#define	PG_MD_POOLPAGE_P(md)	(((md)->pvh_attrs & PG_MD_POOLPAGE) != 0)
#define	PG_MD_EXECPAGE_P(md)	(((md)->pvh_attrs & PG_MD_EXECPAGE) != 0)
350
/*
 * Machine-dependent per-page data: the head of the page's pv (physical
 * -> virtual) mapping list plus PG_MD_* attribute bits.  On MP kernels
 * each page has its own pv-list lock and the upper 16 bits of pvh_attrs
 * carry a generation count (see PG_MD_PVLIST_GEN).
 */
struct vm_page_md {
	struct pv_entry pvh_first;	/* pv_entry first */
#ifdef MULTIPROCESSOR
	volatile u_int pvh_attrs;	/* page attributes */
	kmutex_t *pvh_lock;		/* pv list lock */
#define	PG_MD_PVLIST_LOCK_INIT(md)	((md)->pvh_lock = NULL)
#define	PG_MD_PVLIST_LOCKED_P(md)	(mutex_owner((md)->pvh_lock) != 0)
#define	PG_MD_PVLIST_LOCK(md, lc)	pmap_pvlist_lock((md), (lc))
#define	PG_MD_PVLIST_UNLOCK(md)		mutex_spin_exit((md)->pvh_lock)
#define	PG_MD_PVLIST_GEN(md)		((uint16_t)((md)->pvh_attrs >> 16))
#else
	u_int pvh_attrs;		/* page attributes */
	/* Uniprocessor: one global pmap_pvlist_mutex guards all pv lists. */
#define	PG_MD_PVLIST_LOCK_INIT(md)	do { } while (/*CONSTCOND*/ 0)
#define	PG_MD_PVLIST_LOCKED_P(md)	true
#define	PG_MD_PVLIST_LOCK(md, lc)	(mutex_spin_enter(&pmap_pvlist_mutex), 0)
#define	PG_MD_PVLIST_UNLOCK(md)		mutex_spin_exit(&pmap_pvlist_mutex)
#define	PG_MD_PVLIST_GEN(md)		(0)
#endif
};
370
/*
 * Initialize the machine-dependent part of a vm_page: empty pv list
 * (pv_pmap == NULL), attributes cleared.  Note that the embedded first
 * pv_entry's pv_va is seeded with the page's physical address —
 * NOTE(review): presumably reused as the PA until a real mapping is
 * entered; confirm against the pmap .c code.
 */
#define	VM_MDPAGE_INIT(pg)						\
do {									\
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);		\
	(md)->pvh_first.pv_next = NULL;					\
	(md)->pvh_first.pv_pmap = NULL;					\
	(md)->pvh_first.pv_va = VM_PAGE_TO_PHYS(pg);			\
	PG_MD_PVLIST_LOCK_INIT(md);					\
	(md)->pvh_attrs = 0;						\
} while (/* CONSTCOND */ 0)

/* Lock a page's pv list; returns the list generation (MP; see above). */
uint16_t pmap_pvlist_lock(struct vm_page_md *, bool);
382
383 #endif /* _KERNEL */
384 #endif /* _MIPS_PMAP_H_ */
385