/*	$NetBSD: pmap.h,v 1.64 2015/06/07 06:07:49 matt Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */
#ifndef	_MIPS_PMAP_H_
#define	_MIPS_PMAP_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

#include <sys/evcnt.h>

#include <mips/cpuregs.h>	/* for KSEG0 below */
//#include <mips/pte.h>

/*
 * The user address space is 2GB (0x0 - 0x80000000).
 * User programs are laid out in memory as follows:
 *			address
 *	USRTEXT		0x00001000
 *	USRDATA		USRTEXT + text_size
 *	USRSTACK	0x7FFFFFFF
 *
 * The user address space is mapped using a two-level structure where
 * virtual address bits 30..22 are used to index into a segment table which
 * points to a page worth of PTEs (a 4096-byte page can hold 1024 PTEs).
 * Bits 21..12 are then used to index a PTE which describes a page within
 * a segment.
 *
 * The wired entries in the TLB will contain the following:
 *	0-1	(UPAGES)	for curproc user struct and kernel stack.
 *
 * Note: The kernel doesn't use the same data structures as user programs.
 * All the PTE entries are stored in a single array in Sysmap which is
 * dynamically allocated at boot time.
 */

#define	mips_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define	mips_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
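
/*
 * Worked example (editorial, not part of the original header): with the
 * 4 KB page layout described above, SEGSHIFT is 22 and SEGOFSET is
 * (1 << SEGSHIFT) - 1 == 0x3fffff, so each segment spans 4 MB and
 *
 *	mips_trunc_seg(0x00401234) == 0x00400000
 *	mips_round_seg(0x00401234) == 0x00800000
 *
 * The actual SEGSHIFT/SEGOFSET values come from <mips/pte.h> and differ
 * for other page sizes.
 */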

#ifdef _LP64
#define	PMAP_SEGTABSIZE		NSEGPG
#else
#define	PMAP_SEGTABSIZE		(1 << (31 - SEGSHIFT))
#endif

union pt_entry;

union segtab {
#ifdef _LP64
	union segtab	*seg_seg[PMAP_SEGTABSIZE];
#endif
	union pt_entry	*seg_tab[PMAP_SEGTABSIZE];
};
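
/*
 * Illustrative sketch of the two-level lookup this table supports on a
 * 32-bit kernel (the real work is done by pmap_pte_lookup(), declared
 * below; names prefixed "example_" are hypothetical, and SEGSHIFT,
 * SEGOFSET and PGSHIFT are assumed to come from <mips/pte.h>):
 *
 *	union pt_entry *
 *	example_pte_lookup(struct pmap *pm, vaddr_t va)
 *	{
 *		union pt_entry * const ptepg =
 *		    pm->pm_segtab->seg_tab[va >> SEGSHIFT];
 *		if (ptepg == NULL)
 *			return NULL;
 *		return ptepg + ((va & SEGOFSET) >> PGSHIFT);
 *	}
 */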

/*
 * Structure defining a TLB entry's data.
 */
struct tlb {
	vaddr_t	tlb_hi;		/* should be 64 bits */
	uint32_t tlb_lo0;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_lo1;	/* XXX maybe 64 bits (only 32 really used) */
};

struct tlbmask {
	vaddr_t	tlb_hi;		/* should be 64 bits */
	uint32_t tlb_lo0;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_lo1;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_mask;
};

#ifdef _KERNEL
struct pmap;
typedef bool (*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t,
	union pt_entry *, uintptr_t);
union pt_entry *pmap_pte_lookup(struct pmap *, vaddr_t);
union pt_entry *pmap_pte_reserve(struct pmap *, vaddr_t, int);
void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
	uintptr_t);
void pmap_segtab_activate(struct pmap *, struct lwp *);
void pmap_segtab_init(struct pmap *);
void pmap_segtab_destroy(struct pmap *);
extern kmutex_t pmap_segtab_lock;
#endif /* _KERNEL */

/*
 * Per-TLB (normally one per CPU) ASID information.
 */
struct pmap_asid_info {
	LIST_ENTRY(pmap_asid_info) pai_link;
	uint32_t	pai_asid;	/* TLB address space tag */
};

#define	TLBINFO_LOCK(ti)		mutex_spin_enter((ti)->ti_lock)
#define	TLBINFO_UNLOCK(ti)		mutex_spin_exit((ti)->ti_lock)
#define	PMAP_PAI_ASIDVALID_P(pai, ti)	((pai)->pai_asid != 0)
#define	PMAP_PAI(pmap, ti)		(&(pmap)->pm_pai[tlbinfo_index(ti)])
#define	PAI_PMAP(pai, ti)	\
	((pmap_t)((intptr_t)(pai) \
	    - offsetof(struct pmap, pm_pai[tlbinfo_index(ti)])))
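
/*
 * PAI_PMAP() is the usual "container of" idiom: given a pointer to a
 * pmap_asid_info embedded at pm_pai[tlbinfo_index(ti)], subtracting the
 * member offset recovers the owning pmap.  Illustrative use only (the
 * variable names are hypothetical):
 *
 *	struct pmap_asid_info *pai = LIST_FIRST(&ti->ti_pais);
 *	pmap_t pm = PAI_PMAP(pai, ti);
 *	KASSERT(PMAP_PAI(pm, ti) == pai);
 */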

/*
 * Machine dependent pmap structure.
 */
struct pmap {
#ifdef MULTIPROCESSOR
	volatile uint32_t pm_active;	/* pmap was active on ... */
	volatile uint32_t pm_onproc;	/* pmap is active on ... */
	volatile u_int pm_shootdown_pending;
#endif
	union segtab	*pm_segtab;	/* pointers to pages of PTEs */
	u_int		pm_count;	/* pmap reference count */
	u_int		pm_flags;
#define	PMAP_DEFERRED_ACTIVATE	0x0001
	struct pmap_statistics pm_stats; /* pmap statistics */
	struct pmap_asid_info pm_pai[1];
};
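
/*
 * Note (editorial, not from the original header): pm_pai[] is a
 * variable-length trailer.  Only one element is declared here, but
 * struct pmap_kernel below appends MAXCPUS-1 further pmap_asid_info
 * slots on MULTIPROCESSOR kernels so the kernel pmap can hold per-TLB
 * ASID state for every CPU; user pmaps are presumably allocated with
 * room for one entry per TLB (pmap_ntlbs).
 */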

enum tlb_invalidate_op {
	TLBINV_NOBODY=0,
	TLBINV_ONE=1,
	TLBINV_ALLUSER=2,
	TLBINV_ALLKERNEL=3,
	TLBINV_ALL=4
};

struct pmap_tlb_info {
	char ti_name[8];
	uint32_t ti_asid_hint;		/* probable next ASID to use */
	uint32_t ti_asids_free;		/* # of ASIDs free */
#define	tlbinfo_noasids_p(ti)	((ti)->ti_asids_free == 0)
	kmutex_t *ti_lock;
	u_int ti_wired;			/* # of wired TLB entries */
	uint32_t ti_asid_mask;
	uint32_t ti_asid_max;
	LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */
#ifdef MULTIPROCESSOR
	pmap_t ti_victim;
	uint32_t ti_synci_page_bitmap;	/* page indices needing a syncicache */
	uint32_t ti_cpu_mask;		/* bitmask of CPUs sharing this TLB */
	enum tlb_invalidate_op ti_tlbinvop;
	u_int ti_index;
#define	tlbinfo_index(ti)	((ti)->ti_index)
	struct evcnt ti_evcnt_synci_asts;
	struct evcnt ti_evcnt_synci_all;
	struct evcnt ti_evcnt_synci_pages;
	struct evcnt ti_evcnt_synci_deferred;
	struct evcnt ti_evcnt_synci_desired;
	struct evcnt ti_evcnt_synci_duplicate;
#else
#define	tlbinfo_index(ti)	(0)
#endif
	struct evcnt ti_evcnt_asid_reinits;
	u_long ti_asid_bitmap[256 / (sizeof(u_long) * 8)];
};
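
/*
 * Sizing note (editorial): ti_asid_bitmap is dimensioned for up to 256
 * ASIDs, i.e. 256 / (sizeof(u_long) * 8) words -- 8 on a 32-bit kernel,
 * 4 on a 64-bit one -- with one bit per ASID.  ti_asids_free caches how
 * many are still available, so tlbinfo_noasids_p() can cheaply decide
 * when ASIDs have to be recycled (counted by ti_evcnt_asid_reinits);
 * ti_asid_max and ti_asid_mask presumably bound the range the hardware
 * actually implements.
 */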

#ifdef _KERNEL

struct pmap_kernel {
	struct pmap kernel_pmap;
#ifdef MULTIPROCESSOR
	struct pmap_asid_info kernel_pai[MAXCPUS-1];
#endif
};

extern struct pmap_kernel kernel_pmap_store;
extern struct pmap_tlb_info pmap_tlb0_info;
#ifdef MULTIPROCESSOR
extern struct pmap_tlb_info *pmap_tlbs[MAXCPUS];
extern u_int pmap_ntlbs;
#endif
extern paddr_t mips_avail_start;
extern paddr_t mips_avail_end;
extern vaddr_t mips_virtual_end;

#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

#define	pmap_phys_address(x)	mips_ptob(x)

/*
 * Bootstrap the system enough to run with virtual memory.
 */
void	pmap_bootstrap(void);

void	pmap_remove_all(pmap_t);
void	pmap_set_modified(paddr_t);
void	pmap_procwr(struct proc *, vaddr_t, size_t);
#define	PMAP_NEED_PROCWR

#ifdef MULTIPROCESSOR
void	pmap_tlb_shootdown_process(void);
bool	pmap_tlb_shootdown_bystanders(pmap_t pmap);
void	pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
void	pmap_tlb_syncicache_ast(struct cpu_info *);
void	pmap_tlb_syncicache_wanted(struct cpu_info *);
void	pmap_tlb_syncicache(vaddr_t, uint32_t);
#endif
void	pmap_tlb_info_init(struct pmap_tlb_info *);
void	pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *);
void	pmap_tlb_asid_acquire(pmap_t pmap, struct lwp *l);
void	pmap_tlb_asid_deactivate(pmap_t pmap);
void	pmap_tlb_asid_check(void);
void	pmap_tlb_asid_release_all(pmap_t pmap);
int	pmap_tlb_update_addr(pmap_t pmap, vaddr_t, uint32_t, bool);
void	pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t);

/*
 * pmap_prefer() helps reduce virtual-coherency exceptions in
 * the virtually-indexed cache on mips3 CPUs.
 */
#ifdef MIPS3_PLUS
#define	PMAP_PREFER(pa, va, sz, td)	pmap_prefer((pa), (va), (sz), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, vsize_t, int);
#endif /* MIPS3_PLUS */

#define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
vaddr_t	mips_pmap_map_poolpage(paddr_t);
paddr_t	mips_pmap_unmap_poolpage(vaddr_t);
struct vm_page *mips_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE(flags)	mips_pmap_alloc_poolpage(flags)
#define	PMAP_MAP_POOLPAGE(pa)		mips_pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)		mips_pmap_unmap_poolpage(va)

/*
 * Other hooks for the pool allocator.
 */
#ifdef _LP64
#define	POOL_VTOPHYS(va)	(MIPS_KSEG0_P(va) \
	    ? MIPS_KSEG0_TO_PHYS(va) \
	    : MIPS_XKPHYS_TO_PHYS(va))
#else
#define	POOL_VTOPHYS(va)	MIPS_KSEG0_TO_PHYS((vaddr_t)(va))
#endif
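
/*
 * Editorial example: POOL_VTOPHYS() only handles direct-mapped kernel
 * addresses.  On a 32-bit kernel a KSEG0 address such as 0x80123000
 * maps to physical 0x00123000 (KSEG0 is a fixed 512 MB direct map);
 * an LP64 kernel additionally accepts XKPHYS addresses, whose low bits
 * are the physical address itself.  Pool pages are presumably always
 * taken from one of these windows (see the poolpage hooks above), so
 * no page-table walk is needed.
 */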

/*
 * Select CCA to use for unmanaged pages.
 */
#define	PMAP_CCA_FOR_PA(pa)	CCA_UNCACHED		/* uncached */

#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
#define	PGC_NOCACHE	0x4000000000000000ULL
#define	PGC_PREFETCH	0x2000000000000000ULL
#endif

#define	__HAVE_VM_PAGE_MD

/*
 * pmap-specific data stored in the vm_page structure.
 *
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
#define	PV_KENTER	0x001
} *pv_entry_t;

#define	PG_MD_UNCACHED		0x0001	/* page is mapped uncached */
#define	PG_MD_MODIFIED		0x0002	/* page has been modified */
#define	PG_MD_REFERENCED	0x0004	/* page has been recently referenced */
#define	PG_MD_POOLPAGE		0x0008	/* page is used as a poolpage */
#define	PG_MD_EXECPAGE		0x0010	/* page is exec mapped */

#define	PG_MD_CACHED_P(md)	(((md)->pvh_attrs & PG_MD_UNCACHED) == 0)
#define	PG_MD_UNCACHED_P(md)	(((md)->pvh_attrs & PG_MD_UNCACHED) != 0)
#define	PG_MD_MODIFIED_P(md)	(((md)->pvh_attrs & PG_MD_MODIFIED) != 0)
#define	PG_MD_REFERENCED_P(md)	(((md)->pvh_attrs & PG_MD_REFERENCED) != 0)
#define	PG_MD_POOLPAGE_P(md)	(((md)->pvh_attrs & PG_MD_POOLPAGE) != 0)
#define	PG_MD_EXECPAGE_P(md)	(((md)->pvh_attrs & PG_MD_EXECPAGE) != 0)

struct vm_page_md {
	struct pv_entry pvh_first;	/* pv_entry first */
#ifdef MULTIPROCESSOR
	volatile u_int pvh_attrs;	/* page attributes */
	kmutex_t *pvh_lock;		/* pv list lock */
#define	PG_MD_PVLIST_LOCK_INIT(md)	((md)->pvh_lock = NULL)
#define	PG_MD_PVLIST_LOCKED_P(md)	(mutex_owner((md)->pvh_lock) != 0)
#define	PG_MD_PVLIST_LOCK(md, lc)	pmap_pvlist_lock((md), (lc))
#define	PG_MD_PVLIST_UNLOCK(md)		mutex_spin_exit((md)->pvh_lock)
#define	PG_MD_PVLIST_GEN(md)		((uint16_t)((md)->pvh_attrs >> 16))
#else
	u_int pvh_attrs;		/* page attributes */
#define	PG_MD_PVLIST_LOCK_INIT(md)	do { } while (/*CONSTCOND*/ 0)
#define	PG_MD_PVLIST_LOCKED_P(md)	true
#define	PG_MD_PVLIST_LOCK(md, lc)	(mutex_spin_enter(&pmap_pvlist_mutex), 0)
#define	PG_MD_PVLIST_UNLOCK(md)		mutex_spin_exit(&pmap_pvlist_mutex)
#define	PG_MD_PVLIST_GEN(md)		(0)
#endif
};
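
/*
 * Illustrative use of the attribute predicates and pv-list locking
 * above (example_syncicache() is a hypothetical helper, shown only to
 * demonstrate the pattern):
 *
 *	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 *	(void)PG_MD_PVLIST_LOCK(md, false);
 *	if (PG_MD_EXECPAGE_P(md) && PG_MD_CACHED_P(md))
 *		example_syncicache(pg);
 *	PG_MD_PVLIST_UNLOCK(md);
 */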

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);		\
	(md)->pvh_first.pv_next = NULL;					\
	(md)->pvh_first.pv_pmap = NULL;					\
	(md)->pvh_first.pv_va = VM_PAGE_TO_PHYS(pg);			\
	PG_MD_PVLIST_LOCK_INIT(md);					\
	(md)->pvh_attrs = 0;						\
} while (/* CONSTCOND */ 0)

uint16_t pmap_pvlist_lock(struct vm_page_md *, bool);

#endif	/* _KERNEL */
#endif	/* _MIPS_PMAP_H_ */