pmap.h revision 1.37 1 1.37 rin /* $NetBSD: pmap.h,v 1.37 2022/05/07 07:10:46 rin Exp $ */
2 1.1 matt
3 1.1 matt /*-
4 1.1 matt * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 1.1 matt * Copyright (C) 1995, 1996 TooLs GmbH.
6 1.1 matt * All rights reserved.
7 1.1 matt *
8 1.1 matt * Redistribution and use in source and binary forms, with or without
9 1.1 matt * modification, are permitted provided that the following conditions
10 1.1 matt * are met:
11 1.1 matt * 1. Redistributions of source code must retain the above copyright
12 1.1 matt * notice, this list of conditions and the following disclaimer.
13 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer in the
15 1.1 matt * documentation and/or other materials provided with the distribution.
16 1.1 matt * 3. All advertising materials mentioning features or use of this software
17 1.1 matt * must display the following acknowledgement:
18 1.1 matt * This product includes software developed by TooLs GmbH.
19 1.1 matt * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 1.1 matt * derived from this software without specific prior written permission.
21 1.1 matt *
22 1.1 matt * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 1.1 matt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 1.1 matt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 1.1 matt * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 1.1 matt * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 1.1 matt * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 1.1 matt * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 1.1 matt * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 1.1 matt * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 1.1 matt * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 1.1 matt */
33 1.1 matt
34 1.1 matt #ifndef _POWERPC_OEA_PMAP_H_
35 1.1 matt #define _POWERPC_OEA_PMAP_H_
36 1.1 matt
37 1.24 matt #ifdef _LOCORE
38 1.24 matt #error use assym.h instead
39 1.24 matt #endif
40 1.24 matt
41 1.31 rin #ifdef _MODULE
42 1.24 matt #error this file should not be included by loadable kernel modules
43 1.24 matt #endif
44 1.24 matt
45 1.15 he #ifdef _KERNEL_OPT
46 1.12 garbled #include "opt_ppcarch.h"
47 1.32 rin #include "opt_modular.h"
48 1.15 he #endif
49 1.1 matt #include <powerpc/oea/pte.h>
50 1.1 matt
51 1.37 rin #define __HAVE_PMAP_PV_TRACK
52 1.37 rin #include <uvm/pmap/pmap_pvt.h>
53 1.37 rin
/*
 * Pmap stuff
 *
 * Machine-dependent address-space structure for the PowerPC OEA MMU
 * family (32-bit OEA, 64-bit OEA, and the 64-bit "bridge" mode).
 * Which members exist depends on the PPC_OEA* kernel options.
 */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	register_t pm_sr[16];		/* segments used in this pmap */
	int pm_exec[16];		/* counts of exec mappings */
#endif
	register_t pm_vsid;		/* VSID bits */
	int pm_refs;			/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;	/* pvo's not in page table */

#ifdef PPC_OEA64
	/* count of segment-table entries evicted from pm_steg_table */
	unsigned int pm_ste_evictions;
#endif
};
76 1.1 matt
/*
 * Indirection vector for the pmap interface.  When more than one OEA
 * pmap variant can be selected at runtime (see PMAP_NEEDS_FIXUP below),
 * the generic pmap_*() entry points dispatch through a struct pmap_ops
 * chosen at boot (pmap_setup32/pmap_setup64/pmap_setup64bridge).
 * Each member mirrors the like-named pmap_*() function.
 */
struct pmap_ops {
	int (*pmapop_pte_spill)(struct pmap *, vaddr_t, bool);
	void (*pmapop_real_memory)(paddr_t *, psize_t *);
	void (*pmapop_init)(void);
	void (*pmapop_virtual_space)(vaddr_t *, vaddr_t *);
	pmap_t (*pmapop_create)(void);
	void (*pmapop_reference)(pmap_t);
	void (*pmapop_destroy)(pmap_t);
	void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
	void (*pmapop_update)(pmap_t);
	int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
	void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_kremove)(vaddr_t, vsize_t);
	bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);

	void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
	void (*pmapop_unwire)(pmap_t, vaddr_t);
	void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
	void (*pmapop_pv_protect)(paddr_t, vm_prot_t);
	void (*pmapop_query_bit)(struct vm_page *, int);
	void (*pmapop_clear_bit)(struct vm_page *, int);

	void (*pmapop_activate)(struct lwp *);
	void (*pmapop_deactivate)(struct lwp *);

	void (*pmapop_pinit)(pmap_t);
	void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);

	/* Debugger/diagnostic hooks. */
	void (*pmapop_pte_print)(volatile struct pte *);
	void (*pmapop_pteg_check)(void);
	void (*pmapop_print_mmuregs)(void);
	void (*pmapop_print_pte)(pmap_t, vaddr_t);
	void (*pmapop_pteg_dist)(void);
	void (*pmapop_pvo_verify)(void);
	vaddr_t (*pmapop_steal_memory)(vsize_t, vaddr_t *, vaddr_t *);
	void (*pmapop_bootstrap)(paddr_t, paddr_t);
	void (*pmapop_bootstrap1)(paddr_t, paddr_t);
	void (*pmapop_bootstrap2)(void);
};
117 1.12 garbled
118 1.1 matt #ifdef _KERNEL
119 1.12 garbled #include <sys/cdefs.h>
120 1.12 garbled __BEGIN_DECLS
121 1.6 matt #include <sys/param.h>
122 1.4 matt #include <sys/systm.h>
123 1.4 matt
124 1.9 sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
125 1.2 matt extern register_t iosrtable[];
126 1.6 matt #endif
127 1.1 matt extern int pmap_use_altivec;
128 1.1 matt
/*
 * Referenced/modified tracking is implemented with the PTE R (REF) and
 * C (CHG) bits; these wrappers map the MI pmap interface onto the
 * generic bit query/clear routines.
 */
#define	pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))

/* Statistics accessors required by the MI pmap(9) interface. */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
136 1.1 matt
/* ARGSUSED */
/*
 * No optimized whole-address-space teardown exists for this pmap;
 * returning false tells UVM that nothing was removed and the normal
 * per-range pmap_remove() path must be used.
 */
static __inline bool
pmap_remove_all(struct pmap *pmap)
{

	return false;
}
144 1.1 matt
145 1.13 matt #if (defined(PPC_OEA) + defined(PPC_OEA64) + defined(PPC_OEA64_BRIDGE)) != 1
146 1.21 matt #define PMAP_NEEDS_FIXUP
147 1.6 matt #endif
148 1.1 matt
149 1.27 matt extern volatile struct pteg *pmap_pteg_table;
150 1.27 matt extern unsigned int pmap_pteg_cnt;
151 1.27 matt extern unsigned int pmap_pteg_mask;
152 1.27 matt
153 1.12 garbled void pmap_bootstrap(vaddr_t, vaddr_t);
154 1.33 thorpej void pmap_bootstrap1(vaddr_t, vaddr_t);
155 1.33 thorpej void pmap_bootstrap2(void);
156 1.12 garbled bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
157 1.12 garbled bool pmap_query_bit(struct vm_page *, int);
158 1.12 garbled bool pmap_clear_bit(struct vm_page *, int);
159 1.12 garbled void pmap_real_memory(paddr_t *, psize_t *);
160 1.5 chs void pmap_procwr(struct proc *, vaddr_t, size_t);
161 1.12 garbled int pmap_pte_spill(pmap_t, vaddr_t, bool);
162 1.28 matt int pmap_ste_spill(pmap_t, vaddr_t, bool);
163 1.12 garbled void pmap_pinit(pmap_t);
164 1.1 matt
165 1.35 thorpej #ifdef PPC_OEA601
166 1.35 thorpej bool pmap_extract_ioseg601(vaddr_t, paddr_t *);
167 1.35 thorpej #endif /* PPC_OEA601 */
168 1.35 thorpej #ifdef PPC_OEA
169 1.35 thorpej bool pmap_extract_battable(vaddr_t, paddr_t *);
170 1.35 thorpej #endif /* PPC_OEA */
171 1.35 thorpej
/*
 * mmap(2) cookie handling: the low bits of the physical page "number"
 * passed through pmap_phys_address()/pmap_mmap_flags() encode caching
 * attributes for device mappings.
 */
u_int powerpc_mmap_flags(paddr_t);
#define POWERPC_MMAP_FLAG_MASK	0xf	/* low nibble reserved for flags */
#define POWERPC_MMAP_FLAG_PREFETCHABLE	0x1
#define POWERPC_MMAP_FLAG_CACHEABLE	0x2

#define pmap_phys_address(ppn)		(ppn & ~POWERPC_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		powerpc_mmap_flags(ppn)
179 1.22 macallan
180 1.29 christos static __inline paddr_t vtophys (vaddr_t);
181 1.4 matt
182 1.1 matt /*
183 1.1 matt * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
184 1.1 matt *
185 1.1 matt * Note: This won't work if we have more memory than can be direct-mapped
186 1.1 matt * VA==PA all at once. But pmap_copy_page() and pmap_zero_page() will have
187 1.1 matt * this problem, too.
188 1.1 matt */
189 1.9 sanjayl #if !defined(PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
190 1.1 matt #define PMAP_MAP_POOLPAGE(pa) (pa)
191 1.1 matt #define PMAP_UNMAP_POOLPAGE(pa) (pa)
192 1.4 matt #define POOL_VTOPHYS(va) vtophys((vaddr_t) va)
193 1.6 matt #endif
194 1.1 matt
195 1.29 christos static __inline paddr_t
196 1.1 matt vtophys(vaddr_t va)
197 1.1 matt {
198 1.1 matt paddr_t pa;
199 1.1 matt
200 1.1 matt if (pmap_extract(pmap_kernel(), va, &pa))
201 1.1 matt return pa;
202 1.26 jym KASSERTMSG(0, "vtophys: pmap_extract of %#"PRIxVADDR" failed", va);
203 1.4 matt return (paddr_t) -1;
204 1.1 matt }
205 1.1 matt
206 1.21 matt
#ifdef PMAP_NEEDS_FIXUP
/*
 * More than one OEA pmap variant is compiled in; the pmap interface is
 * dispatched through pmapops, which boot code points at the ops vector
 * matching the CPU actually present.
 */
extern const struct pmap_ops *pmapops;
extern const struct pmap_ops pmap32_ops;	/* 32-bit OEA */
extern const struct pmap_ops pmap64_ops;	/* 64-bit OEA */
extern const struct pmap_ops pmap64bridge_ops;	/* 64-bit bridge mode */

/* Select the 32-bit OEA pmap implementation. */
static __inline void
pmap_setup32(void)
{
	pmapops = &pmap32_ops;
}

/* Select the 64-bit OEA pmap implementation. */
static __inline void
pmap_setup64(void)
{
	pmapops = &pmap64_ops;
}

/* Select the 64-bit bridge-mode pmap implementation. */
static __inline void
pmap_setup64bridge(void)
{
	pmapops = &pmap64bridge_ops;
}
#endif
231 1.12 garbled
232 1.12 garbled bool pmap_pageidlezero (paddr_t);
233 1.12 garbled void pmap_syncicache (paddr_t, psize_t);
234 1.12 garbled #ifdef PPC_OEA64
235 1.12 garbled vaddr_t pmap_setusr (vaddr_t);
236 1.12 garbled vaddr_t pmap_unsetusr (void);
237 1.12 garbled #endif
238 1.12 garbled
239 1.12 garbled #ifdef PPC_OEA64_BRIDGE
240 1.12 garbled int pmap_setup_segment0_map(int use_large_pages, ...);
241 1.12 garbled #endif
242 1.12 garbled
243 1.22 macallan #define PMAP_MD_PREFETCHABLE 0x2000000
244 1.12 garbled #define PMAP_STEAL_MEMORY
245 1.12 garbled #define PMAP_NEED_PROCWR
246 1.12 garbled
247 1.12 garbled void pmap_zero_page(paddr_t);
248 1.12 garbled void pmap_copy_page(paddr_t, paddr_t);
249 1.12 garbled
250 1.19 uebayasi LIST_HEAD(pvo_head, pvo_entry);
251 1.19 uebayasi
252 1.19 uebayasi #define __HAVE_VM_PAGE_MD
253 1.19 uebayasi
/*
 * Per-physical-page pmap state: attribute bits (PTE_REF/PTE_CHG
 * summaries) and the head of the list of pvo_entry mappings of the
 * page.
 */
struct pmap_page {
	unsigned int pp_attrs;		/* page attribute bits */
	struct pvo_head pp_pvoh;	/* list of mappings of this page */
#ifdef MODULAR
	/*
	 * NOTE(review): padding present only under MODULAR — presumably
	 * reserves space so the structure size is independent of kernel
	 * options for module binary compatibility; confirm against
	 * sys/uvm usage before relying on it.
	 */
	uintptr_t pp_dummy[3];
#endif
};
261 1.36 riastrad
/*
 * Machine-dependent vm_page data: just a struct pmap_page, with
 * compatibility aliases for the older mdpg_* member names.
 */
struct vm_page_md {
	struct pmap_page mdpg_pp;
#define	mdpg_attrs	mdpg_pp.pp_attrs
#define	mdpg_pvoh	mdpg_pp.pp_pvoh
#ifdef MODULAR
#define	mdpg_dummy	mdpg_pp.pp_dummy
#endif
};
270 1.19 uebayasi
/* Initialize the machine-dependent fields of a newly managed vm_page. */
#define	VM_MDPAGE_INIT(pg) do {			\
	(pg)->mdpage.mdpg_attrs = 0;		\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
} while (/*CONSTCOND*/0)
275 1.19 uebayasi
276 1.12 garbled __END_DECLS
277 1.1 matt #endif /* _KERNEL */
278 1.1 matt
279 1.1 matt #endif /* _POWERPC_OEA_PMAP_H_ */
280