/*	$NetBSD: pte.h,v 1.9 2017/06/24 07:19:59 skrll Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _POWERPC_BOOKE_PTE_H_
#define	_POWERPC_BOOKE_PTE_H_

#ifndef _LOCORE
#ifndef __BSD_PT_ENTRY_T
#define	__BSD_PT_ENTRY_T	__uint32_t
typedef __BSD_PT_ENTRY_T	pt_entry_t;
#define	PRIxPTE			PRIx32
#endif
#endif

#include <powerpc/booke/spr.h>
/*
 * The PTE format is software-defined and must be translated into the various
 * MAS register fields.  X, W, and R are separated by single bits so that they
 * can map onto the MAS3 permission bits UX/UW/UR or SX/SW/SR with a mask and
 * a shift (an illustrative note follows the definitions below).
 */
#define	PTE_IO		(PTE_I|PTE_G|PTE_xW|PTE_xR)
#define	PTE_DEFAULT	(PTE_M|PTE_xX|PTE_xW|PTE_xR)
#define	PTE_MAS3_MASK	(MAS3_RPN|MAS3_U2|MAS3_U0)
#define	PTE_MAS2_MASK	(MAS2_WIMGE)
#define	PTE_RPN_MASK	MAS3_RPN		/* MAS3[RPN] */
#define	PTE_RWX_MASK	(PTE_xX|PTE_xW|PTE_xR)
#define	PTE_WIRED	(MAS3_U0 << 2)		/* page is wired (PTE only) */
#define	PTE_xX		(MAS3_U0 << 1)		/* MAS3[UX] | MAS3[SX] */
#define	PTE_UNSYNCED	MAS3_U0			/* page needs isync */
#define	PTE_xW		MAS3_U1			/* MAS3[UW] | MAS3[SW] */
#define	PTE_UNMODIFIED	MAS3_U2			/* page is unmodified */
#define	PTE_xR		MAS3_U3			/* MAS3[UR] | MAS3[SR] */
#define	PTE_RWX_SHIFT	6
#define	PTE_UNUSED	0x00000020
#define	PTE_WIMGE_MASK	MAS2_WIMGE
#define	PTE_WIG		(PTE_W|PTE_I|PTE_G)
#define	PTE_W		MAS2_W			/* Write-through */
#define	PTE_I		MAS2_I			/* cache-Inhibited */
#define	PTE_M		MAS2_M			/* Memory coherence */
#define	PTE_G		MAS2_G			/* Guarded */
#define	PTE_E		MAS2_E			/* [Little] Endian */
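
/*
 * Illustrative sketch (editorial, not part of the original header): assuming
 * the usual e500 MAS3 layout from <powerpc/booke/spr.h>, where the low-order
 * bits run ... U0 U1 U2 U3 UX SX UW SW UR SR, the software permission bits
 * above collapse onto the MAS3 permission field with one mask and shift, e.g.
 *
 *	mas3 |= (pte & PTE_RWX_MASK) >> PTE_RWX_SHIFT;		// SX|SW|SR
 *	mas3 |= (pte & PTE_RWX_MASK) >> (PTE_RWX_SHIFT - 1);	// UX|UW|UR
 *
 * How (and whether) the user bits are actually granted is up to the pmap/TLB
 * load code; the arithmetic only shows why the PTE bits are spaced as they are.
 */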

#ifndef _LOCORE
#ifdef _KERNEL

static inline uint32_t
pte_value(pt_entry_t pt_entry)
{
	return pt_entry;
}

static inline bool
pte_cached_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_I) == 0;
}

static inline bool
pte_modified_p(pt_entry_t pt_entry)
{
	return (pt_entry & (PTE_UNMODIFIED|PTE_xW)) == PTE_xW;
}

static inline bool
pte_valid_p(pt_entry_t pt_entry)
{
	return pt_entry != 0;
}

static inline bool
pte_zero_p(pt_entry_t pt_entry)
{
	return pt_entry == 0;
}

static inline bool
pte_exec_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xX) != 0;
}

static inline bool
pte_readonly_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xW) == 0;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pt_entry)
{
	//return (pt_entry & (PTE_xX|PTE_UNSYNCED)) == (PTE_xX|PTE_UNSYNCED);
	return (pt_entry & PTE_UNSYNCED) == PTE_UNSYNCED;
}
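
/*
 * Editorial note: per the PTE_UNSYNCED definition above ("page needs isync"),
 * a deferred-exec mapping is one whose execute permission is being withheld,
 * presumably until the pmap has synchronized the instruction cache for the
 * page; pte_prot_bits() below sets PTE_UNSYNCED instead of PTE_xX for pages
 * not yet marked as exec pages.  The commented-out test is a stricter variant
 * that would also require PTE_xX to be present.
 */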

static inline bool
pte_wired_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_WIRED) != 0;
}

static inline pt_entry_t
pte_nv_entry(bool kernel)
{
	return 0;
}

static inline paddr_t
pte_to_paddr(pt_entry_t pt_entry)
{
	return (paddr_t)(pt_entry & PTE_RPN_MASK);
}

static inline pt_entry_t
pte_ionocached_bits(void)
{
	return PTE_I|PTE_G;
}

static inline pt_entry_t
pte_iocached_bits(void)
{
	return PTE_G;
}

static inline pt_entry_t
pte_nocached_bits(void)
{
	return PTE_M|PTE_I;
}

static inline pt_entry_t
pte_cached_bits(void)
{
	return PTE_M;
}

static inline pt_entry_t
pte_cached_change(pt_entry_t pt_entry, bool cached)
{
	return (pt_entry & ~PTE_I) | (cached ? 0 : PTE_I);
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pt_entry)
{
	return pt_entry | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pt_entry)
{
	return pt_entry & ~PTE_WIRED;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pt_entry)
{
	return pt_entry & ~(PTE_xW|PTE_UNMODIFIED);
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
{
	pt_entry &= ~(PTE_xW|PTE_UNMODIFIED);
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pt_entry &= ~(PTE_xX|PTE_UNSYNCED);
	return pt_entry;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pt_entry = PTE_xR;
	if (prot & VM_PROT_EXECUTE) {
#if 0
		pt_entry |= PTE_xX;
		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= PTE_UNSYNCED;
#elif 1
		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= PTE_UNSYNCED;
		else
			pt_entry |= PTE_xX;
#else
		pt_entry |= PTE_UNSYNCED;
#endif
	}
	if (prot & VM_PROT_WRITE) {
		pt_entry |= PTE_xW;
		if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
			pt_entry |= PTE_UNMODIFIED;
	}
	return pt_entry;
}
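
/*
 * Worked example (editorial sketch derived from the logic above): for a
 * managed page that is not yet marked as an exec page and not yet modified,
 *
 *	pte_prot_bits(mdpg, VM_PROT_READ|VM_PROT_EXECUTE)
 *	    == PTE_xR | PTE_UNSYNCED
 *	pte_prot_bits(mdpg, VM_PROT_READ|VM_PROT_WRITE)
 *	    == PTE_xR | PTE_xW | PTE_UNMODIFIED
 *
 * i.e. execute permission is deferred (PTE_UNSYNCED) and writable mappings
 * are tagged PTE_UNMODIFIED, so pte_modified_p() reports the mapping as
 * clean until the pmap clears that bit.
 */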

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags)
{
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
}
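
/*
 * Editorial note (a reading of the logic above, not original text): a NULL
 * mdpg means there is no struct vm_page for the physical address, i.e. an
 * unmanaged (typically device) mapping, which gets guarded (PTE_G) I/O
 * attributes; managed memory gets PTE_M for coherence and remains cacheable
 * unless PMAP_NOCACHE asks for PTE_I as well.
 */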

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags, bool kernel)
{
	pt_entry_t pt_entry = (pt_entry_t) pa & PTE_RPN_MASK;

	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(mdpg, prot);

	return pt_entry;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags)
{
	pt_entry_t pt_entry = (pt_entry_t) pa & PTE_RPN_MASK;

	pt_entry |= PTE_WIRED;
	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */

	return pt_entry;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

#endif /* _KERNEL */
#endif /* !_LOCORE */

#endif /* !_POWERPC_BOOKE_PTE_H_ */