/*	$NetBSD: pte.h,v 1.2 2011/01/18 01:02:54 matt Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _POWERPC_BOOKE_PTE_H_
#define _POWERPC_BOOKE_PTE_H_

#ifndef _LOCORE
typedef __uint32_t pt_entry_t;
#endif

#include <powerpc/booke/spr.h>
/*
 * The PTE format is defined by software and must be translated into the
 * appropriate MAS register fields.  The X, W, and R permission bits are
 * separated by single bits so that they can map onto the MAS3 bits
 * UX/UW/UR or SX/SW/SR with just a mask and a shift.
 */
#define PTE_IO		(PTE_I|PTE_G|PTE_xW|PTE_xR)
#define PTE_DEFAULT	(PTE_M|PTE_xX|PTE_xW|PTE_xR)
#define PTE_MAS3_MASK	(MAS3_RPN|MAS3_U2|MAS3_U0)
#define PTE_MAS2_MASK	(MAS2_WIMGE)
#define PTE_RPN_MASK	MAS3_RPN	/* MAS3[RPN] */
#define PTE_RWX_MASK	(PTE_xX|PTE_xW|PTE_xR)
#define PTE_WIRED	(MAS3_U0 << 2)	/* page is wired (PTE only) */
#define PTE_xX		(MAS3_U0 << 1)	/* MAS3[UX] | MAS3[SX] */
#define PTE_UNSYNCED	MAS3_U0		/* page needs isync */
#define PTE_xW		MAS3_U1		/* MAS3[UW] | MAS3[SW] */
#define PTE_UNMODIFIED	MAS3_U2		/* page is unmodified */
#define PTE_xR		MAS3_U3		/* MAS3[UR] | MAS3[SR] */
#define PTE_RWX_SHIFT	6
#define PTE_UNUSED	0x00000020
#define PTE_WIMGE_MASK	MAS2_WIMGE
#define PTE_WIG		(PTE_W|PTE_I|PTE_G)
#define PTE_W		MAS2_W		/* Write-through */
#define PTE_I		MAS2_I		/* cache-Inhibited */
#define PTE_M		MAS2_M		/* Memory coherence */
#define PTE_G		MAS2_G		/* Guarded */
#define PTE_E		MAS2_E		/* [Little] Endian */
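
/*
 * Illustrative sketch (not part of the original header): assuming the usual
 * e500 MAS3 layout, where SX/SW/SR sit six bits below U0..U3 and UX/UW/UR
 * sit one bit above them, the software permission bits fold into MAS3 with
 * a mask and a shift, roughly:
 *
 *	mas3 = (pte & PTE_RPN_MASK)
 *	     | ((pte & PTE_RWX_MASK) >> PTE_RWX_SHIFT)              // SX|SW|SR
 *	     | (user ? (pte & PTE_RWX_MASK) >> (PTE_RWX_SHIFT - 1)  // UX|UW|UR
 *	             : 0);
 */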

#ifndef _LOCORE
#ifdef _KERNEL

static inline bool
pte_cached_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_I) == 0;
}

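/*
 * A page is considered modified once its PTE is writable (PTE_xW) and the
 * PTE_UNMODIFIED marker has been cleared, i.e. a write has been seen.
 */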
static inline bool
pte_modified_p(pt_entry_t pt_entry)
{
	return (pt_entry & (PTE_UNMODIFIED|PTE_xW)) == PTE_xW;
}

static inline bool
pte_valid_p(pt_entry_t pt_entry)
{
	return pt_entry != 0;
}

static inline bool
pte_exec_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xX) != 0;
}

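/*
 * PTE_UNSYNCED marks a page whose execute permission is deferred until the
 * instruction cache has been synchronized with its contents; such a page
 * takes an execute fault first, at which point the pmap can sync the icache
 * and grant PTE_xX.
 */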
static inline bool
pte_deferred_exec_p(pt_entry_t pt_entry)
{
	//return (pt_entry & (PTE_xX|PTE_UNSYNCED)) == (PTE_xX|PTE_UNSYNCED);
	return (pt_entry & PTE_UNSYNCED) == PTE_UNSYNCED;
}

static inline bool
pte_wired_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_WIRED) != 0;
}

static inline pt_entry_t
pte_nv_entry(bool kernel)
{
	return 0;
}

static inline paddr_t
pte_to_paddr(pt_entry_t pt_entry)
{
	return (paddr_t)(pt_entry & PTE_RPN_MASK);
}

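/*
 * WIMGE attribute helpers: the *io* variants are for unmanaged (device)
 * mappings, the others for managed memory; see pte_flag_bits() below for
 * how they are selected.
 */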
static inline pt_entry_t
pte_iouncached_bits(void)
{
	return PTE_W|PTE_I|PTE_G;
}

static inline pt_entry_t
pte_ionocached_bits(void)
{
	return PTE_WIG;
}

static inline pt_entry_t
pte_iocached_bits(void)
{
	return PTE_G;
}

static inline pt_entry_t
pte_nocached_bits(void)
{
	return PTE_M|PTE_I;
}

static inline pt_entry_t
pte_cached_bits(void)
{
	return PTE_M;
}

static inline pt_entry_t
pte_cached_change(pt_entry_t pt_entry, bool cached)
{
	return (pt_entry & ~PTE_I) | (cached ? 0 : PTE_I);
}

static inline pt_entry_t
pte_wired_entry(void)
{
	return PTE_WIRED;
}

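/*
 * Write permission and the PTE_UNMODIFIED marker are dropped together,
 * since an unwritable mapping no longer needs modified tracking; a
 * downgrade that removes execute likewise clears PTE_UNSYNCED with PTE_xX.
 */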
static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pt_entry)
{
	return pt_entry & ~(PTE_xW|PTE_UNMODIFIED);
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
{
	pt_entry &= ~(PTE_xW|PTE_UNMODIFIED);
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pt_entry &= ~(PTE_xX|PTE_UNSYNCED);
	return pt_entry;
}

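/*
 * Compute the permission bits for a new mapping.  Executable mappings of
 * pages not yet marked as exec pages get PTE_UNSYNCED instead of PTE_xX
 * (deferred icache sync); writable mappings of pages not yet marked
 * modified carry PTE_UNMODIFIED so the first write can be detected.
 */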
static inline pt_entry_t
pte_prot_bits(struct vm_page *pg, vm_prot_t prot)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pt_entry = PTE_xR;
	if (prot & VM_PROT_EXECUTE) {
#if 0
		pt_entry |= PTE_xX;
		if (pg != NULL && !VM_PAGE_MD_EXECPAGE_P(pg))
			pt_entry |= PTE_UNSYNCED;
#elif 1
		if (pg != NULL && !VM_PAGE_MD_EXECPAGE_P(pg))
			pt_entry |= PTE_UNSYNCED;
		else
			pt_entry |= PTE_xX;
#else
		pt_entry |= PTE_UNSYNCED;
#endif
	}
	if (prot & VM_PROT_WRITE) {
		pt_entry |= PTE_xW;
		if (pg != NULL && !VM_PAGE_MD_MODIFIED_P(pg))
			pt_entry |= PTE_UNMODIFIED;
	}
	return pt_entry;
}

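/*
 * Select the WIMGE attribute bits: managed pages (pg != NULL) get the
 * normal cached/uncached memory attributes, unmanaged mappings get the
 * I/O variants.
 */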
static inline pt_entry_t
pte_flag_bits(struct vm_page *pg, int flags)
{
	if (__predict_false(flags & PMAP_MD_NOCACHE)) {
		if (__predict_true(pg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(pg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
}

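/*
 * Construct a PTE for an ordinary mapping: the physical page number plus
 * the attribute and permission bits derived above.
 */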
static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page *pg, vm_prot_t prot,
    int flags, bool kernel)
{
	pt_entry_t pt_entry = (pt_entry_t)pa & PTE_RPN_MASK;

	pt_entry |= pte_flag_bits(pg, flags);
	pt_entry |= pte_prot_bits(pg, prot);

	return pt_entry;
}

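/*
 * Construct a PTE for a kenter-style mapping; the page is treated as
 * unmanaged for permission purposes, so no deferred-exec or modified
 * tracking bits are set.
 */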
static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page *pg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pt_entry = (pt_entry_t)pa & PTE_RPN_MASK;

	pt_entry |= pte_flag_bits(pg, flags);
	pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */

	return pt_entry;
}
#endif /* _KERNEL */
#endif /* !_LOCORE */

#endif /* !_POWERPC_BOOKE_PTE_H_ */