/*	$NetBSD: pte_coldfire.h,v 1.1.4.2 2013/08/28 23:59:18 rmind Exp $	*/
2 1.1.4.2 rmind /*-
3 1.1.4.2 rmind * Copyright (c) 2013 The NetBSD Foundation, Inc.
4 1.1.4.2 rmind * All rights reserved.
5 1.1.4.2 rmind *
6 1.1.4.2 rmind * This code is derived from software contributed to The NetBSD Foundation
7 1.1.4.2 rmind * by Matt Thomas of 3am Software Foundry.
8 1.1.4.2 rmind *
9 1.1.4.2 rmind * Redistribution and use in source and binary forms, with or without
10 1.1.4.2 rmind * modification, are permitted provided that the following conditions
11 1.1.4.2 rmind * are met:
12 1.1.4.2 rmind * 1. Redistributions of source code must retain the above copyright
13 1.1.4.2 rmind * notice, this list of conditions and the following disclaimer.
14 1.1.4.2 rmind * 2. Redistributions in binary form must reproduce the above copyright
15 1.1.4.2 rmind * notice, this list of conditions and the following disclaimer in the
16 1.1.4.2 rmind * documentation and/or other materials provided with the distribution.
17 1.1.4.2 rmind *
18 1.1.4.2 rmind * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 1.1.4.2 rmind * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 1.1.4.2 rmind * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 1.1.4.2 rmind * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 1.1.4.2 rmind * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 1.1.4.2 rmind * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 1.1.4.2 rmind * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 1.1.4.2 rmind * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 1.1.4.2 rmind * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 1.1.4.2 rmind * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 1.1.4.2 rmind * POSSIBILITY OF SUCH DAMAGE.
29 1.1.4.2 rmind */
30 1.1.4.2 rmind
31 1.1.4.2 rmind #ifndef _M68K_PTE_COLDFIRE_H_
32 1.1.4.2 rmind #define _M68K_PTE_COLDFIRE_H_
33 1.1.4.2 rmind
34 1.1.4.2 rmind #ifdef __ASSEMBLY__
35 1.1.4.2 rmind #error use assym.h instead
36 1.1.4.2 rmind #endif
37 1.1.4.2 rmind
38 1.1.4.2 rmind #ifndef __BSD_PT_ENTRY_T
39 1.1.4.2 rmind #define __BSD_PT_ENTRY_T __uint32_t
40 1.1.4.2 rmind typedef __BSD_PT_ENTRY_T pt_entry_t;
41 1.1.4.2 rmind #endif
42 1.1.4.2 rmind
43 1.1.4.2 rmind #define MMUTR_VA __BITS(31,10) // Virtual Address
44 1.1.4.2 rmind #define MMUTR_ID __BITS(9,2) // ASID
45 1.1.4.2 rmind #define MMUTR_SG __BIT(1) // Shared Global
46 1.1.4.2 rmind #define MMUTR_V __BIT(0) // Valid
47 1.1.4.2 rmind
48 1.1.4.2 rmind #define MMUDR_PA __BITS(31,10) // Physical Address
49 1.1.4.2 rmind #define MMUDR_SZ __BITS(9,8) // Entry Size
50 1.1.4.2 rmind #define MMUDR_SZ_1MB 0
51 1.1.4.2 rmind #define MMUDR_SZ_4KB 1
52 1.1.4.2 rmind #define MMUDR_SZ_8KB 2
53 1.1.4.2 rmind #define MMUDR_SZ_16MB 3
54 1.1.4.2 rmind #define MMUDR_CM __BITS(7,6) // Cache Mode
55 1.1.4.2 rmind #define MMUDR_CM_WT 0 // Write-Through
56 1.1.4.2 rmind #define MMUDR_CM_WB 1 // Write-Back (Copy-Back)
57 1.1.4.2 rmind #define MMUDR_CM_NC 2 // Non-cacheable
58 1.1.4.2 rmind #define MMUDR_CM_NCP 2 // Non-cacheable Precise
59 1.1.4.2 rmind #define MMUDR_CM_NCI 3 // Non-cacheable Imprecise
60 1.1.4.2 rmind #define MMUDR_SP __BIT(5) // Supervisor Protect
61 1.1.4.2 rmind #define MMUDR_R __BIT(4) // Read Access
62 1.1.4.2 rmind #define MMUDR_W __BIT(3) // Write Access
63 1.1.4.2 rmind #define MMUDR_X __BIT(2) // Execute Access
64 1.1.4.2 rmind #define MMUDR_LK __BIT(1) // Lock Entry
65 1.1.4.2 rmind #define MMUDR_MBZ0 __BIT(0) // Must be zero
66 1.1.4.2 rmind
/*
 * The PTE is basically the contents of MMUDR[31:2] | MMUAR[0].
 * We overload the meaning of MMUDR_LK to indicate a wired mapping.
 * It will be cleared before writing to the TLB.
 */
72 1.1.4.2 rmind
73 1.1.4.2 rmind #ifdef _KERNEL
74 1.1.4.2 rmind
75 1.1.4.2 rmind static inline bool
76 1.1.4.2 rmind pte_cached_p(pt_entry_t pt_entry)
77 1.1.4.2 rmind {
78 1.1.4.2 rmind return (pt_entry & MMUDR_CM_NC) != MMUDR_CM_NC;
79 1.1.4.2 rmind }
80 1.1.4.2 rmind
81 1.1.4.2 rmind static inline bool
82 1.1.4.2 rmind pte_modified_p(pt_entry_t pt_entry)
83 1.1.4.2 rmind {
84 1.1.4.2 rmind return (pt_entry & MMUDR_W) == MMUDR_W;
85 1.1.4.2 rmind }
86 1.1.4.2 rmind
/*
 * Return true if the PTE is valid (bit 0, MMUAR[0], per the PTE format
 * comment above).
 *
 * NOTE(review): MMUAR_V is not defined in this header -- only MMUTR_V
 * (__BIT(0)) is.  Presumably MMUAR_V comes from a ColdFire MMU register
 * header included by users of this file; verify it exists and is
 * __BIT(0), otherwise this will not compile.
 */
static inline bool
pte_valid_p(pt_entry_t pt_entry)
{
	return (pt_entry & MMUAR_V) == MMUAR_V;
}
92 1.1.4.2 rmind
93 1.1.4.2 rmind static inline bool
94 1.1.4.2 rmind pte_exec_p(pt_entry_t pt_entry)
95 1.1.4.2 rmind {
96 1.1.4.2 rmind return (pt_entry & MMUDR_X) == MMUDR_X;
97 1.1.4.2 rmind }
98 1.1.4.2 rmind
99 1.1.4.2 rmind static inline bool
100 1.1.4.2 rmind pte_deferred_exec_p(pt_entry_t pt_entry)
101 1.1.4.2 rmind {
102 1.1.4.2 rmind return !pte_exec_p(pt_entry);
103 1.1.4.2 rmind }
104 1.1.4.2 rmind
105 1.1.4.2 rmind static inline bool
106 1.1.4.2 rmind pte_wired_p(pt_entry_t pt_entry)
107 1.1.4.2 rmind {
108 1.1.4.2 rmind return (pt_entry & MMUDR_LK) == MMUDR_LK;
109 1.1.4.2 rmind }
110 1.1.4.2 rmind
111 1.1.4.2 rmind static inline pt_entry_t
112 1.1.4.2 rmind pte_nv_entry(bool kernel)
113 1.1.4.2 rmind {
114 1.1.4.2 rmind return 0;
115 1.1.4.2 rmind }
116 1.1.4.2 rmind
117 1.1.4.2 rmind static inline paddr_t
118 1.1.4.2 rmind pte_to_paddr(pt_entry_t pt_entry)
119 1.1.4.2 rmind {
120 1.1.4.2 rmind return (paddr_t)(pt_entry & MMUDR_PA);
121 1.1.4.2 rmind }
122 1.1.4.2 rmind
123 1.1.4.2 rmind static inline pt_entry_t
124 1.1.4.2 rmind pte_ionocached_bits(void)
125 1.1.4.2 rmind {
126 1.1.4.2 rmind return MMUDR_CM_NCP;
127 1.1.4.2 rmind }
128 1.1.4.2 rmind
129 1.1.4.2 rmind static inline pt_entry_t
130 1.1.4.2 rmind pte_iocached_bits(void)
131 1.1.4.2 rmind {
132 1.1.4.2 rmind return MMUDR_CM_NCP;
133 1.1.4.2 rmind }
134 1.1.4.2 rmind
135 1.1.4.2 rmind static inline pt_entry_t
136 1.1.4.2 rmind pte_nocached_bits(void)
137 1.1.4.2 rmind {
138 1.1.4.2 rmind return MMUDR_CM_NCP;
139 1.1.4.2 rmind }
140 1.1.4.2 rmind
141 1.1.4.2 rmind static inline pt_entry_t
142 1.1.4.2 rmind pte_cached_bits(void)
143 1.1.4.2 rmind {
144 1.1.4.2 rmind return MMUDR_CM_WB;
145 1.1.4.2 rmind }
146 1.1.4.2 rmind
147 1.1.4.2 rmind static inline pt_entry_t
148 1.1.4.2 rmind pte_cached_change(pt_entry_t pt_entry, bool cached)
149 1.1.4.2 rmind {
150 1.1.4.2 rmind return (pt_entry & ~MMUDR_CM) | (cached ? MMUDR_CM_WB : MMUDR_CM_NCP);
151 1.1.4.2 rmind }
152 1.1.4.2 rmind
153 1.1.4.2 rmind static inline pt_entry_t
154 1.1.4.2 rmind pte_wire_entry(pt_entry_t pt_entry)
155 1.1.4.2 rmind {
156 1.1.4.2 rmind return pt_entry | MMUDR_LK;
157 1.1.4.2 rmind }
158 1.1.4.2 rmind
159 1.1.4.2 rmind static inline pt_entry_t
160 1.1.4.2 rmind pte_unwire_entry(pt_entry_t pt_entry)
161 1.1.4.2 rmind {
162 1.1.4.2 rmind return pt_entry & ~MMUDR_LK;
163 1.1.4.2 rmind }
164 1.1.4.2 rmind
165 1.1.4.2 rmind static inline pt_entry_t
166 1.1.4.2 rmind pte_prot_nowrite(pt_entry_t pt_entry)
167 1.1.4.2 rmind {
168 1.1.4.2 rmind return pt_entry & ~MMUDR_W;
169 1.1.4.2 rmind }
170 1.1.4.2 rmind
171 1.1.4.2 rmind static inline pt_entry_t
172 1.1.4.2 rmind pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
173 1.1.4.2 rmind {
174 1.1.4.2 rmind pt_entry &= ~MMUDR_W;
175 1.1.4.2 rmind if ((newprot & VM_PROT_EXECUTE) == 0)
176 1.1.4.2 rmind pt_entry &= ~MMUDR_X;
177 1.1.4.2 rmind return pt_entry;
178 1.1.4.2 rmind }
179 1.1.4.2 rmind
180 1.1.4.2 rmind static inline pt_entry_t
181 1.1.4.2 rmind pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
182 1.1.4.2 rmind {
183 1.1.4.2 rmind KASSERT(prot & VM_PROT_READ);
184 1.1.4.2 rmind pt_entry_t pt_entry = MMUDR_R;
185 1.1.4.2 rmind if (prot & VM_PROT_EXECUTE) {
186 1.1.4.2 rmind /* Only allow exec for managed pages */
187 1.1.4.2 rmind if (mdpg != NULL && VM_PAGEMD_EXECPAGE_P(mdpg))
188 1.1.4.2 rmind pt_entry |= MMUDR_X;
189 1.1.4.2 rmind }
190 1.1.4.2 rmind if (prot & VM_PROT_WRITE) {
191 1.1.4.2 rmind if (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg))
192 1.1.4.2 rmind pt_entry |= MMUDR_W;
193 1.1.4.2 rmind }
194 1.1.4.2 rmind return pt_entry;
195 1.1.4.2 rmind }
196 1.1.4.2 rmind
197 1.1.4.2 rmind static inline pt_entry_t
198 1.1.4.2 rmind pte_flag_bits(struct vm_page_md *mdpg, int flags)
199 1.1.4.2 rmind {
200 1.1.4.2 rmind if (__predict_false(flags & PMAP_NOCACHE)) {
201 1.1.4.2 rmind if (__predict_true(mdpg != NULL)) {
202 1.1.4.2 rmind return pte_nocached_bits();
203 1.1.4.2 rmind } else {
204 1.1.4.2 rmind return pte_ionocached_bits();
205 1.1.4.2 rmind }
206 1.1.4.2 rmind } else {
207 1.1.4.2 rmind if (__predict_false(mdpg != NULL)) {
208 1.1.4.2 rmind return pte_cached_bits();
209 1.1.4.2 rmind } else {
210 1.1.4.2 rmind return pte_iocached_bits();
211 1.1.4.2 rmind }
212 1.1.4.2 rmind }
213 1.1.4.2 rmind }
214 1.1.4.2 rmind
215 1.1.4.2 rmind static inline pt_entry_t
216 1.1.4.2 rmind pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
217 1.1.4.2 rmind int flags, bool kernel)
218 1.1.4.2 rmind {
219 1.1.4.2 rmind pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA;
220 1.1.4.2 rmind
221 1.1.4.2 rmind pt_entry |= pte_flag_bits(mdpg, flags);
222 1.1.4.2 rmind pt_entry |= pte_prot_bits(mdpg, prot);
223 1.1.4.2 rmind
224 1.1.4.2 rmind return pt_entry;
225 1.1.4.2 rmind }
226 1.1.4.2 rmind
227 1.1.4.2 rmind static inline pt_entry_t
228 1.1.4.2 rmind pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
229 1.1.4.2 rmind int flags)
230 1.1.4.2 rmind {
231 1.1.4.2 rmind pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA;
232 1.1.4.2 rmind
233 1.1.4.2 rmind pt_entry |= MMUDR_LK;
234 1.1.4.2 rmind pt_entry |= pte_flag_bits(mdpg, flags);
235 1.1.4.2 rmind pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */
236 1.1.4.2 rmind
237 1.1.4.2 rmind return pt_entry;
238 1.1.4.2 rmind }
#endif /* _KERNEL */
240 1.1.4.2 rmind
241 1.1.4.2 rmind #endif /* _M68K_PTE_COLDFIRE_H_ */
242