/*	$NetBSD: pte.h,v 1.11 2020/08/22 15:34:51 skrll Exp $	*/

/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _POWERPC_BOOKE_PTE_H_
#define	_POWERPC_BOOKE_PTE_H_

#ifndef _LOCORE
#ifndef __BSD_PT_ENTRY_T
#define	__BSD_PT_ENTRY_T	__uint32_t
typedef __BSD_PT_ENTRY_T	pt_entry_t;
#define	PRIxPTE			PRIx32
#endif
#endif

#include <powerpc/booke/spr.h>

/*
 * The PTE format is software-defined and must be translated into the
 * various MAS registers.  The X, W, and R permission bits are separated
 * by single bits so that they can map to the MAS3 bits UX/UW/UR or
 * SX/SW/SR by a mask and a shift.
 */
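/*
 * Illustrative layout sketch, assuming the usual e500 encodings from
 * <powerpc/booke/spr.h> (MAS3_U0 == 0x200 down to MAS3_U3 == 0x040,
 * with MAS3_UX..MAS3_SR in the low six bits); check spr.h before
 * relying on the exact values:
 *
 *	PTE_WIRED	0x00000800	(MAS3_U0 << 2)
 *	PTE_xX		0x00000400	(MAS3_U0 << 1)
 *	PTE_UNSYNCED	0x00000200	(MAS3_U0)
 *	PTE_xW		0x00000100	(MAS3_U1)
 *	PTE_UNMODIFIED	0x00000080	(MAS3_U2)
 *	PTE_xR		0x00000040	(MAS3_U3)
 *
 * so (pte & PTE_RWX_MASK) >> PTE_RWX_SHIFT lands on SX/SW/SR, while
 * (pte & PTE_RWX_MASK) >> (PTE_RWX_SHIFT - 1) lands on UX/UW/UR.
 */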
#define	PTE_IO		(PTE_I|PTE_G|PTE_xW|PTE_xR)
#define	PTE_DEFAULT	(PTE_M|PTE_xX|PTE_xW|PTE_xR)

#define	PTE_MAS3_MASK	(MAS3_RPN|MAS3_U2|MAS3_U0)
#define	PTE_MAS2_MASK	(MAS2_WIMGE)

#define	PTE_RPN_MASK	MAS3_RPN	/* MAS3[RPN] */
#define	PTE_RWX_MASK	(PTE_xX|PTE_xW|PTE_xR)
#define	PTE_WIRED	(MAS3_U0 << 2)	/* page is wired (PTE only) */
#define	PTE_xX		(MAS3_U0 << 1)	/* MAS3[UX] | MAS3[SX] */
#define	PTE_UNSYNCED	MAS3_U0		/* page needs isync */
#define	PTE_xW		MAS3_U1		/* MAS3[UW] | MAS3[SW] */
#define	PTE_UNMODIFIED	MAS3_U2		/* page is unmodified */
#define	PTE_xR		MAS3_U3		/* MAS3[UR] | MAS3[SR] */
#define	PTE_RWX_SHIFT	6
#define	PTE_UNUSED	0x00000020

#define	PTE_WIMGE_MASK	MAS2_WIMGE
#define	PTE_WIG		(PTE_W|PTE_I|PTE_G)
#define	PTE_W		MAS2_W		/* Write-through */
#define	PTE_I		MAS2_I		/* cache-Inhibited */
#define	PTE_M		MAS2_M		/* Memory coherence */
#define	PTE_G		MAS2_G		/* Guarded */
#define	PTE_E		MAS2_E		/* [Little] Endian */

#ifndef _LOCORE
#ifdef _KERNEL

static __inline uint32_t
pte_value(pt_entry_t pt_entry)
{
	return pt_entry;
}

static __inline bool
pte_cached_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_I) == 0;
}

static __inline bool
pte_modified_p(pt_entry_t pt_entry)
{
	/* Modified means writable with the UNMODIFIED tracking bit cleared. */
	return (pt_entry & (PTE_UNMODIFIED|PTE_xW)) == PTE_xW;
}

static __inline bool
pte_valid_p(pt_entry_t pt_entry)
{
	/* An all-zero PTE is the invalid (non-valid) entry. */
	return pt_entry != 0;
}

static __inline bool
pte_exec_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xX) != 0;
}

static __inline bool
pte_readonly_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xW) == 0;
}

static __inline bool
pte_deferred_exec_p(pt_entry_t pt_entry)
{
	//return (pt_entry & (PTE_xX|PTE_UNSYNCED)) == (PTE_xX|PTE_UNSYNCED);
	return (pt_entry & PTE_UNSYNCED) == PTE_UNSYNCED;
}

static __inline bool
pte_wired_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_WIRED) != 0;
}

static __inline pt_entry_t
pte_nv_entry(bool kernel)
{
	return 0;
}

static __inline paddr_t
pte_to_paddr(pt_entry_t pt_entry)
{
	return (paddr_t)(pt_entry & PTE_RPN_MASK);
}

static __inline pt_entry_t
pte_ionocached_bits(void)
{
	return PTE_I|PTE_G;
}

static __inline pt_entry_t
pte_iocached_bits(void)
{
	return PTE_G;
}

static __inline pt_entry_t
pte_nocached_bits(void)
{
	return PTE_M|PTE_I;
}
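/*
 * A minimal sketch, not part of the pmap API: because PTE_xX, PTE_xW
 * and PTE_xR sit one bit apart, a single mask-and-shift produces the
 * MAS3 permission bits.  The helper name pte_to_mas3_prot and the exact
 * MAS3 encodings (see spr.h) are assumptions made for illustration.
 */
static __inline uint32_t
pte_to_mas3_prot(pt_entry_t pt_entry, bool user)
{
	const uint32_t rwx = pt_entry & PTE_RWX_MASK;

	/* PTE_RWX_SHIFT selects SX/SW/SR; one bit less selects UX/UW/UR. */
	return user ? (rwx >> (PTE_RWX_SHIFT - 1)) : (rwx >> PTE_RWX_SHIFT);
}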
static __inline pt_entry_t
pte_cached_bits(void)
{
	return PTE_M;
}

static __inline pt_entry_t
pte_cached_change(pt_entry_t pt_entry, bool cached)
{
	return (pt_entry & ~PTE_I) | (cached ? 0 : PTE_I);
}

static __inline pt_entry_t
pte_wire_entry(pt_entry_t pt_entry)
{
	return pt_entry | PTE_WIRED;
}

static __inline pt_entry_t
pte_unwire_entry(pt_entry_t pt_entry)
{
	return pt_entry & ~PTE_WIRED;
}

static __inline pt_entry_t
pte_prot_nowrite(pt_entry_t pt_entry)
{
	return pt_entry & ~(PTE_xW|PTE_UNMODIFIED);
}

static __inline pt_entry_t
pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
{
	pt_entry &= ~(PTE_xW|PTE_UNMODIFIED);
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pt_entry &= ~(PTE_xX|PTE_UNSYNCED);
	return pt_entry;
}

static __inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pt_entry = PTE_xR;
	if (prot & VM_PROT_EXECUTE) {
		/*
		 * Alternative strategies for deferring icache synchronization
		 * on executable pages; only the #elif 1 arm is compiled.
		 */
#if 0
		pt_entry |= PTE_xX;
		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= PTE_UNSYNCED;
#elif 1
		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= PTE_UNSYNCED;
		else
			pt_entry |= PTE_xX;
#else
		pt_entry |= PTE_UNSYNCED;
#endif
	}
	if (prot & VM_PROT_WRITE) {
		pt_entry |= PTE_xW;
		if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
			pt_entry |= PTE_UNMODIFIED;
	}
	return pt_entry;
}

static __inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags)
{
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
}

static __inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags, bool kernel)
{
	pt_entry_t pt_entry = (pt_entry_t)pa & PTE_RPN_MASK;

	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(mdpg, prot);

	return pt_entry;
}
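/*
 * Usage sketch only, guarded out of compilation: how a pmap might
 * combine pte_make_enter() with pte_set() (defined below) to install
 * a managed, cacheable, read-only mapping.  "ptep", "pa" and "mdpg"
 * are hypothetical locals, not part of this header.
 */
#if 0
	pt_entry_t pt_entry = pte_make_enter(pa, mdpg, VM_PROT_READ, 0, false);
	pte_set(ptep, pt_entry);
#endif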
static __inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags)
{
	pt_entry_t pt_entry = (pt_entry_t)pa & PTE_RPN_MASK;

	pt_entry |= PTE_WIRED;
	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(NULL, prot);	/* pretend unmanaged */

	return pt_entry;
}

static __inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

#endif /* _KERNEL */
#endif /* !_LOCORE */

#endif /* !_POWERPC_BOOKE_PTE_H_ */