1 1.2 riastrad /* $NetBSD: pte_coldfire.h,v 1.2 2014/03/18 18:20:41 riastradh Exp $ */ 2 1.1 matt /*- 3 1.1 matt * Copyright (c) 2013 The NetBSD Foundation, Inc. 4 1.1 matt * All rights reserved. 5 1.1 matt * 6 1.1 matt * This code is derived from software contributed to The NetBSD Foundation 7 1.1 matt * by Matt Thomas of 3am Software Foundry. 8 1.1 matt * 9 1.1 matt * Redistribution and use in source and binary forms, with or without 10 1.1 matt * modification, are permitted provided that the following conditions 11 1.1 matt * are met: 12 1.1 matt * 1. Redistributions of source code must retain the above copyright 13 1.1 matt * notice, this list of conditions and the following disclaimer. 14 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright 15 1.1 matt * notice, this list of conditions and the following disclaimer in the 16 1.1 matt * documentation and/or other materials provided with the distribution. 17 1.1 matt * 18 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 1.1 matt * POSSIBILITY OF SUCH DAMAGE. 29 1.1 matt */ 30 1.1 matt 31 1.1 matt #ifndef _M68K_PTE_COLDFIRE_H_ 32 1.1 matt #define _M68K_PTE_COLDFIRE_H_ 33 1.1 matt 34 1.1 matt #ifdef __ASSEMBLY__ 35 1.1 matt #error use assym.h instead 36 1.1 matt #endif 37 1.1 matt 38 1.1 matt #ifndef __BSD_PT_ENTRY_T 39 1.1 matt #define __BSD_PT_ENTRY_T __uint32_t 40 1.1 matt typedef __BSD_PT_ENTRY_T pt_entry_t; 41 1.1 matt #endif 42 1.1 matt 43 1.1 matt #define MMUTR_VA __BITS(31,10) // Virtual Address 44 1.1 matt #define MMUTR_ID __BITS(9,2) // ASID 45 1.1 matt #define MMUTR_SG __BIT(1) // Shared Global 46 1.1 matt #define MMUTR_V __BIT(0) // Valid 47 1.1 matt 48 1.1 matt #define MMUDR_PA __BITS(31,10) // Physical Address 49 1.1 matt #define MMUDR_SZ __BITS(9,8) // Entry Size 50 1.1 matt #define MMUDR_SZ_1MB 0 51 1.1 matt #define MMUDR_SZ_4KB 1 52 1.1 matt #define MMUDR_SZ_8KB 2 53 1.1 matt #define MMUDR_SZ_16MB 3 54 1.1 matt #define MMUDR_CM __BITS(7,6) // Cache Mode 55 1.1 matt #define MMUDR_CM_WT 0 // Write-Through 56 1.1 matt #define MMUDR_CM_WB 1 // Write-Back (Copy-Back) 57 1.1 matt #define MMUDR_CM_NC 2 // Non-cacheable 58 1.1 matt #define MMUDR_CM_NCP 2 // Non-cacheable Precise 59 1.1 matt #define MMUDR_CM_NCI 3 // Non-cacheable Imprecise 60 1.1 matt #define MMUDR_SP __BIT(5) // Supervisor Protect 61 1.1 matt #define MMUDR_R __BIT(4) // Read Access 62 1.1 matt #define MMUDR_W __BIT(3) // Write Access 63 1.1 matt #define MMUDR_X __BIT(2) // Execute Access 64 1.1 matt #define MMUDR_LK __BIT(1) // Lock Entry 65 1.1 matt #define MMUDR_MBZ0 __BIT(0) // Must be zero 66 1.1 matt 67 1.1 matt /* 68 1.1 matt * The PTE basically the contents of MMUDR[31:2] | MMUAR[0]. 69 1.1 matt * We overload the meaning of MMUDR_LK for indicating wired. 70 1.1 matt * It will be cleared before writing to the TLB. 71 1.1 matt */ 72 1.1 matt 73 1.1 matt #ifdef _KERNEL 74 1.1 matt 75 1.1 matt static inline bool 76 1.1 matt pte_cached_p(pt_entry_t pt_entry) 77 1.1 matt { 78 1.1 matt return (pt_entry & MMUDR_CM_NC) != MMUDR_CM_NC; 79 1.1 matt } 80 1.1 matt 81 1.1 matt static inline bool 82 1.1 matt pte_modified_p(pt_entry_t pt_entry) 83 1.1 matt { 84 1.1 matt return (pt_entry & MMUDR_W) == MMUDR_W; 85 1.1 matt } 86 1.1 matt 87 1.1 matt static inline bool 88 1.1 matt pte_valid_p(pt_entry_t pt_entry) 89 1.1 matt { 90 1.1 matt return (pt_entry & MMUAR_V) == MMUAR_V; 91 1.1 matt } 92 1.1 matt 93 1.1 matt static inline bool 94 1.1 matt pte_exec_p(pt_entry_t pt_entry) 95 1.1 matt { 96 1.1 matt return (pt_entry & MMUDR_X) == MMUDR_X; 97 1.1 matt } 98 1.1 matt 99 1.1 matt static inline bool 100 1.1 matt pte_deferred_exec_p(pt_entry_t pt_entry) 101 1.1 matt { 102 1.1 matt return !pte_exec_p(pt_entry); 103 1.1 matt } 104 1.1 matt 105 1.1 matt static inline bool 106 1.1 matt pte_wired_p(pt_entry_t pt_entry) 107 1.1 matt { 108 1.1 matt return (pt_entry & MMUDR_LK) == MMUDR_LK; 109 1.1 matt } 110 1.1 matt 111 1.1 matt static inline pt_entry_t 112 1.1 matt pte_nv_entry(bool kernel) 113 1.1 matt { 114 1.1 matt return 0; 115 1.1 matt } 116 1.1 matt 117 1.1 matt static inline paddr_t 118 1.1 matt pte_to_paddr(pt_entry_t pt_entry) 119 1.1 matt { 120 1.1 matt return (paddr_t)(pt_entry & MMUDR_PA); 121 1.1 matt } 122 1.1 matt 123 1.1 matt static inline pt_entry_t 124 1.1 matt pte_ionocached_bits(void) 125 1.1 matt { 126 1.1 matt return MMUDR_CM_NCP; 127 1.1 matt } 128 1.1 matt 129 1.1 matt static inline pt_entry_t 130 1.1 matt pte_iocached_bits(void) 131 1.1 matt { 132 1.1 matt return MMUDR_CM_NCP; 133 1.1 matt } 134 1.1 matt 135 1.1 matt static inline pt_entry_t 136 1.1 matt pte_nocached_bits(void) 137 1.1 matt { 138 1.1 matt return MMUDR_CM_NCP; 139 1.1 matt } 140 1.1 matt 141 1.1 matt static inline pt_entry_t 142 1.1 matt pte_cached_bits(void) 143 1.1 matt { 144 1.1 matt return MMUDR_CM_WB; 145 1.1 matt } 146 1.1 matt 147 1.1 matt static inline pt_entry_t 148 1.1 matt pte_cached_change(pt_entry_t pt_entry, bool cached) 149 1.1 matt { 150 1.1 matt return (pt_entry & ~MMUDR_CM) | (cached ? MMUDR_CM_WB : MMUDR_CM_NCP); 151 1.1 matt } 152 1.1 matt 153 1.1 matt static inline pt_entry_t 154 1.1 matt pte_wire_entry(pt_entry_t pt_entry) 155 1.1 matt { 156 1.1 matt return pt_entry | MMUDR_LK; 157 1.1 matt } 158 1.1 matt 159 1.1 matt static inline pt_entry_t 160 1.1 matt pte_unwire_entry(pt_entry_t pt_entry) 161 1.1 matt { 162 1.1 matt return pt_entry & ~MMUDR_LK; 163 1.1 matt } 164 1.1 matt 165 1.1 matt static inline pt_entry_t 166 1.1 matt pte_prot_nowrite(pt_entry_t pt_entry) 167 1.1 matt { 168 1.1 matt return pt_entry & ~MMUDR_W; 169 1.1 matt } 170 1.1 matt 171 1.1 matt static inline pt_entry_t 172 1.1 matt pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot) 173 1.1 matt { 174 1.1 matt pt_entry &= ~MMUDR_W; 175 1.1 matt if ((newprot & VM_PROT_EXECUTE) == 0) 176 1.1 matt pt_entry &= ~MMUDR_X; 177 1.1 matt return pt_entry; 178 1.1 matt } 179 1.1 matt 180 1.1 matt static inline pt_entry_t 181 1.1 matt pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot) 182 1.1 matt { 183 1.1 matt KASSERT(prot & VM_PROT_READ); 184 1.1 matt pt_entry_t pt_entry = MMUDR_R; 185 1.1 matt if (prot & VM_PROT_EXECUTE) { 186 1.1 matt /* Only allow exec for managed pages */ 187 1.1 matt if (mdpg != NULL && VM_PAGEMD_EXECPAGE_P(mdpg)) 188 1.1 matt pt_entry |= MMUDR_X; 189 1.1 matt } 190 1.1 matt if (prot & VM_PROT_WRITE) { 191 1.1 matt if (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg)) 192 1.1 matt pt_entry |= MMUDR_W; 193 1.1 matt } 194 1.1 matt return pt_entry; 195 1.1 matt } 196 1.1 matt 197 1.1 matt static inline pt_entry_t 198 1.1 matt pte_flag_bits(struct vm_page_md *mdpg, int flags) 199 1.1 matt { 200 1.1 matt if (__predict_false(flags & PMAP_NOCACHE)) { 201 1.1 matt if (__predict_true(mdpg != NULL)) { 202 1.1 matt return pte_nocached_bits(); 203 1.1 matt } else { 204 1.1 matt return pte_ionocached_bits(); 205 1.1 matt } 206 1.1 matt } else { 207 1.1 matt if (__predict_false(mdpg != NULL)) { 208 1.1 matt return pte_cached_bits(); 209 1.1 matt } else { 210 1.1 matt return pte_iocached_bits(); 211 1.1 matt } 212 1.1 matt } 213 1.1 matt } 214 1.1 matt 215 1.1 matt static inline pt_entry_t 216 1.1 matt pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, 217 1.1 matt int flags, bool kernel) 218 1.1 matt { 219 1.1 matt pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA; 220 1.1 matt 221 1.1 matt pt_entry |= pte_flag_bits(mdpg, flags); 222 1.1 matt pt_entry |= pte_prot_bits(mdpg, prot); 223 1.1 matt 224 1.1 matt return pt_entry; 225 1.1 matt } 226 1.1 matt 227 1.1 matt static inline pt_entry_t 228 1.1 matt pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot, 229 1.1 matt int flags) 230 1.1 matt { 231 1.1 matt pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA; 232 1.1 matt 233 1.1 matt pt_entry |= MMUDR_LK; 234 1.1 matt pt_entry |= pte_flag_bits(mdpg, flags); 235 1.1 matt pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */ 236 1.1 matt 237 1.1 matt return pt_entry; 238 1.1 matt } 239 1.1 matt #endif /* _KERNEL_ */ 240 1.1 matt 241 1.1 matt #endif /* _M68K_PTE_COLDFIRE_H_ */ 242