/*	$NetBSD: vmpagemd.h,v 1.17 2020/12/20 16:38:26 skrll Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
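
/*
 * Machine-independent per-page metadata for the common pmap code:
 * struct vm_page_md carries page attribute flags and the head of the
 * page's pv (physical-to-virtual) entry list, together with the lock
 * and generation-count helpers that guard that list.
 */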

#ifndef _UVM_PMAP_VMPAGEMD_H_
#define	_UVM_PMAP_VMPAGEMD_H_

#ifdef _LOCORE
#error use assym.h instead
#endif

//#ifdef _MODULE
//#error this file should not be included by loadable kernel modules
//#endif

#ifdef _KERNEL_OPT
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#endif

#include <sys/atomic.h>
#include <sys/mutex.h>

#define	__HAVE_VM_PAGE_MD

typedef struct pv_entry {
	struct pv_entry *pv_next;	/* next entry on the page's pv list */
	struct pmap *pv_pmap;		/* pmap that owns this mapping */
	vaddr_t pv_va;			/* mapped VA; low bits carry flags */
#define	PV_KENTER	__BIT(0)	/* entered via pmap_kenter_pa() */
} *pv_entry_t;

#define	PV_ISKENTER_P(pv)	((((pv)->pv_va) & PV_KENTER) != 0)

#ifndef _MODULE

#define	VM_PAGEMD_VMPAGE	__BIT(0)	/* page is vm managed */
#define	VM_PAGEMD_REFERENCED	__BIT(1)	/* page has been referenced */
#define	VM_PAGEMD_MODIFIED	__BIT(2)	/* page has been modified */
#define	VM_PAGEMD_POOLPAGE	__BIT(3)	/* page is used as a poolpage */
#define	VM_PAGEMD_EXECPAGE	__BIT(4)	/* page is exec mapped */
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_UNCACHED	__BIT(5)	/* page is mapped uncached */
#endif

#define	VM_PAGEMD_VMPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_VMPAGE) != 0)
#define	VM_PAGEMD_REFERENCED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
#define	VM_PAGEMD_MODIFIED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
#define	VM_PAGEMD_POOLPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
#define	VM_PAGEMD_EXECPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_CACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) == 0)
#define	VM_PAGEMD_UNCACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) != 0)
#endif

#endif /* !_MODULE */

struct vm_page_md {
	volatile unsigned long mdpg_attrs;	/* page attributes */
	struct pv_entry mdpg_first;		/* pv_entry first */
#if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
	kmutex_t *mdpg_lock;			/* pv list lock */
#endif
};

#ifndef _MODULE
#if defined(MULTIPROCESSOR) || defined(MODULAR)
#define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg)	(mdpg)->mdpg_lock = NULL
#else
#define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg)	__nothing
#endif /* MULTIPROCESSOR || MODULAR */

#define	VM_PAGEMD_PVLIST_LOCK(mdpg)	pmap_pvlist_lock(mdpg, 1)
#define	VM_PAGEMD_PVLIST_READLOCK(mdpg)	pmap_pvlist_lock(mdpg, 0)
#define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)	pmap_pvlist_unlock(mdpg)
#define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)	pmap_pvlist_locked_p(mdpg)
/* The pv list generation number lives in the bits above bit 15. */
#define	VM_PAGEMD_PVLIST_GEN(mdpg)	((mdpg)->mdpg_attrs >> 16)

#define	VM_PAGEMD_PVLIST_EMPTY_P(mdpg)	((mdpg)->mdpg_first.pv_pmap == NULL)

#ifdef _KERNEL
#if defined(MULTIPROCESSOR) || defined(MODULAR)
kmutex_t *pmap_pvlist_lock_addr(struct vm_page_md *);
#else
extern kmutex_t pmap_pvlist_mutex;
static __inline kmutex_t *
pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
{
	return &pmap_pvlist_mutex;
}
#endif

/*
 * Take the pv list lock.  A non-zero increment bumps the generation
 * number so that other lockers can tell the list may have changed;
 * the generation in force before the bump is returned.
 */
static __inline uintptr_t
pmap_pvlist_lock(struct vm_page_md *mdpg, uintptr_t increment)
{
	mutex_spin_enter(pmap_pvlist_lock_addr(mdpg));
	const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
	mdpg->mdpg_attrs += increment << 16;
	return gen;
}

/* Release the pv list lock, returning the current generation number. */
static __inline uintptr_t
pmap_pvlist_unlock(struct vm_page_md *mdpg)
{
	const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
	mutex_spin_exit(pmap_pvlist_lock_addr(mdpg));
	return gen;
}
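
/*
 * Illustrative sketch (hypothetical caller, not code from this file):
 * the generation number lets a caller that must drop the pv list lock
 * detect whether a writer (VM_PAGEMD_PVLIST_LOCK bumps the generation)
 * slipped in before the lock was retaken:
 *
 *	uintptr_t gen = VM_PAGEMD_PVLIST_READLOCK(mdpg);
 *	...examine the pv list...
 *	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 *	...potentially blocking work...
 *	if (VM_PAGEMD_PVLIST_READLOCK(mdpg) != gen) {
 *		...the pv list may have changed; revalidate...
 *	}
 *	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 */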

static __inline bool
pmap_pvlist_locked_p(struct vm_page_md *mdpg)
{

	return mutex_owned(pmap_pvlist_lock_addr(mdpg));
}
#endif /* _KERNEL */

/*
 * Initialize a page's metadata: no pv entries yet, attributes marked
 * VM-managed.  pv_va starts as the page's physical address, which is
 * page-aligned, so the PV_KENTER bit in the low bits starts out clear.
 */
#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.mdpg_first.pv_next = NULL;				\
	(pg)->mdpage.mdpg_first.pv_pmap = NULL;				\
	(pg)->mdpage.mdpg_first.pv_va = VM_PAGE_TO_PHYS(pg);		\
	(pg)->mdpage.mdpg_attrs = VM_PAGEMD_VMPAGE;			\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pg)->mdpage);			\
} while (/* CONSTCOND */ 0)

#endif /* !_MODULE */

#endif /* _UVM_PMAP_VMPAGEMD_H_ */
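
/*
 * Illustrative sketch (hypothetical caller, not from this file): after
 * VM_MDPAGE_INIT() the page is flagged as VM-managed and its pv list
 * is empty, so both of these assertions hold:
 *
 *	struct vm_page *pg = ...;
 *	VM_MDPAGE_INIT(pg);
 *	KASSERT(VM_PAGEMD_VMPAGE_P(&pg->mdpage));
 *	KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(&pg->mdpage));
 */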