1 1.9 skrll /* $NetBSD: vmpagemd.h,v 1.9 2019/06/19 10:00:19 skrll Exp $ */
2 1.1 christos
3 1.1 christos /*-
4 1.1 christos * Copyright (c) 2011 The NetBSD Foundation, Inc.
5 1.1 christos * All rights reserved.
6 1.1 christos *
7 1.1 christos * This code is derived from software contributed to The NetBSD Foundation
8 1.1 christos * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
9 1.1 christos * Agency and which was developed by Matt Thomas of 3am Software Foundry.
10 1.1 christos *
11 1.1 christos * This material is based upon work supported by the Defense Advanced Research
12 1.1 christos * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
13 1.1 christos * Contract No. N66001-09-C-2073.
14 1.1 christos * Approved for Public Release, Distribution Unlimited
15 1.1 christos *
16 1.1 christos * Redistribution and use in source and binary forms, with or without
17 1.1 christos * modification, are permitted provided that the following conditions
18 1.1 christos * are met:
19 1.1 christos * 1. Redistributions of source code must retain the above copyright
20 1.1 christos * notice, this list of conditions and the following disclaimer.
21 1.1 christos * 2. Redistributions in binary form must reproduce the above copyright
22 1.1 christos * notice, this list of conditions and the following disclaimer in the
23 1.1 christos * documentation and/or other materials provided with the distribution.
24 1.1 christos *
25 1.1 christos * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 1.1 christos * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.1 christos * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.1 christos * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 1.1 christos * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.1 christos * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.1 christos * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.1 christos * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.1 christos * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.1 christos * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.1 christos * POSSIBILITY OF SUCH DAMAGE.
36 1.1 christos */
37 1.1 christos
38 1.5 skrll #ifndef _UVM_PMAP_VMPAGEMD_H_
39 1.5 skrll #define _UVM_PMAP_VMPAGEMD_H_
40 1.1 christos
41 1.1 christos #ifdef _LOCORE
42 1.1 christos #error use assym.h instead
43 1.1 christos #endif
44 1.1 christos
45 1.3 matt //#ifdef _MODULE
46 1.3 matt //#error this file should not be included by loadable kernel modules
47 1.3 matt //#endif
48 1.1 christos
49 1.2 matt #ifdef _KERNEL_OPT
50 1.1 christos #include "opt_modular.h"
51 1.1 christos #include "opt_multiprocessor.h"
52 1.2 matt #endif
53 1.1 christos
54 1.3 matt #include <sys/atomic.h>
55 1.1 christos #include <sys/mutex.h>
56 1.1 christos
57 1.1 christos #define __HAVE_VM_PAGE_MD
58 1.1 christos
/*
 * One entry on a physical page's pv (physical -> virtual) list: records a
 * single (pmap, virtual address) mapping of that page.  The list head is
 * embedded directly in struct vm_page_md as mdpg_first; additional
 * mappings are chained through pv_next.
 */
59 1.1 christos typedef struct pv_entry {
60 1.1 christos struct pv_entry *pv_next;	/* next mapping of the same page */
61 1.1 christos struct pmap *pv_pmap;	/* pmap this mapping belongs to */
62 1.1 christos vaddr_t pv_va;	/* virtual address of the mapping */
/*
 * NOTE(review): PV_KENTER appears intended as a flag carried in the low
 * bit(s) of pv_va (marking pmap_kenter_pa()-style unmanaged entries) --
 * confirm against the pmap implementation that consumes it.
 */
63 1.7 skrll #define PV_KENTER __BIT(0)
64 1.1 christos } *pv_entry_t;
65 1.1 christos
66 1.3 matt #ifndef _MODULE
67 1.3 matt
/*
 * Page attribute flags kept in vm_page_md::mdpg_attrs.  Only the low bits
 * are flags; bits 16 and up hold the pv-list generation counter (see
 * VM_PAGEMD_PVLIST_GEN below, and the "increment << 16" update in
 * pmap_pvlist_lock()).
 */
68 1.4 skrll #define VM_PAGEMD_REFERENCED __BIT(0) /* page has been referenced */
69 1.4 skrll #define VM_PAGEMD_MODIFIED __BIT(1) /* page has been modified */
70 1.4 skrll #define VM_PAGEMD_POOLPAGE __BIT(2) /* page is used as a poolpage */
71 1.4 skrll #define VM_PAGEMD_EXECPAGE __BIT(3) /* page is exec mapped */
72 1.3 matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
73 1.4 skrll #define VM_PAGEMD_UNCACHED __BIT(4) /* page is mapped uncached */
74 1.1 christos #endif
75 1.1 christos
/* Convenience predicates testing the flag bits above on a vm_page_md. */
76 1.3 matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
77 1.1 christos #define VM_PAGEMD_CACHED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) == 0)
78 1.1 christos #define VM_PAGEMD_UNCACHED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) != 0)
79 1.1 christos #endif
80 1.1 christos #define VM_PAGEMD_MODIFIED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
81 1.1 christos #define VM_PAGEMD_REFERENCED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
82 1.1 christos #define VM_PAGEMD_POOLPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
83 1.1 christos #define VM_PAGEMD_EXECPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
84 1.1 christos
85 1.3 matt #endif /* !_MODULE */
86 1.3 matt
/*
 * Machine-dependent per-page metadata (enabled via __HAVE_VM_PAGE_MD).
 * mdpg_attrs packs the VM_PAGEMD_* flag bits in its low bits and the
 * pv-list generation count in bits >= 16.  The first pv entry is embedded
 * to avoid an allocation for the common single-mapping case.
 */
87 1.1 christos struct vm_page_md {
88 1.3 matt volatile unsigned long mdpg_attrs; /* page attributes */
89 1.6 skrll struct pv_entry mdpg_first; /* pv_entry first */
/* Per-page lock pointer only exists when more than one lock can exist. */
90 1.3 matt #if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
91 1.6 skrll kmutex_t *mdpg_lock; /* pv list lock */
92 1.3 matt #endif
93 1.3 matt };
94 1.3 matt
95 1.3 matt #ifndef _MODULE
/*
 * Pv-list lock initialization: with MULTIPROCESSOR or MODULAR kernels a
 * per-page lock pointer exists and starts out NULL; otherwise there is no
 * mdpg_lock field at all and initialization is a no-op.
 */
96 1.1 christos #if defined(MULTIPROCESSOR) || defined(MODULAR)
97 1.1 christos #define VM_PAGEMD_PVLIST_LOCK_INIT(mdpg) \
98 1.1 christos (mdpg)->mdpg_lock = NULL
99 1.3 matt #else
100 1.3 matt #define VM_PAGEMD_PVLIST_LOCK_INIT(mdpg) do { } while (/*CONSTCOND*/ 0)
101 1.3 matt #endif /* MULTIPROCESSOR || MODULAR */
102 1.3 matt
/*
 * Pv-list locking interface.  LOCK (write intent) bumps the generation
 * counter; READLOCK takes the same mutex but leaves the generation
 * unchanged -- the only difference is the increment argument.  Both
 * return the generation seen at acquisition so a caller that drops and
 * retakes the lock can detect intervening pv-list changes.
 */
103 1.9 skrll #define VM_PAGEMD_PVLIST_LOCK(mdpg) pmap_pvlist_lock(mdpg, 1)
104 1.9 skrll #define VM_PAGEMD_PVLIST_READLOCK(mdpg) pmap_pvlist_lock(mdpg, 0)
105 1.9 skrll #define VM_PAGEMD_PVLIST_UNLOCK(mdpg) pmap_pvlist_unlock(mdpg)
106 1.9 skrll #define VM_PAGEMD_PVLIST_LOCKED_P(mdpg) pmap_pvlist_locked_p(mdpg)
/* Generation counter lives in the upper bits of mdpg_attrs. */
107 1.9 skrll #define VM_PAGEMD_PVLIST_GEN(mdpg) ((mdpg)->mdpg_attrs >> 16)
108 1.3 matt
109 1.3 matt #ifdef _KERNEL
/*
 * Return the mutex guarding this page's pv list.  On MULTIPROCESSOR or
 * MODULAR kernels the lookup is out-of-line (implemented in the pmap
 * code, which presumably selects a per-page/hashed lock -- confirm in
 * pmap.c); otherwise every page shares the single global
 * pmap_pvlist_mutex and the lookup collapses to a constant.
 */
110 1.3 matt #if defined(MULTIPROCESSOR) || defined(MODULAR)
111 1.3 matt kmutex_t *pmap_pvlist_lock_addr(struct vm_page_md *);
112 1.1 christos #else
113 1.3 matt extern kmutex_t pmap_pvlist_mutex;
114 1.8 christos static __inline kmutex_t *
115 1.3 matt pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
116 1.3 matt {
117 1.3 matt return &pmap_pvlist_mutex;
118 1.3 matt }
119 1.3 matt #endif
120 1.3 matt
/*
 * Acquire the pv-list spin mutex for mdpg and return the generation
 * count as it stood at acquisition.  'increment' is 0 for a read lock or
 * 1 for a write lock (see VM_PAGEMD_PVLIST_LOCK/READLOCK); it is shifted
 * into the generation field (bits >= 16 of mdpg_attrs) while the mutex is
 * held, so writers advance the generation and readers do not.
 */
121 1.8 christos static __inline uintptr_t
122 1.3 matt pmap_pvlist_lock(struct vm_page_md *mdpg, uintptr_t increment)
123 1.3 matt {
124 1.3 matt mutex_spin_enter(pmap_pvlist_lock_addr(mdpg));
/* Snapshot the generation before bumping it, under the mutex. */
125 1.3 matt const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
126 1.3 matt mdpg->mdpg_attrs += increment << 16;
127 1.3 matt return gen;
128 1.3 matt }
129 1.3 matt
/*
 * Release the pv-list mutex for mdpg and return the generation count.
 * The generation is deliberately read before mutex_spin_exit() so the
 * value is stable (taken while still holding the lock); comparing it with
 * the value returned by a prior pmap_pvlist_lock() tells the caller
 * whether any writer modified the pv list in between.
 */
130 1.8 christos static __inline uintptr_t
131 1.3 matt pmap_pvlist_unlock(struct vm_page_md *mdpg)
132 1.3 matt {
133 1.3 matt const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
134 1.3 matt mutex_spin_exit(pmap_pvlist_lock_addr(mdpg));
135 1.3 matt return gen;
136 1.3 matt }
137 1.3 matt
/*
 * True if the pv-list mutex for mdpg is currently held (per
 * mutex_owned(9)).  Intended for assertions/diagnostics rather than as a
 * synchronization primitive.
 */
138 1.8 christos static __inline bool
139 1.3 matt pmap_pvlist_locked_p(struct vm_page_md *mdpg)
140 1.3 matt {
141 1.3 matt return mutex_owned(pmap_pvlist_lock_addr(mdpg));
142 1.3 matt }
143 1.3 matt #endif /* _KERNEL */
144 1.1 christos
/*
 * Initialize the machine-dependent metadata of a vm_page: empty pv list,
 * no attribute flags, generation 0, and the lock reset via
 * VM_PAGEMD_PVLIST_LOCK_INIT.  The embedded first pv entry's pv_va is
 * seeded with the page's physical address (pv_pmap == NULL marks the
 * entry unused).
 */
145 1.1 christos #define VM_MDPAGE_INIT(pg) \
146 1.1 christos do { \
147 1.1 christos (pg)->mdpage.mdpg_first.pv_next = NULL; \
148 1.1 christos (pg)->mdpage.mdpg_first.pv_pmap = NULL; \
149 1.1 christos (pg)->mdpage.mdpg_first.pv_va = (pg)->phys_addr; \
150 1.1 christos (pg)->mdpage.mdpg_attrs = 0; \
151 1.1 christos VM_PAGEMD_PVLIST_LOCK_INIT(&(pg)->mdpage); \
152 1.1 christos } while (/* CONSTCOND */ 0)
153 1.1 christos
154 1.3 matt #endif /* _MODULE */
155 1.3 matt
156 1.5 skrll #endif /* _UVM_PMAP_VMPAGEMD_H_ */
157