/*	$NetBSD: pmap.h,v 1.17.2.1 2009/05/13 17:17:48 jym Exp $	*/

/*	$OpenBSD: pmap.h,v 1.35 2007/12/14 18:32:23 deraadt Exp $	*/

/*
 * Copyright (c) 2002-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Pmap header for hppa.
 */

#ifndef	_HPPA_PMAP_H_
#define	_HPPA_PMAP_H_

#include <sys/mutex.h>
#include <machine/pte.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_pglist.h>
#include <uvm/uvm_object.h>

#ifdef	_KERNEL

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	struct vm_page	*pm_ptphint;
	struct vm_page	*pm_pdir_pg;	/* vm_page for pdir */
	volatile u_int32_t *pm_pdir;	/* page dir (read-only after create) */
	pa_space_t	pm_space;	/* space id (read-only after create) */
	u_int		pm_pid;		/* prot id (read-only after create) */

	struct pmap_statistics	pm_stats;
};

#define	PMAP_NC		0x100
/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * We need two flags for cacheability because pages/mappings can be marked
 * uncacheable for two reasons:
 *
 * 1) A page's contents may change under our feet and can never be
 *    cacheable, e.g. I/O space, DMA buffers.
 * 2) A page has non-equivalent aliases and must be (temporarily)
 *    marked uncacheable.
 *
 * A page that is marked PVF_NC can *never* be marked cacheable and will have
 * all of its mappings marked PVF_UNCACHEABLE.  A page marked only
 * PVF_UNCACHEABLE is marked that way because of non-equivalent aliases;
 * the marking may be removed once the non-equivalent aliases are removed.
 */

#define	PVF_NC		0x2000		/* pg is never cacheable */

#define	PVF_MOD		PTE_PROT(TLB_DIRTY)	/* pg/mp is modified */
#define	PVF_REF		PTE_PROT(TLB_REFTRAP)	/* pg/mp (inv) is referenced */
#define	PVF_WRITE	PTE_PROT(TLB_WRITE)	/* pg/mp is writable */
#define	PVF_UNCACHEABLE	PTE_PROT(TLB_UNCACHEABLE)
						/* pg/mp is uncacheable */

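/*
 * A page is considered aliased when it has been marked uncacheable
 * for aliasing reasons only, i.e. PVF_UNCACHEABLE is set but the
 * permanent PVF_NC is not.
 */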
#define	pmap_is_aliased(pg)					\
	(((pg)->mdpage.pvh_attrs & PVF_NC) == 0 &&		\
	 ((pg)->mdpage.pvh_attrs & PVF_UNCACHEABLE) != 0)

#define	HPPA_MAX_PID	0xfffa
#define	HPPA_SID_MAX	0x7ffd

/*
 * DON'T CHANGE THIS - this is assumed in lots of places.
 */
#define	HPPA_SID_KERNEL	0
#define	HPPA_PID_KERNEL	2
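/*
 * Note the dependency on pmap_sid2pid() below:
 * pmap_sid2pid(HPPA_SID_KERNEL) == (0 + 1) << 1 == 2 == HPPA_PID_KERNEL.
 */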

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry	*pv_next;
	struct pmap	*pv_pmap;	/* the pmap */
	vaddr_t		pv_va;		/* the virtual address + flags */
#define	PV_VAMASK	(~(PAGE_SIZE - 1))
#define	PV_KENTER	0x001

	struct vm_page	*pv_ptp;	/* the vm_page of the PTP */
};

extern int pmap_hptsize;
extern struct pdc_hwtlb pdc_hwtlb;

/*
 * pool quickmaps
 */
static inline vaddr_t
hppa_map_poolpage(paddr_t pa)
{
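	/*
	 * Kernel pool pages are used through the kernel's equivalent
	 * (VA == PA) mapping, so no explicit map operation is needed.
	 */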
	return (vaddr_t)pa;
}

static inline paddr_t
hppa_unmap_poolpage(vaddr_t va)
{
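	/*
	 * Purge the data cache for this page before it is given back,
	 * so that no stale cache lines for this address remain.
	 */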
	pdcache(HPPA_SID_KERNEL, va, PAGE_SIZE);

	return (paddr_t)va;
}

#define	PMAP_MAP_POOLPAGE(pa)	hppa_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	hppa_unmap_poolpage(va)

/*
 * According to the PA-RISC manual, aliased VAs should differ only in
 * their high 12 bits.
 */
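/*
 * PMAP_PREFER() below therefore keeps the HPPA_PGAOFF (cache alias)
 * bits of the object offset "o" in the hint and, if that would move
 * the hint below its previous value, advances it by one HPPA_PGALIAS
 * stride so the returned hint never decreases.
 */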
#define	PMAP_PREFER(o,h,s,td) do {					\
	vaddr_t pmap_prefer_hint;					\
	pmap_prefer_hint = (*(h) & HPPA_PGAMASK) | ((o) & HPPA_PGAOFF);	\
	if (pmap_prefer_hint < *(h))					\
		pmap_prefer_hint += HPPA_PGALIAS;			\
	*(h) = pmap_prefer_hint;					\
} while(0)

#define	pmap_sid2pid(s)		(((s) + 1) << 1)
#define	pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)	((pmap)->pm_stats.wired_count)
#define	pmap_update(p)

#define	pmap_copy(dpmap,spmap,da,len,sa)

#define	pmap_clear_modify(pg)	pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
#define	pmap_clear_reference(pg) \
	pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
#define	pmap_is_modified(pg)	pmap_testbit(pg, PTE_PROT(TLB_DIRTY))
#define	pmap_is_referenced(pg)	pmap_testbit(pg, PTE_PROT(TLB_REFTRAP))
#define	pmap_phys_address(ppn)	((ppn) << PAGE_SHIFT)

void	pmap_activate(struct lwp *);

void	pmap_bootstrap(vaddr_t);
bool	pmap_changebit(struct vm_page *, u_int, u_int);
bool	pmap_testbit(struct vm_page *, u_int);
void	pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void	pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
void	pmap_page_remove(struct vm_page *pg);

static inline void
pmap_deactivate(struct lwp *l)
{
	/* Nothing. */
}

static inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

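/*
 * Translate a VM protection code into TLB protection bits via
 * hppa_prot[]; mappings in user pmaps additionally get TLB_USER.
 */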
static inline int
pmap_prot(struct pmap *pmap, int prot)
{
	extern u_int hppa_prot[];
	return (hppa_prot[prot] | (pmap == pmap_kernel() ? 0 : TLB_USER));
}

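/*
 * Reducing a page to read/execute only clears the write bit on its
 * mappings; removing all access removes the mappings entirely.
 */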
static inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & UVM_PROT_WRITE) == 0) {
		if (prot & (UVM_PROT_RX))
			pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE));
		else
			pmap_page_remove(pg);
	}
}

static inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & UVM_PROT_WRITE) == 0) {
		if (prot & (UVM_PROT_RX))
			pmap_write_protect(pmap, sva, eva, prot);
		else
			pmap_remove(pmap, sva, eva);
	}
}

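/*
 * VAs in the uppermost quadrant (at or above 0xc0000000) always use
 * the kernel space id; everything else uses the pmap's own space.
 */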
#define	pmap_sid(pmap, va) \
	((((va) & 0xc0000000) != 0xc0000000) ? \
	    (pmap)->pm_space : HPPA_SID_KERNEL)

#endif /* _KERNEL */

#endif /* _HPPA_PMAP_H_ */