/*	$NetBSD: pmap.h,v 1.18 2023/06/12 19:04:14 skrll Exp $	*/
2 1.1 matt
3 1.3 maxv /*
4 1.9 skrll * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.9 skrll * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
9 1.9 skrll * Nick Hudson.
10 1.1 matt *
11 1.1 matt * Redistribution and use in source and binary forms, with or without
12 1.1 matt * modification, are permitted provided that the following conditions
13 1.1 matt * are met:
14 1.1 matt * 1. Redistributions of source code must retain the above copyright
15 1.1 matt * notice, this list of conditions and the following disclaimer.
16 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 matt * notice, this list of conditions and the following disclaimer in the
18 1.1 matt * documentation and/or other materials provided with the distribution.
19 1.1 matt *
20 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
31 1.1 matt */
32 1.1 matt
33 1.1 matt #ifndef _RISCV_PMAP_H_
34 1.11 simonb #define _RISCV_PMAP_H_
35 1.1 matt
36 1.1 matt #ifdef _KERNEL_OPT
37 1.1 matt #include "opt_modular.h"
38 1.1 matt #endif
39 1.1 matt
40 1.1 matt #if !defined(_MODULE)
41 1.1 matt
42 1.9 skrll #include <sys/cdefs.h>
43 1.1 matt #include <sys/types.h>
44 1.1 matt #include <sys/pool.h>
45 1.1 matt #include <sys/evcnt.h>
46 1.1 matt
47 1.2 maxv #include <uvm/uvm_physseg.h>
48 1.1 matt #include <uvm/pmap/vmpagemd.h>
49 1.1 matt
50 1.1 matt #include <riscv/pte.h>
51 1.9 skrll #include <riscv/sysreg.h>
52 1.1 matt
/*
 * Page-table geometry: a page-table page holds NPTEPG entries, and both
 * the PDE-table and segment-table levels are one page wide.
 */
#define PMAP_SEGTABSIZE	NPTEPG
#define PMAP_PDETABSIZE	NPTEPG

#ifdef _LP64
/* log2(sizeof(pt_entry_t)): 8-byte PTEs on 64-bit. */
#define PTPSHIFT	3
/* This is SV57. */
//#define XSEGSHIFT	(SEGSHIFT + SEGLENGTH + SEGLENGTH + SEGLENGTH)

/* This is SV48. */
//#define XSEGSHIFT	(SEGSHIFT + SEGLENGTH + SEGLENGTH)

/* This is SV39. */
#define XSEGSHIFT	(SEGSHIFT + SEGLENGTH)
#define NBXSEG		(1ULL << XSEGSHIFT)
#define XSEGOFSET	(NBXSEG - 1)	/* byte offset into xsegment */
#define XSEGLENGTH	(PGSHIFT - 3)
#define NXSEGPG		(1 << XSEGLENGTH)
#else
/* log2(sizeof(pt_entry_t)): 4-byte PTEs on 32-bit. */
#define PTPSHIFT	2
/* Sv32 has no extra level; an xsegment degenerates to a segment. */
#define XSEGSHIFT	SEGSHIFT
#endif

/* VA bits resolved by one page-table page, and the derived segment sizes. */
#define SEGLENGTH	(PGSHIFT - PTPSHIFT)
#define SEGSHIFT	(SEGLENGTH + PGSHIFT)
#define NBSEG		(1 << SEGSHIFT)		/* bytes/segment */
#define SEGOFSET	(NBSEG - 1)		/* byte offset into segment */

/* ASID/PID 0 belongs to the kernel pmap. */
#define KERNEL_PID	0

/* RISC-V walks page tables in hardware; no software TLB refill. */
#define PMAP_HWPAGEWALKER	1
#define PMAP_TLB_MAX		MAXCPUS
#ifdef _LP64
/* Guaranteed-unmapped sentinel addresses, just below the kernel VA space. */
#define PMAP_INVALID_PDETAB_ADDRESS	((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#else
#define PMAP_INVALID_PDETAB_ADDRESS	((pmap_pdetab_t *)0xdeadbeef)
#define PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)0xdeadbeef)
#endif
/* The ASID space is sized by the width of the SATP.ASID field. */
#define PMAP_TLB_NUM_PIDS		(__SHIFTOUT_MASK(SATP_ASID) + 1)
#define PMAP_TLB_BITMAP_LENGTH		PMAP_TLB_NUM_PIDS
// Should use SBI TLB ops
#define PMAP_TLB_NEED_SHOOTDOWN		1
#define PMAP_TLB_FLUSH_ASID_ON_RESET	false

/* Physical addresses need no cookie translation on this port. */
#define pmap_phys_address(x)		(x)
98 1.1 matt
/* printf(3) conversion for a pt_entry_t: 64-bit under _LP64, else 32-bit. */
#ifndef __BSD_PTENTRY_T__
#define __BSD_PTENTRY_T__
#ifdef _LP64
#define PRIxPTE		PRIx64
#else
#define PRIxPTE		PRIx32
#endif
#endif /* __BSD_PTENTRY_T__ */

/* This port provides pmap_procwr() for instruction-stream sync. */
#define PMAP_NEED_PROCWR
/*
 * Synchronize the instruction stream after the kernel has written code
 * (e.g. breakpoints, loaded text) into process p's address space.
 *
 * "fence rw,rw" orders the preceding stores; "fence.i" then makes this
 * hart's instruction fetches observe them.  The arguments are unused:
 * the whole instruction-fetch view is synchronized, not just [va, va+len).
 * NOTE(review): fence.i acts on the executing hart only -- presumably
 * remote harts are covered by other sync paths; confirm.
 */
static inline void
pmap_procwr(struct proc *p, vaddr_t va, vsize_t len)
{
	__asm __volatile("fence\trw,rw; fence.i" ::: "memory");
}
114 1.1 matt
115 1.1 matt #include <uvm/pmap/tlb.h>
116 1.15 skrll #include <uvm/pmap/pmap_devmap.h>
117 1.1 matt #include <uvm/pmap/pmap_tlb.h>
118 1.18 skrll #include <uvm/pmap/pmap_synci.h>
119 1.1 matt
/* The kernel VA space can grow, and pmap may steal pages at bootstrap. */
#define PMAP_GROWKERNEL
#define PMAP_STEAL_MEMORY

#ifdef _KERNEL

/* Machine-dependent part embedded in struct pmap. */
#define __HAVE_PMAP_MD
struct pmap_md {
	/* physical page number; presumably the root page-table page
	 * programmed into SATP -- confirm against pmap_md_xtab_activate() */
	paddr_t md_ppn;
};
129 1.1 matt
/*
 * Sync the entire icache: a no-op on this port.  Presumably the fence.i
 * issued by pmap_procwr()/pmap_md_page_syncicache() suffices -- confirm.
 */
static inline void
pmap_md_icache_sync_all(void)
{
}
134 1.18 skrll
/*
 * Sync the icache for an indexed VA range: a no-op on this port
 * (no virtually-indexed icache maintenance is required here).
 */
static inline void
pmap_md_icache_sync_range_index(vaddr_t va, vsize_t size)
{
}
139 1.18 skrll
/* MD hooks called by the MI uvm/pmap code. */
struct vm_page *
	pmap_md_alloc_poolpage(int);
vaddr_t	pmap_md_map_poolpage(paddr_t, vsize_t);
void	pmap_md_unmap_poolpage(vaddr_t, vsize_t);

bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t	pmap_md_direct_map_paddr(paddr_t);
void	pmap_md_init(void);
bool	pmap_md_io_vaddr_p(vaddr_t);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
void	pmap_md_pdetab_init(struct pmap *);
void	pmap_md_pdetab_fini(struct pmap *);
void	pmap_md_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
void	pmap_md_xtab_activate(struct pmap *, struct lwp *);
void	pmap_md_xtab_deactivate(struct pmap *);

/* Early pmap initialization. */
void	pmap_bootstrap(vaddr_t, vaddr_t);

/* Enter a VA/PA range; presumably returns the size mapped -- confirm. */
vsize_t	pmap_kenter_range(vaddr_t, paddr_t, vsize_t, vm_prot_t, u_int);
160 1.15 skrll
#ifdef _LP64
/* Bounds of the direct (physical) mapping window. */
extern vaddr_t pmap_direct_base;
extern vaddr_t pmap_direct_end;
/* Translate between a physical address and its direct-mapped KVA. */
#define PMAP_DIRECT_MAP(pa)	RISCV_PA_TO_KVA(pa)
#define PMAP_DIRECT_UNMAP(va)	RISCV_KVA_TO_PA(va)

/*
 * Other hooks for the pool allocator.
 */
#define POOL_PHYSTOV(pa)	RISCV_PA_TO_KVA((paddr_t)(pa))
#define POOL_VTOPHYS(va)	RISCV_KVA_TO_PA((vaddr_t)(va))

#endif /* _LP64 */

/* Round an address down/up to a megapage (segment) boundary. */
#define MEGAPAGE_TRUNC(x)	((x) & ~SEGOFSET)
#define MEGAPAGE_ROUND(x)	MEGAPAGE_TRUNC((x) + SEGOFSET)

/* Mapping flag: map as device memory. */
#define PMAP_DEV		__BIT(29)	/* 0x2000_0000 */

/* Device mappings are aligned and sized to megapages. */
#define DEVMAP_ALIGN(x)	MEGAPAGE_TRUNC((x))
#define DEVMAP_SIZE(x)	MEGAPAGE_ROUND((x))
#define DEVMAP_FLAGS	PMAP_DEV
183 1.15 skrll
184 1.1 matt #ifdef __PMAP_PRIVATE
185 1.10 skrll
/*
 * TLB-entry consistency check callback.  The hardware walks the page
 * tables directly, so the software TLB is never walked and this hook
 * is never invoked; it only exists to satisfy the MI interface.
 */
static inline bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	// TLB not walked and so not called.
	return false;
}
192 1.10 skrll
/*
 * Make the instruction cache coherent with a page's data (e.g. after
 * copy-on-write of executable text).  As in pmap_procwr(), the stores
 * are ordered with "fence rw,rw" and the local hart's fetch stream is
 * resynchronized with "fence.i"; mdpg and kc are unused.
 */
static inline void
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *kc)
{
	__asm __volatile("fence\trw,rw; fence.i" ::: "memory");
}
198 1.1 matt
199 1.1 matt /*
200 1.1 matt * Virtual Cache Alias helper routines. Not a problem for RISCV CPUs.
201 1.1 matt */
/*
 * Register a new mapping for cache-alias tracking.  RISC-V caches are
 * physically indexed, so there are no virtual-cache aliases: always
 * report "no alias action needed".
 */
static inline bool
pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
{
	return false;
}
207 1.1 matt
/* Remove a mapping from alias tracking: no-op (no virtual-cache aliases). */
static inline void
pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va)
{
}
212 1.1 matt
/* Clean aliased cache lines for a page: no-op (no virtual-cache aliases). */
static inline void
pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
{
}
217 1.1 matt
/*
 * Largest valid ASID.  The ASID space size is derived from the width
 * of the SATP.ASID field (see PMAP_TLB_NUM_PIDS above).
 */
static inline size_t
pmap_md_tlb_asid_max(void)
{
	return PMAP_TLB_NUM_PIDS - 1;
}
223 1.5 skrll
224 1.15 skrll static inline pt_entry_t *
225 1.15 skrll pmap_md_nptep(pt_entry_t *ptep)
226 1.15 skrll {
227 1.15 skrll return ptep + 1;
228 1.15 skrll }
229 1.15 skrll
/*
 * Report whether va needs the special kernel-image VA->PA translation
 * (pmap_md_kernel_vaddr_to_paddr()).  Always false on this port.
 */
static inline bool
pmap_md_kernel_vaddr_p(vaddr_t va)
{
	return false;
}
235 1.15 skrll
/*
 * Translate a kernel-image VA to its PA.  Unreachable on this port:
 * pmap_md_kernel_vaddr_p() always returns false, so callers never get
 * here; 0 is returned purely to satisfy the signature.
 */
static inline paddr_t
pmap_md_kernel_vaddr_to_paddr(vaddr_t vax)
{
	/* Not used due to false from pmap_md_kernel_vaddr_p */

	return 0;
}
243 1.15 skrll
244 1.1 matt #endif /* __PMAP_PRIVATE */
245 1.1 matt #endif /* _KERNEL */
246 1.1 matt
247 1.1 matt #include <uvm/pmap/pmap.h>
248 1.1 matt
249 1.1 matt #endif /* !_MODULE */
250 1.1 matt
251 1.1 matt #if defined(MODULAR) || defined(_MODULE)
252 1.1 matt /*
253 1.1 matt * Define a compatible vm_page_md so that struct vm_page is the same size
254 1.1 matt * whether we are using modules or not.
255 1.1 matt */
256 1.1 matt #ifndef __HAVE_VM_PAGE_MD
257 1.11 simonb #define __HAVE_VM_PAGE_MD
258 1.1 matt
/* Placeholder with the same size as the real (non-module) vm_page_md. */
struct vm_page_md {
	uintptr_t mdpg_dummy[3];	/* padding only; never accessed */
};
/* Keep struct vm_page layout identical with and without modules. */
__CTASSERT(sizeof(struct vm_page_md) == sizeof(uintptr_t)*3);
263 1.1 matt
264 1.2 maxv #endif /* !__HAVE_VM_PAGE_MD */
265 1.1 matt
266 1.1 matt #endif /* MODULAR || _MODULE */
267 1.1 matt
268 1.1 matt #endif /* !_RISCV_PMAP_H_ */
269