pmap.h revision 1.22 1 1.22 skrll /* $NetBSD: pmap.h,v 1.22 2023/10/06 08:48:49 skrll Exp $ */
2 1.1 matt
3 1.3 maxv /*
4 1.9 skrll * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.9 skrll * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
9 1.9 skrll * Nick Hudson.
10 1.1 matt *
11 1.1 matt * Redistribution and use in source and binary forms, with or without
12 1.1 matt * modification, are permitted provided that the following conditions
13 1.1 matt * are met:
14 1.1 matt * 1. Redistributions of source code must retain the above copyright
15 1.1 matt * notice, this list of conditions and the following disclaimer.
16 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 matt * notice, this list of conditions and the following disclaimer in the
18 1.1 matt * documentation and/or other materials provided with the distribution.
19 1.1 matt *
20 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
31 1.1 matt */
32 1.1 matt
33 1.1 matt #ifndef _RISCV_PMAP_H_
34 1.11 simonb #define _RISCV_PMAP_H_
35 1.1 matt
36 1.1 matt #ifdef _KERNEL_OPT
37 1.1 matt #include "opt_modular.h"
38 1.1 matt #endif
39 1.1 matt
40 1.1 matt #if !defined(_MODULE)
41 1.1 matt
42 1.9 skrll #include <sys/cdefs.h>
43 1.1 matt #include <sys/types.h>
44 1.1 matt #include <sys/pool.h>
45 1.1 matt #include <sys/evcnt.h>
46 1.1 matt
47 1.2 maxv #include <uvm/uvm_physseg.h>
48 1.1 matt #include <uvm/pmap/vmpagemd.h>
49 1.1 matt
50 1.1 matt #include <riscv/pte.h>
51 1.9 skrll #include <riscv/sysreg.h>
52 1.1 matt
53 1.11 simonb #define PMAP_SEGTABSIZE NPTEPG
54 1.11 simonb #define PMAP_PDETABSIZE NPTEPG
55 1.1 matt
56 1.1 matt #ifdef _LP64
57 1.11 simonb #define PTPSHIFT 3
58 1.15 skrll /* This is SV57. */
59 1.15 skrll //#define XSEGSHIFT (SEGSHIFT + SEGLENGTH + SEGLENGTH + SEGLENGTH)
60 1.15 skrll
61 1.9 skrll /* This is SV48. */
62 1.15 skrll //#define XSEGSHIFT (SEGSHIFT + SEGLENGTH + SEGLENGTH)
63 1.9 skrll
64 1.9 skrll /* This is SV39. */
65 1.11 simonb #define XSEGSHIFT (SEGSHIFT + SEGLENGTH)
66 1.11 simonb #define NBXSEG (1ULL << XSEGSHIFT)
67 1.11 simonb #define XSEGOFSET (NBXSEG - 1) /* byte offset into xsegment */
68 1.11 simonb #define XSEGLENGTH (PGSHIFT - 3)
69 1.11 simonb #define NXSEGPG (1 << XSEGLENGTH)
70 1.1 matt #else
71 1.11 simonb #define PTPSHIFT 2
72 1.12 skrll #define XSEGSHIFT SEGSHIFT
73 1.1 matt #endif
74 1.3 maxv
75 1.11 simonb #define SEGLENGTH (PGSHIFT - PTPSHIFT)
76 1.11 simonb #define SEGSHIFT (SEGLENGTH + PGSHIFT)
77 1.11 simonb #define NBSEG (1 << SEGSHIFT) /* bytes/segment */
78 1.11 simonb #define SEGOFSET (NBSEG - 1) /* byte offset into segment */
79 1.1 matt
80 1.11 simonb #define KERNEL_PID 0
81 1.1 matt
82 1.11 simonb #define PMAP_HWPAGEWALKER 1
83 1.21 skrll #define PMAP_TLB_MAX 1
84 1.22 skrll #define PMAP_TLB_ALWAYS_ASIDS false
85 1.1 matt #ifdef _LP64
86 1.11 simonb #define PMAP_INVALID_PDETAB_ADDRESS ((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
87 1.11 simonb #define PMAP_INVALID_SEGTAB_ADDRESS ((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
88 1.1 matt #else
89 1.11 simonb #define PMAP_INVALID_PDETAB_ADDRESS ((pmap_pdetab_t *)0xdeadbeef)
90 1.11 simonb #define PMAP_INVALID_SEGTAB_ADDRESS ((pmap_segtab_t *)0xdeadbeef)
91 1.1 matt #endif
92 1.11 simonb #define PMAP_TLB_NUM_PIDS (__SHIFTOUT_MASK(SATP_ASID) + 1)
93 1.11 simonb #define PMAP_TLB_BITMAP_LENGTH PMAP_TLB_NUM_PIDS
94 1.21 skrll #define PMAP_TLB_FLUSH_ASID_ON_RESET true
95 1.1 matt
96 1.11 simonb #define pmap_phys_address(x) (x)
97 1.1 matt
98 1.2 maxv #ifndef __BSD_PTENTRY_T__
99 1.11 simonb #define __BSD_PTENTRY_T__
100 1.2 maxv #ifdef _LP64
101 1.11 simonb #define PRIxPTE PRIx64
102 1.2 maxv #else
103 1.11 simonb #define PRIxPTE PRIx32
104 1.2 maxv #endif
105 1.2 maxv #endif /* __BSD_PTENTRY_T__ */
106 1.2 maxv
107 1.11 simonb #define PMAP_NEED_PROCWR
108 1.1 matt static inline void
109 1.1 matt pmap_procwr(struct proc *p, vaddr_t va, vsize_t len)
110 1.1 matt {
111 1.13 skrll __asm __volatile("fence\trw,rw; fence.i" ::: "memory");
112 1.1 matt }
113 1.1 matt
114 1.1 matt #include <uvm/pmap/tlb.h>
115 1.15 skrll #include <uvm/pmap/pmap_devmap.h>
116 1.1 matt #include <uvm/pmap/pmap_tlb.h>
117 1.18 skrll #include <uvm/pmap/pmap_synci.h>
118 1.1 matt
119 1.11 simonb #define PMAP_GROWKERNEL
120 1.11 simonb #define PMAP_STEAL_MEMORY
121 1.1 matt
122 1.1 matt #ifdef _KERNEL
123 1.1 matt
124 1.11 simonb #define __HAVE_PMAP_MD
125 1.1 matt struct pmap_md {
126 1.10 skrll paddr_t md_ppn;
127 1.1 matt };
128 1.1 matt
/*
 * MD hook to synchronize the entire instruction cache.
 * Intentionally empty here; icache maintenance is handled by the
 * fence-based routines elsewhere in this header.
 */
static inline void
pmap_md_icache_sync_all(void)
{
	/* nothing to do */
}
133 1.18 skrll
134 1.18 skrll static inline void
135 1.18 skrll pmap_md_icache_sync_range_index(vaddr_t va, vsize_t size)
136 1.18 skrll {
137 1.18 skrll }
138 1.18 skrll
139 1.1 matt struct vm_page *
140 1.18 skrll pmap_md_alloc_poolpage(int);
141 1.6 skrll vaddr_t pmap_md_map_poolpage(paddr_t, vsize_t);
142 1.6 skrll void pmap_md_unmap_poolpage(vaddr_t, vsize_t);
143 1.18 skrll
144 1.6 skrll bool pmap_md_direct_mapped_vaddr_p(vaddr_t);
145 1.6 skrll paddr_t pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
146 1.6 skrll vaddr_t pmap_md_direct_map_paddr(paddr_t);
147 1.6 skrll void pmap_md_init(void);
148 1.18 skrll bool pmap_md_io_vaddr_p(vaddr_t);
149 1.18 skrll bool pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
150 1.18 skrll void pmap_md_pdetab_init(struct pmap *);
151 1.18 skrll void pmap_md_pdetab_fini(struct pmap *);
152 1.18 skrll void pmap_md_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
153 1.9 skrll void pmap_md_xtab_activate(struct pmap *, struct lwp *);
154 1.9 skrll void pmap_md_xtab_deactivate(struct pmap *);
155 1.1 matt
156 1.16 skrll void pmap_bootstrap(vaddr_t, vaddr_t);
157 1.10 skrll
158 1.15 skrll vsize_t pmap_kenter_range(vaddr_t, paddr_t, vsize_t, vm_prot_t, u_int);
159 1.15 skrll
160 1.15 skrll #ifdef _LP64
161 1.3 maxv extern vaddr_t pmap_direct_base;
162 1.3 maxv extern vaddr_t pmap_direct_end;
163 1.15 skrll #define PMAP_DIRECT_MAP(pa) RISCV_PA_TO_KVA(pa)
164 1.15 skrll #define PMAP_DIRECT_UNMAP(va) RISCV_KVA_TO_PA(va)
165 1.15 skrll
166 1.15 skrll /*
167 1.15 skrll * Other hooks for the pool allocator.
168 1.15 skrll */
169 1.15 skrll #define POOL_PHYSTOV(pa) RISCV_PA_TO_KVA((paddr_t)(pa))
170 1.15 skrll #define POOL_VTOPHYS(va) RISCV_KVA_TO_PA((vaddr_t)(va))
171 1.15 skrll
172 1.15 skrll #endif /* _LP64 */
173 1.3 maxv
174 1.11 simonb #define MEGAPAGE_TRUNC(x) ((x) & ~SEGOFSET)
175 1.11 simonb #define MEGAPAGE_ROUND(x) MEGAPAGE_TRUNC((x) + SEGOFSET)
176 1.9 skrll
177 1.15 skrll #define PMAP_DEV __BIT(29) /* 0x2000_0000 */
178 1.15 skrll
179 1.15 skrll #define DEVMAP_ALIGN(x) MEGAPAGE_TRUNC((x))
180 1.15 skrll #define DEVMAP_SIZE(x) MEGAPAGE_ROUND((x))
181 1.15 skrll #define DEVMAP_FLAGS PMAP_DEV
182 1.15 skrll
183 1.1 matt #ifdef __PMAP_PRIVATE
184 1.10 skrll
185 1.10 skrll static inline bool
186 1.10 skrll pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
187 1.10 skrll {
188 1.10 skrll // TLB not walked and so not called.
189 1.10 skrll return false;
190 1.10 skrll }
191 1.10 skrll
192 1.1 matt static inline void
193 1.20 skrll pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
194 1.1 matt {
195 1.13 skrll __asm __volatile("fence\trw,rw; fence.i" ::: "memory");
196 1.1 matt }
197 1.1 matt
198 1.1 matt /*
199 1.1 matt * Virtual Cache Alias helper routines. Not a problem for RISC-V CPUs.
200 1.1 matt */
201 1.1 matt static inline bool
202 1.8 skrll pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
203 1.1 matt {
204 1.1 matt return false;
205 1.1 matt }
206 1.1 matt
207 1.1 matt static inline void
208 1.8 skrll pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va)
209 1.1 matt {
210 1.1 matt }
211 1.1 matt
212 1.1 matt static inline void
213 1.8 skrll pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
214 1.1 matt {
215 1.1 matt }
216 1.1 matt
217 1.1 matt static inline size_t
218 1.1 matt pmap_md_tlb_asid_max(void)
219 1.1 matt {
220 1.1 matt return PMAP_TLB_NUM_PIDS - 1;
221 1.1 matt }
222 1.5 skrll
223 1.15 skrll static inline pt_entry_t *
224 1.15 skrll pmap_md_nptep(pt_entry_t *ptep)
225 1.15 skrll {
226 1.15 skrll return ptep + 1;
227 1.15 skrll }
228 1.15 skrll
229 1.1 matt #endif /* __PMAP_PRIVATE */
230 1.1 matt #endif /* _KERNEL */
231 1.1 matt
232 1.1 matt #include <uvm/pmap/pmap.h>
233 1.1 matt
234 1.1 matt #endif /* !_MODULE */
235 1.1 matt
236 1.1 matt #if defined(MODULAR) || defined(_MODULE)
237 1.1 matt /*
238 1.1 matt * Define a compatible vm_page_md so that struct vm_page is the same size
239 1.1 matt * whether we are using modules or not.
240 1.1 matt */
241 1.1 matt #ifndef __HAVE_VM_PAGE_MD
242 1.11 simonb #define __HAVE_VM_PAGE_MD
243 1.1 matt
244 1.1 matt struct vm_page_md {
245 1.1 matt uintptr_t mdpg_dummy[3];
246 1.1 matt };
247 1.2 maxv __CTASSERT(sizeof(struct vm_page_md) == sizeof(uintptr_t)*3);
248 1.1 matt
249 1.2 maxv #endif /* !__HAVE_VM_PAGE_MD */
250 1.1 matt
251 1.1 matt #endif /* MODULAR || _MODULE */
252 1.1 matt
253 1.1 matt #endif /* !_RISCV_PMAP_H_ */
254