pmap.h revision 1.21 1 1.21 skrll /* $NetBSD: pmap.h,v 1.21 2023/09/03 08:48:20 skrll Exp $ */
2 1.1 matt
3 1.3 maxv /*
4 1.9 skrll * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.9 skrll * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
9 1.9 skrll * Nick Hudson.
10 1.1 matt *
11 1.1 matt * Redistribution and use in source and binary forms, with or without
12 1.1 matt * modification, are permitted provided that the following conditions
13 1.1 matt * are met:
14 1.1 matt * 1. Redistributions of source code must retain the above copyright
15 1.1 matt * notice, this list of conditions and the following disclaimer.
16 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 matt * notice, this list of conditions and the following disclaimer in the
18 1.1 matt * documentation and/or other materials provided with the distribution.
19 1.1 matt *
20 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
31 1.1 matt */
32 1.1 matt
33 1.1 matt #ifndef _RISCV_PMAP_H_
34 1.11 simonb #define _RISCV_PMAP_H_
35 1.1 matt
36 1.1 matt #ifdef _KERNEL_OPT
37 1.1 matt #include "opt_modular.h"
38 1.1 matt #endif
39 1.1 matt
40 1.1 matt #if !defined(_MODULE)
41 1.1 matt
42 1.9 skrll #include <sys/cdefs.h>
43 1.1 matt #include <sys/types.h>
44 1.1 matt #include <sys/pool.h>
45 1.1 matt #include <sys/evcnt.h>
46 1.1 matt
47 1.2 maxv #include <uvm/uvm_physseg.h>
48 1.1 matt #include <uvm/pmap/vmpagemd.h>
49 1.1 matt
50 1.1 matt #include <riscv/pte.h>
51 1.9 skrll #include <riscv/sysreg.h>
52 1.1 matt
53 1.11 simonb #define PMAP_SEGTABSIZE NPTEPG
54 1.11 simonb #define PMAP_PDETABSIZE NPTEPG
55 1.1 matt
56 1.1 matt #ifdef _LP64
57 1.11 simonb #define PTPSHIFT 3
58 1.15 skrll /* This is SV57. */
59 1.15 skrll //#define XSEGSHIFT (SEGSHIFT + SEGLENGTH + SEGLENGTH + SEGLENGTH)
60 1.15 skrll
61 1.9 skrll /* This is SV48. */
62 1.15 skrll //#define XSEGSHIFT (SEGSHIFT + SEGLENGTH + SEGLENGTH)
63 1.9 skrll
64 1.9 skrll /* This is SV39. */
65 1.11 simonb #define XSEGSHIFT (SEGSHIFT + SEGLENGTH)
66 1.11 simonb #define NBXSEG (1ULL << XSEGSHIFT)
67 1.11 simonb #define XSEGOFSET (NBXSEG - 1) /* byte offset into xsegment */
68 1.11 simonb #define XSEGLENGTH (PGSHIFT - 3)
69 1.11 simonb #define NXSEGPG (1 << XSEGLENGTH)
70 1.1 matt #else
71 1.11 simonb #define PTPSHIFT 2
72 1.12 skrll #define XSEGSHIFT SEGSHIFT
73 1.1 matt #endif
74 1.3 maxv
75 1.11 simonb #define SEGLENGTH (PGSHIFT - PTPSHIFT)
76 1.11 simonb #define SEGSHIFT (SEGLENGTH + PGSHIFT)
77 1.11 simonb #define NBSEG (1 << SEGSHIFT) /* bytes/segment */
78 1.11 simonb #define SEGOFSET (NBSEG - 1) /* byte offset into segment */
79 1.1 matt
80 1.11 simonb #define KERNEL_PID 0
81 1.1 matt
82 1.11 simonb #define PMAP_HWPAGEWALKER 1
83 1.21 skrll #define PMAP_TLB_MAX 1
84 1.1 matt #ifdef _LP64
85 1.11 simonb #define PMAP_INVALID_PDETAB_ADDRESS ((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
86 1.11 simonb #define PMAP_INVALID_SEGTAB_ADDRESS ((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
87 1.1 matt #else
88 1.11 simonb #define PMAP_INVALID_PDETAB_ADDRESS ((pmap_pdetab_t *)0xdeadbeef)
89 1.11 simonb #define PMAP_INVALID_SEGTAB_ADDRESS ((pmap_segtab_t *)0xdeadbeef)
90 1.1 matt #endif
91 1.11 simonb #define PMAP_TLB_NUM_PIDS (__SHIFTOUT_MASK(SATP_ASID) + 1)
92 1.11 simonb #define PMAP_TLB_BITMAP_LENGTH PMAP_TLB_NUM_PIDS
93 1.21 skrll #define PMAP_TLB_FLUSH_ASID_ON_RESET true
94 1.1 matt
95 1.11 simonb #define pmap_phys_address(x) (x)
96 1.1 matt
97 1.2 maxv #ifndef __BSD_PTENTRY_T__
98 1.11 simonb #define __BSD_PTENTRY_T__
99 1.2 maxv #ifdef _LP64
100 1.11 simonb #define PRIxPTE PRIx64
101 1.2 maxv #else
102 1.11 simonb #define PRIxPTE PRIx32
103 1.2 maxv #endif
104 1.2 maxv #endif /* __BSD_PTENTRY_T__ */
105 1.2 maxv
#define	PMAP_NEED_PROCWR
/*
 * pmap_procwr: make the instruction stream coherent after instructions
 * have been written into a process' address space.
 *
 * Orders all prior loads/stores (fence rw,rw) and then flushes the
 * local hart's instruction-fetch pipeline (fence.i).  The p/va/len
 * arguments are unused because the fence is global, not range-based.
 *
 * NOTE(review): fence.i only affects the executing hart; remote harts
 * presumably rely on a caller-side IPI path — confirm.
 */
static inline void
pmap_procwr(struct proc *p, vaddr_t va, vsize_t len)
{
	__asm __volatile("fence\trw,rw; fence.i" ::: "memory");
}
112 1.1 matt
113 1.1 matt #include <uvm/pmap/tlb.h>
114 1.15 skrll #include <uvm/pmap/pmap_devmap.h>
115 1.1 matt #include <uvm/pmap/pmap_tlb.h>
116 1.18 skrll #include <uvm/pmap/pmap_synci.h>
117 1.1 matt
118 1.11 simonb #define PMAP_GROWKERNEL
119 1.11 simonb #define PMAP_STEAL_MEMORY
120 1.1 matt
121 1.1 matt #ifdef _KERNEL
122 1.1 matt
#define	__HAVE_PMAP_MD
/*
 * Machine-dependent portion of struct pmap.
 */
struct pmap_md {
	paddr_t md_ppn;	/* NOTE(review): name suggests a physical page
			 * number, but the type is paddr_t; presumably
			 * identifies the top-level page-table page used
			 * to form SATP — confirm against the users in
			 * the pmap implementation. */
};
127 1.1 matt
/*
 * pmap_md_icache_sync_all: MD hook to synchronize the whole icache.
 * Nothing to do here: icache synchronization on this port is done with
 * a global fence.i (see pmap_procwr()/pmap_md_page_syncicache()).
 */
static inline void
pmap_md_icache_sync_all(void)
{
}
132 1.18 skrll
/*
 * pmap_md_icache_sync_range_index: MD hook to synchronize the icache
 * for a virtual range.  A no-op for the same reason as
 * pmap_md_icache_sync_all(); va/size are intentionally unused.
 */
static inline void
pmap_md_icache_sync_range_index(vaddr_t va, vsize_t size)
{
}
137 1.18 skrll
/* Pool-page backing: allocate/map/unmap pages for the pmap pool allocator. */
struct vm_page *
pmap_md_alloc_poolpage(int);
vaddr_t	pmap_md_map_poolpage(paddr_t, vsize_t);
void	pmap_md_unmap_poolpage(vaddr_t, vsize_t);

/* Direct-map (KVA <-> PA) queries and conversions. */
bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t	pmap_md_direct_map_paddr(paddr_t);

/* MD hooks called by the common uvm/pmap code. */
void	pmap_md_init(void);
bool	pmap_md_io_vaddr_p(vaddr_t);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
void	pmap_md_pdetab_init(struct pmap *);
void	pmap_md_pdetab_fini(struct pmap *);
void	pmap_md_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
void	pmap_md_xtab_activate(struct pmap *, struct lwp *);
void	pmap_md_xtab_deactivate(struct pmap *);

/* Early MD bootstrap of the kernel pmap. */
void	pmap_bootstrap(vaddr_t, vaddr_t);

/* Enter a physically contiguous range into the kernel map. */
vsize_t	pmap_kenter_range(vaddr_t, paddr_t, vsize_t, vm_prot_t, u_int);
158 1.15 skrll
159 1.15 skrll #ifdef _LP64
160 1.3 maxv extern vaddr_t pmap_direct_base;
161 1.3 maxv extern vaddr_t pmap_direct_end;
162 1.15 skrll #define PMAP_DIRECT_MAP(pa) RISCV_PA_TO_KVA(pa)
163 1.15 skrll #define PMAP_DIRECT_UNMAP(va) RISCV_KVA_TO_PA(va)
164 1.15 skrll
165 1.15 skrll /*
166 1.15 skrll * Other hooks for the pool allocator.
167 1.15 skrll */
168 1.15 skrll #define POOL_PHYSTOV(pa) RISCV_PA_TO_KVA((paddr_t)(pa))
169 1.15 skrll #define POOL_VTOPHYS(va) RISCV_KVA_TO_PA((vaddr_t)(va))
170 1.15 skrll
171 1.15 skrll #endif /* _LP64 */
172 1.3 maxv
173 1.11 simonb #define MEGAPAGE_TRUNC(x) ((x) & ~SEGOFSET)
174 1.11 simonb #define MEGAPAGE_ROUND(x) MEGAPAGE_TRUNC((x) + SEGOFSET)
175 1.9 skrll
176 1.15 skrll #define PMAP_DEV __BIT(29) /* 0x2000_0000 */
177 1.15 skrll
178 1.15 skrll #define DEVMAP_ALIGN(x) MEGAPAGE_TRUNC((x))
179 1.15 skrll #define DEVMAP_SIZE(x) MEGAPAGE_ROUND((x))
180 1.15 skrll #define DEVMAP_FLAGS PMAP_DEV
181 1.15 skrll
182 1.1 matt #ifdef __PMAP_PRIVATE
183 1.10 skrll
184 1.10 skrll static inline bool
185 1.10 skrll pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
186 1.10 skrll {
187 1.10 skrll // TLB not walked and so not called.
188 1.10 skrll return false;
189 1.10 skrll }
190 1.10 skrll
/*
 * pmap_md_page_syncicache: make instruction fetch coherent with prior
 * data stores to a page.  Orders all earlier loads/stores (fence rw,rw)
 * and then flushes the local hart's fetch pipeline (fence.i).  The
 * mdpg/onproc arguments are unused because the fence is neither page-
 * nor CPU-targeted.
 *
 * NOTE(review): fence.i is local to this hart; remote harts presumably
 * get an equivalent fence via the onproc/IPI machinery — confirm.
 */
static inline void
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
{
	__asm __volatile("fence\trw,rw; fence.i" ::: "memory");
}
196 1.1 matt
197 1.1 matt /*
198 1.1 matt * Virtual Cache Alias helper routines. Not a problem for RISCV CPUs.
199 1.1 matt */
200 1.1 matt static inline bool
201 1.8 skrll pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
202 1.1 matt {
203 1.1 matt return false;
204 1.1 matt }
205 1.1 matt
/* Virtual cache alias removal: no-op, no aliasing on RISC-V. */
static inline void
pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va)
{
}
210 1.1 matt
/* Virtual cache alias clean: no-op, no aliasing on RISC-V. */
static inline void
pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
{
}
215 1.1 matt
216 1.1 matt static inline size_t
217 1.1 matt pmap_md_tlb_asid_max(void)
218 1.1 matt {
219 1.1 matt return PMAP_TLB_NUM_PIDS - 1;
220 1.1 matt }
221 1.5 skrll
222 1.15 skrll static inline pt_entry_t *
223 1.15 skrll pmap_md_nptep(pt_entry_t *ptep)
224 1.15 skrll {
225 1.15 skrll return ptep + 1;
226 1.15 skrll }
227 1.15 skrll
228 1.1 matt #endif /* __PMAP_PRIVATE */
229 1.1 matt #endif /* _KERNEL */
230 1.1 matt
231 1.1 matt #include <uvm/pmap/pmap.h>
232 1.1 matt
233 1.1 matt #endif /* !_MODULE */
234 1.1 matt
235 1.1 matt #if defined(MODULAR) || defined(_MODULE)
236 1.1 matt /*
237 1.1 matt * Define a compatible vm_page_md so that struct vm_page is the same size
238 1.1 matt * whether we are using modules or not.
239 1.1 matt */
#ifndef __HAVE_VM_PAGE_MD
#define	__HAVE_VM_PAGE_MD

/*
 * Placeholder vm_page_md for modular builds: three pointer-sized words,
 * sized to match the real definition from <uvm/pmap/vmpagemd.h> so that
 * struct vm_page has the same size with or without modules.  The
 * __CTASSERT pins that size so a drift in either definition is caught
 * at compile time.
 */
struct vm_page_md {
	uintptr_t mdpg_dummy[3];
};
__CTASSERT(sizeof(struct vm_page_md) == sizeof(uintptr_t)*3);

#endif /* !__HAVE_VM_PAGE_MD */
249 1.1 matt
250 1.1 matt #endif /* MODULAR || _MODULE */
251 1.1 matt
252 1.1 matt #endif /* !_RISCV_PMAP_H_ */
253