/*	$NetBSD: kvm_aarch64.c,v 1.10 2020/11/10 19:14:11 skrll Exp $	*/

/*-
 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <sys/types.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <machine/kcore.h>
#include <machine/armreg.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <limits.h>
#include <db.h>
#include <stdlib.h>

#include "kvm_private.h"

__RCSID("$NetBSD: kvm_aarch64.c,v 1.10 2020/11/10 19:14:11 skrll Exp $");

/*ARGSUSED*/
void
_kvm_freevtop(kvm_t *kd)
{
	return;
}

/*ARGSUSED*/
int
_kvm_initvtop(kvm_t *kd)
{
	return (0);
}

int
_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
{
	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	if ((va & AARCH64_DIRECTMAP_MASK) != AARCH64_DIRECTMAP_START) {
		/*
		 * Bogus address (not in KV space): punt.
		 */
		_kvm_err(kd, 0, "invalid kernel virtual address");
 lose:
		*pa = -1;
		return (0);
	}

	const cpu_kcore_hdr_t * const cpu_kh = kd->cpu_data;
	const uint64_t tg1 = cpu_kh->kh_tcr1 & TCR_TG1;
	const u_int t1siz = __SHIFTOUT(cpu_kh->kh_tcr1, TCR_T1SZ);
	const u_int inputsz = 64 - t1siz;

	/*
	 * Real kernel virtual address: do the translation.
	 */

	u_int page_shift;

	switch (tg1) {
	case TCR_TG1_4KB:
		page_shift = 12;
		break;
	case TCR_TG1_16KB:
		page_shift = 14;
		break;
	case TCR_TG1_64KB:
		page_shift = 16;
		break;
	default:
		goto lose;
	}

	const size_t page_size = 1 << page_shift;
	const uint64_t page_mask = __BITS(page_shift - 1, 0);
	const uint64_t page_addr = __BITS(47, page_shift);
	const u_int pte_shift = page_shift - 3;

	/* how many levels of page tables do we have? */
	u_int levels = howmany(inputsz - page_shift, pte_shift);
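
	/*
	 * Worked example (illustrative figures, not read from any
	 * particular dump): with a 4KB granule (page_shift = 12,
	 * pte_shift = 9) and T1SZ = 25, inputsz = 64 - 25 = 39, so
	 * levels = howmany(39 - 12, 9) = 3, i.e. a three-level walk
	 * resolving VA bits [38:30], [29:21] and [20:12].
	 */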

	/* restrict va to the valid VA bits */
	va &= __BITS(inputsz - 1, 0);

	u_int addr_shift = page_shift + (levels - 1) * pte_shift;

	/* clear out the unused low bits of the table address */
	paddr_t pte_addr = cpu_kh->kh_ttbr1 & TTBR_BADDR;

	for (;;) {
		pt_entry_t pte;

		/* now index into the pte table */
		const uint64_t idx_mask = __BITS(addr_shift + pte_shift - 1,
		    addr_shift);
		pte_addr += 8 * __SHIFTOUT(va, idx_mask);
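
		/*
		 * Continuing the 4KB-granule example above: at the top
		 * level addr_shift is 30, so idx_mask covers VA bits
		 * [38:30], yielding a table index of 0..511 that is
		 * scaled by 8, the size of one 64-bit PTE.
		 */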

		/* Find and read the PTE. */
		if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pte_addr)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read pte");
			goto lose;
		}

		/* The PTE must be valid to continue the walk. */
		if ((pte & LX_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid pte)");
			goto lose;
		}

		if ((pte & LX_TYPE) == LX_TYPE_BLK) {
			/*
			 * A block (superpage) entry terminates the walk
			 * early; the block spans 1 << addr_shift bytes
			 * (e.g. 2MB at level 2, or 1GB at level 1, with
			 * a 4KB granule).  Shift a size_t, not an int,
			 * since addr_shift can reach 30 and beyond.
			 */
			const size_t blk_size = (size_t)1 << addr_shift;
			const uint64_t blk_mask = __BITS(addr_shift - 1, 0);

			*pa = (pte & page_addr & ~blk_mask) | (va & blk_mask);
			return blk_size - (va & blk_mask);
		}
		if (--levels == 0) {
			*pa = (pte & page_addr) | (va & page_mask);
			return page_size - (va & page_mask);
		}

		/*
		 * Read next level of page table
		 */

		pte_addr = pte & page_addr;
		addr_shift -= pte_shift;
	}
}

/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	const cpu_kcore_hdr_t * const cpu_kh = kd->cpu_data;
	off_t off = 0;

	for (const phys_ram_seg_t *ramsegs = cpu_kh->kh_ramsegs;
	    ramsegs->size != 0; ramsegs++) {
		if (pa >= ramsegs->start
		    && pa < ramsegs->start + ramsegs->size) {
			off += pa - ramsegs->start;
			break;
		}
		off += ramsegs->size;
	}

	return kd->dump_off + off;
}
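
/*
 * Worked example (hypothetical segment list, not from a real dump):
 * with kh_ramsegs = { { 0x80000000, 0x10000000 },
 * { 0xc0000000, 0x10000000 } }, pa 0xc0001000 falls in the second
 * segment, so the result is dump_off + 0x10000000 (all of segment 0)
 * + 0x1000 (the offset within segment 1), reflecting that the RAM
 * segments are laid out back to back in the dump file.
 */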

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
int
_kvm_mdopen(kvm_t *kd)
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}
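
/*
 * Illustrative usage (a sketch, not part of this file's API surface):
 * a consumer such as ps(1) or savecore(8) reaches the translation code
 * above indirectly through kvm_read(3) on a crash dump.  The file paths
 * and the "_boottime" symbol below are assumptions for the example only.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core",
 *	    NULL, O_RDONLY, errbuf);
 *	struct nlist nl[] = { { .n_name = "_boottime" }, { .n_name = NULL } };
 *
 *	if (kd != NULL && kvm_nlist(kd, nl) == 0 && nl[0].n_value != 0) {
 *		struct timespec boottime;
 *
 *		// kvm_read() uses _kvm_kvatop() to turn the kernel VA
 *		// into a physical address, then _kvm_pa2off() to find
 *		// the corresponding offset in the dump file.
 *		(void)kvm_read(kd, nl[0].n_value, &boottime,
 *		    sizeof(boottime));
 *		kvm_close(kd);
 *	}
 */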