/*	$NetBSD: kvm_x86_64.c,v 1.2.2.2 2002/12/19 02:26:25 thorpej Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_x86_64.c,v 1.2.2.2 2002/12/19 02:26:25 thorpej Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * x86-64 machine dependent routines for kvm.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

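/*
 * The routines in this file are not called directly by consumers; they
 * are the machine-dependent hooks used by the MI libkvm code when a
 * kvm(3) descriptor refers to an x86-64 crash dump.  The block below is
 * a rough, hypothetical sketch of how a debugger-style consumer would
 * reach _kvm_kvatop()/_kvm_pa2off() indirectly through kvm_read(); the
 * symbol name "_hz" and the file paths are illustrative only and the
 * code is not part of this file's interface.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

static int
example_read_hz(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[] = {
		{ "_hz" },
		{ NULL },
	};
	kvm_t *kd;
	int hz;

	/* Open a crash dump rather than /dev/mem, so ISALIVE(kd) is false. */
	kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core", NULL,
	    O_RDONLY, errbuf);
	if (kd == NULL)
		return (-1);
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
		(void)kvm_close(kd);
		return (-1);
	}
	/* kvm_read() translates the VA via _kvm_kvatop()/_kvm_pa2off(). */
	if (kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
		fprintf(stderr, "%s\n", kvm_geterr(kd));
		(void)kvm_close(kd);
		return (-1);
	}
	(void)kvm_close(kd);
	return (hz);
}
#endif
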
void
_kvm_freevtop(kd)
	kvm_t *kd;
{

	/* Not actually used for anything right now, but safe. */
	if (kd->vmst != 0)
		free(kd->vmst);
}

/*ARGSUSED*/
int
_kvm_initvtop(kd)
	kvm_t *kd;
{

	return (0);
}

/*
 * Translate a kernel virtual address to a physical address.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *cpu_kh;
	u_long page_off;
	pd_entry_t pde;
	pt_entry_t pte;
	paddr_t pde_pa, pte_pa;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	cpu_kh = kd->cpu_data;
	page_off = va & PGOFSET;

	/*
	 * Find and read all entries to get to the pa.
	 */

	/*
	 * Level 4.
	 */
	pde_pa = cpu_kh->ptdpaddr + (pl4_i(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 4 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 4 PDE)");
		goto lose;
	}

	/*
	 * Level 3.
	 */
	pde_pa = (pde & PG_FRAME) + (pl3_i(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 3 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 3 PDE)");
		goto lose;
	}

	/*
	 * Level 2.
	 */
	pde_pa = (pde & PG_FRAME) + (pl2_i(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 2 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 2 PDE)");
		goto lose;
	}

	/*
	 * Level 1.
	 */
	pte_pa = (pde & PG_FRAME) + (pl1_i(va) * sizeof(pt_entry_t));
	if (pread(kd->pmfd, (void *)&pte, sizeof(pte),
	    _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
		_kvm_syserr(kd, 0, "could not read PTE");
		goto lose;
	}
	/*
	 * Validate the PTE and return the physical address.
	 */
	if ((pte & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PTE)");
		goto lose;
	}
	*pa = (pte & PG_FRAME) + page_off;
	return (int)(NBPG - page_off);

 lose:
	*pa = (u_long)~0L;
	return (0);
}
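
/*
 * For reference, the walk above indexes four 512-entry tables.  With the
 * standard x86-64 long-mode layout (4KB pages, 9 index bits per level),
 * the pl[1-4]_i() macros from <machine/pmap.h> reduce to shifts and masks
 * of the virtual address.  The sketch below is only an illustration of
 * that arithmetic, not the macros' actual definitions, and is never
 * compiled.
 */
#if 0
static u_long
example_va_split(u_long va)
{
	u_long l4, l3, l2, l1, off;

	l4  = (va >> 39) & 0x1ff;	/* level 4 (PML4) index, cf. pl4_i() */
	l3  = (va >> 30) & 0x1ff;	/* level 3 (PDP) index, cf. pl3_i() */
	l2  = (va >> 21) & 0x1ff;	/* level 2 (PD) index, cf. pl2_i() */
	l1  = (va >> 12) & 0x1ff;	/* level 1 (PT) index, cf. pl1_i() */
	off = va & 0xfff;		/* byte offset within the 4KB page */

	/*
	 * Each step of the walk masks the previous entry with PG_FRAME to
	 * get the next table's physical base, then adds
	 * index * sizeof(pd_entry_t) to locate the next entry.  Recombining
	 * the pieces gives back the low 48 address bits (the sign extension
	 * in bits 63:48 is not represented in the indices).
	 */
	return ((l4 << 39) | (l3 << 30) | (l2 << 21) | (l1 << 12) | off);
}
#endif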

/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t *kd;
	u_long pa;
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));

	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}

	return (kd->dump_off + off);
}

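/*
 * A small worked example of the scan above, with made-up segment values:
 * given two RAM segments [0x0, 0x9f000) and [0x100000, 0x3ff0000), a
 * physical address in the second segment lands at
 * dump_off + 0x9f000 + (pa - 0x100000); the dump stores the segments back
 * to back in header order.  The sketch below merely restates that
 * arithmetic with hypothetical numbers and is never compiled or used by
 * the library.
 */
#if 0
static off_t
example_pa2off(off_t dump_off, u_long pa)
{
	/* Hypothetical segment list, mirroring cpu_kh->nmemsegs entries. */
	static const struct { u_long start; u_long size; } segs[2] = {
		{ 0x00000000UL, 0x0009f000UL },
		{ 0x00100000UL, 0x03ef0000UL },
	};
	off_t off = 0;
	int i;

	for (i = 0; i < 2; i++) {
		if (pa >= segs[i].start &&
		    (pa - segs[i].start) < segs[i].size) {
			off += (pa - segs[i].start);
			break;
		}
		off += segs[i].size;	/* skip a whole earlier segment */
	}
	return (dump_off + off);
}
#endif
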
/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
int
_kvm_mdopen(kd)
	kvm_t *kd;
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}