/* $NetBSD: kvm_sparc.c,v 1.9 1996/04/01 19:23:03 cgd Exp $ */

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
#else
static char *rcsid = "$NetBSD: kvm_sparc.c,v 1.9 1996/04/01 19:23:03 cgd Exp $";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/device.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <machine/autoconf.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#define MA_SIZE 32              /* XXX */
struct vmstate {
        struct {
                int x_seginval;                 /* [sun4/sun4c] only */
                int x_npmemarr;
                struct memarr x_pmemarr[MA_SIZE];
                struct segmap x_segmap_store[NKREG*NSEGRG];
        } x;
#define seginval        x.x_seginval
#define npmemarr        x.x_npmemarr
#define pmemarr         x.x_pmemarr
#define segmap_store    x.x_segmap_store
        int *pte;                               /* [sun4/sun4c] only */
};
#define NPMEG(vm)       ((vm)->seginval+1)
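/*
 * seginval is the PMEG number the kernel uses to mark an unmapped
 * segment (see the sg_pmeg test in _kvm_kvatop44c()), so the dump's
 * PMEG table holds seginval + 1 groups of nptesg PTEs each.
 */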

static int cputyp = -1;

static int pgshift, nptesg;

#define VA_VPG(va)      ((cputyp == CPU_SUN4C || cputyp == CPU_SUN4M) \
                                ? VA_SUN4C_VPG(va) \
                                : VA_SUN4_VPG(va))

static int _kvm_mustinit __P((kvm_t *));

int _kvm_initvtop44c __P((kvm_t *));
int _kvm_initvtop4m __P((kvm_t *));
int _kvm_kvatop44c __P((kvm_t *, u_long, u_long *));
int _kvm_kvatop4m __P((kvm_t *, u_long, u_long *));

#if 0
static int
getcputyp()
{
        int mib[2];
        size_t size;

        mib[0] = CTL_HW;
        mib[1] = HW_CLASS;
        size = sizeof cputyp;
        if (sysctl(mib, 2, &cputyp, &size, NULL, 0) == -1)
                return (-1);
        return (0);
}
#endif

static int
_kvm_mustinit(kd)
        kvm_t *kd;
{
        static struct nlist nlist[2] = {
# define X_CPUTYP       0
                { "_cputyp" },
                { NULL },
        };
        off_t foff;

        if (cputyp != -1)
                return 0;

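        /* Compute log2 of the page size and the number of PTEs per segment. */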
        for (pgshift = 12; (1 << pgshift) != kd->nbpg; pgshift++)
                ;
        nptesg = NBPSG / kd->nbpg;

        if (kvm_nlist(kd, nlist) != 0) {
                _kvm_err(kd, kd->program, "cannot find `cputyp' symbol");
                return (-1);
        }
        /* Assume kernel mappings are all within first memory bank. */
        foff = nlist[X_CPUTYP].n_value - KERNBASE;
        if (lseek(kd->pmfd, foff, 0) == -1 ||
            read(kd->pmfd, &cputyp, sizeof(cputyp)) < 0) {
                _kvm_err(kd, kd->program, "cannot read `cputyp'");
                return (-1);
        }
        if (cputyp != CPU_SUN4 &&
            cputyp != CPU_SUN4C &&
            cputyp != CPU_SUN4M)
                return (-1);

        return (0);
}

void
_kvm_freevtop(kd)
        kvm_t *kd;
{
        if (kd->vmst != 0) {
                if (kd->vmst->pte != 0)
                        free(kd->vmst->pte);
                free(kd->vmst);
                kd->vmst = 0;
        }
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vmst.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crashdumps.
 * An illustrative (disabled) usage sketch follows the function.
 */
int
_kvm_kvatop(kd, va, pa)
        kvm_t *kd;
        u_long va;
        u_long *pa;
{
        if (_kvm_mustinit(kd) != 0)
                return (-1);

        return ((cputyp == CPU_SUN4M)
                ? _kvm_kvatop4m(kd, va, pa)
                : _kvm_kvatop44c(kd, va, pa));
}
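
#if 0
/*
 * Illustrative sketch only (never compiled): one way a caller such as
 * kvm_read(3) might use _kvm_kvatop() against a crash dump.  The
 * function name and the direct lseek() on kd->pmfd are assumptions made
 * for the example, not the actual libkvm read path.
 */
static ssize_t
example_dump_read(kd, kva, buf, len)
        kvm_t *kd;
        u_long kva;
        char *buf;
        size_t len;
{
        u_long off;
        int cc;

        /* Translate the kernel VA into an offset within the dump. */
        cc = _kvm_kvatop(kd, kva, &off);
        if (cc == 0)
                return (-1);
        /* At most `cc' bytes are contiguous starting at that offset. */
        if ((size_t)cc > len)
                cc = len;
        if (lseek(kd->pmfd, (off_t)off, 0) == -1)
                return (-1);
        return (read(kd->pmfd, buf, cc));
}
#endif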

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU-specific goop written at the
 * end of the crash dump by pmap_dumpmmu().
 * (note: sun4/sun4c 2-level MMU specific)
 */
int
_kvm_initvtop(kd)
        kvm_t *kd;
{
        if (_kvm_mustinit(kd) != 0)
                return (-1);

        return ((cputyp == CPU_SUN4M)
                ? _kvm_initvtop4m(kd)
                : _kvm_initvtop44c(kd));
}

#define VA_OFF(va) (va & (kd->nbpg - 1))


/*
 * We use the MMU-specific goop written at the end of the crash dump
 * by pmap_dumpmmu().
 * (note: sun4 3-level MMU not yet supported)
 */
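/*
 * Tail layout of the dump file as implied by the offset arithmetic
 * below (a sketch of this code's assumptions, not an authoritative
 * description of pmap_dumpmmu()):
 *
 *      ... dumped physical memory ...
 *      PMEG table:   NPMEG(vm) * nptesg PTEs, rounded up to a page
 *      vm->x block:  seginval, pmemarr, segmap_store, rounded up to a page
 *      <end of dump file>
 */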
int
_kvm_initvtop44c(kd)
        kvm_t *kd;
{
        register struct vmstate *vm;
        register int i;
        off_t foff;
        struct stat st;

        if ((vm = kd->vmst) == 0) {
                kd->vmst = vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
                if (vm == 0)
                        return (-1);
        }

        if (fstat(kd->pmfd, &st) < 0)
                return (-1);
        /*
         * Read segment table.
         */

        foff = st.st_size - roundup(sizeof(vm->x), kd->nbpg);
        errno = 0;
        if ((lseek(kd->pmfd, (off_t)foff, 0) == -1 && errno != 0) ||
            read(kd->pmfd, (char *)&vm->x, sizeof(vm->x)) < 0) {
                _kvm_err(kd, kd->program, "cannot read segment map");
                return (-1);
        }

        vm->pte = (int *)_kvm_malloc(kd, NPMEG(vm) * nptesg * sizeof(int));
        if (vm->pte == 0) {
                free(kd->vmst);
                kd->vmst = 0;
                return (-1);
        }

        /*
         * Read PMEGs.
         */
        foff = st.st_size - roundup(sizeof(vm->x), kd->nbpg) -
            roundup(NPMEG(vm) * nptesg * sizeof(int), kd->nbpg);

        errno = 0;
        if ((lseek(kd->pmfd, foff, 0) == -1 && errno != 0) ||
            read(kd->pmfd, (char *)vm->pte, NPMEG(vm) * nptesg * sizeof(int)) < 0) {
                _kvm_err(kd, kd->program, "cannot read PMEG table");
                return (-1);
        }

        return (0);
}

int
_kvm_kvatop44c(kd, va, pa)
        kvm_t *kd;
        u_long va;
        u_long *pa;
{
        register int vr, vs, pte, off, nmem;
        register struct vmstate *vm = kd->vmst;
        struct regmap *rp;
        struct segmap *sp;
        struct memarr *mp;

        if (va < KERNBASE)
                goto err;

        vr = VA_VREG(va);
        vs = VA_VSEG(va);

        sp = &vm->segmap_store[(vr-NUREG)*NSEGRG + vs];
        if (sp->sg_npte == 0)
                goto err;
        if (sp->sg_pmeg == vm->seginval)
                goto err;
        pte = vm->pte[sp->sg_pmeg * nptesg + VA_VPG(va)];
        if ((pte & PG_V) != 0) {
                register long p, dumpoff = 0;

                off = VA_OFF(va);
                p = (pte & PG_PFNUM) << pgshift;
                /* Translate (sparse) pfnum to (packed) dump offset */
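                /*
                 * Hypothetical example: with pmemarr[] = { { 0x0, 8MB },
                 * { 0x10000000, 8MB } }, a physical address p = 0x10004000
                 * falls in the second bank, so dumpoff = 0x800000 and the
                 * packed dump offset becomes 0x800000 + 0x4000 = 0x804000.
                 */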
                for (mp = vm->pmemarr, nmem = vm->npmemarr; --nmem >= 0; mp++) {
                        if (mp->addr <= p && p < mp->addr + mp->len)
                                break;
                        dumpoff += mp->len;
                }
                if (nmem < 0)
                        goto err;
                *pa = (dumpoff + p - mp->addr) | off;
                return (kd->nbpg - off);
        }
err:
        _kvm_err(kd, 0, "invalid address (%lx)", va);
        return (0);
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  Since the sun4m pagetables are all in memory,
 * we use nlist to bootstrap the translation tables.  This assumes that
 * the kernel mappings all reside in the first physical memory bank.
 */
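/*
 * Under that assumption a kernel virtual address maps to dump file
 * offset (va - KERNBASE); that is how the nlist'ed symbols here and
 * the sun4m page table entries in _kvm_kvatop4m() are fetched.
 */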
int
_kvm_initvtop4m(kd)
        kvm_t *kd;
{
        register int i;
        register off_t foff;
        register struct vmstate *vm;
        struct stat st;
        static struct nlist nlist[4] = {
# define X_KSEGSTORE    0
                { "_kernel_segmap_store" },
# define X_PMEMARR      1
                { "_pmemarr" },
# define X_NPMEMARR     2
                { "_npmemarr" },
                { NULL },
        };

        if ((vm = kd->vmst) == 0) {
                kd->vmst = vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
                if (vm == 0)
                        return (-1);
        }

        if (kvm_nlist(kd, nlist) != 0) {
                _kvm_err(kd, kd->program, "cannot read symbols");
                return (-1);
        }

        /* Assume kernel mappings are all within first memory bank. */
        foff = nlist[X_KSEGSTORE].n_value - KERNBASE;
        if (lseek(kd->pmfd, foff, 0) == -1 ||
            read(kd->pmfd, vm->segmap_store, sizeof(vm->segmap_store)) < 0) {
                _kvm_err(kd, kd->program, "cannot read segment map");
                return (-1);
        }

        foff = nlist[X_PMEMARR].n_value - KERNBASE;
        if (lseek(kd->pmfd, foff, 0) == -1 ||
            read(kd->pmfd, vm->pmemarr, sizeof(vm->pmemarr)) < 0) {
                _kvm_err(kd, kd->program, "cannot read pmemarr");
                return (-1);
        }

        foff = nlist[X_NPMEMARR].n_value - KERNBASE;
        if (lseek(kd->pmfd, foff, 0) == -1 ||
            read(kd->pmfd, &vm->npmemarr, sizeof(vm->npmemarr)) < 0) {
                _kvm_err(kd, kd->program, "cannot read npmemarr");
                return (-1);
        }

        return (0);
}

int
_kvm_kvatop4m(kd, va, pa)
        kvm_t *kd;
        u_long va;
        u_long *pa;
{
        register struct vmstate *vm = kd->vmst;
        register int vr, vs, nmem, off;
        int pte;
        off_t foff;
        struct regmap *rp;
        struct segmap *sp;
        struct memarr *mp;

        if (va < KERNBASE)
                goto err;

        vr = VA_VREG(va);
        vs = VA_VSEG(va);

        sp = &vm->segmap_store[(vr-NUREG)*NSEGRG + vs];
        if (sp->sg_npte == 0)
                goto err;

        /* Assume kernel mappings are all within first memory bank. */
        foff = (long)&sp->sg_pte[VA_VPG(va)] - KERNBASE;
        if (lseek(kd->pmfd, foff, 0) == -1 ||
            read(kd->pmfd, (void *)&pte, sizeof(pte)) < 0) {
                _kvm_err(kd, kd->program, "cannot read pte");
                goto err;
        }

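        /*
         * Only a leaf PTE maps a page; invalid entries and page-table
         * descriptors cannot be translated to a physical address.
         */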
        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
                register long p, dumpoff = 0;

                off = VA_OFF(va);
                p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
                /* Translate (sparse) pfnum to (packed) dump offset */
                for (mp = vm->pmemarr, nmem = vm->npmemarr; --nmem >= 0; mp++) {
                        if (mp->addr <= p && p < mp->addr + mp->len)
                                break;
                        dumpoff += mp->len;
                }
                if (nmem < 0)
                        goto err;
                *pa = (dumpoff + p - mp->addr) | off;
                return (kd->nbpg - off);
        }
err:
        _kvm_err(kd, 0, "invalid address (%lx)", va);
        return (0);
}