kvm_sparc.c revision 1.25 1 1.25 matt /* $NetBSD: kvm_sparc.c,v 1.25 2001/08/05 03:33:15 matt Exp $ */
2 1.8 thorpej
3 1.1 cgd /*-
4 1.1 cgd * Copyright (c) 1992, 1993
5 1.1 cgd * The Regents of the University of California. All rights reserved.
6 1.1 cgd *
7 1.1 cgd * This code is derived from software developed by the Computer Systems
8 1.1 cgd * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
9 1.1 cgd * BG 91-66 and contributed to Berkeley.
10 1.1 cgd *
11 1.1 cgd * Redistribution and use in source and binary forms, with or without
12 1.1 cgd * modification, are permitted provided that the following conditions
13 1.1 cgd * are met:
14 1.1 cgd * 1. Redistributions of source code must retain the above copyright
15 1.1 cgd * notice, this list of conditions and the following disclaimer.
16 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 cgd * notice, this list of conditions and the following disclaimer in the
18 1.1 cgd * documentation and/or other materials provided with the distribution.
19 1.1 cgd * 3. All advertising materials mentioning features or use of this software
20 1.1 cgd * must display the following acknowledgement:
21 1.1 cgd * This product includes software developed by the University of
22 1.1 cgd * California, Berkeley and its contributors.
23 1.1 cgd * 4. Neither the name of the University nor the names of its contributors
24 1.1 cgd * may be used to endorse or promote products derived from this software
25 1.1 cgd * without specific prior written permission.
26 1.1 cgd *
27 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 1.1 cgd * SUCH DAMAGE.
38 1.1 cgd */
39 1.1 cgd
40 1.13 mikel #include <sys/cdefs.h>
41 1.1 cgd #if defined(LIBC_SCCS) && !defined(lint)
42 1.8 thorpej #if 0
43 1.1 cgd static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
44 1.8 thorpej #else
45 1.25 matt __RCSID("$NetBSD: kvm_sparc.c,v 1.25 2001/08/05 03:33:15 matt Exp $");
46 1.8 thorpej #endif
47 1.1 cgd #endif /* LIBC_SCCS and not lint */
48 1.1 cgd
49 1.1 cgd /*
50 1.21 simonb * Sparc machine dependent routines for kvm. Hopefully, the forthcoming
51 1.1 cgd * vm code will one day obsolete this module.
52 1.1 cgd */
53 1.1 cgd
54 1.1 cgd #include <sys/param.h>
55 1.14 pk #include <sys/exec.h>
56 1.1 cgd #include <sys/user.h>
57 1.1 cgd #include <sys/proc.h>
58 1.1 cgd #include <sys/stat.h>
59 1.10 pk #include <sys/core.h>
60 1.10 pk #include <sys/kcore.h>
61 1.1 cgd #include <unistd.h>
62 1.1 cgd #include <nlist.h>
63 1.1 cgd #include <kvm.h>
64 1.1 cgd
65 1.23 mrg #include <uvm/uvm_extern.h>
66 1.22 mrg
67 1.25 matt #include <machine/pmap.h>
68 1.10 pk #include <machine/kcore.h>
69 1.1 cgd
70 1.1 cgd #include <limits.h>
71 1.1 cgd #include <db.h>
72 1.1 cgd
73 1.1 cgd #include "kvm_private.h"
74 1.1 cgd
75 1.1 cgd
static int cputyp = -1;	/* CPU type from the dump header; -1 = not yet initialized */
static int pgshift;	/* log2 of the MMU page size */
static int nptesg;	/* PTEs per segment; [sun4/sun4c] only */

/*
 * Select the page-within-segment extraction macro for the MMU variant:
 * sun4c and sun4m share the 4K-page layout, sun4 uses the 8K-page one.
 */
#define VA_VPG(va)	((cputyp == CPU_SUN4C || cputyp == CPU_SUN4M) \
			 ? VA_SUN4C_VPG(va) \
			 : VA_SUN4_VPG(va))

/* Byte offset of `va' within its page (kd->nbpg must be a power of two). */
#define VA_OFF(va)	(va & (kd->nbpg - 1))

int _kvm_kvatop44c __P((kvm_t *, u_long, u_long *));
int _kvm_kvatop4m  __P((kvm_t *, u_long, u_long *));
int _kvm_kvatop4u  __P((kvm_t *, u_long, u_long *));
89 1.20 mrg
/*
 * XXX
 * taken from /sys/arch/sparc64/include/kcore.h.
 * this is the same as the sparc one, except for the kphys addition,
 * so luckily we can use this here...
 *
 * NOTE(review): this mirrors the kernel's dump-header layout; the field
 * order and types must match what pmap_dumpmmu() writes — do not reorder.
 */
typedef struct sparc64_cpu_kcore_hdr {
	int	cputype;		/* CPU type associated with this dump */
	u_long	kernbase;		/* copy of KERNBASE goes here */
	int	nmemseg;		/* # of physical memory segments */
	u_long	memsegoffset;		/* start of memseg array (relative */
					/*  to the start of this header) */
	int	nsegmap;		/* # of segmaps following */
	u_long	segmapoffset;		/* start of segmap array (relative */
					/*  to the start of this header) */
	int	npmeg;			/* # of PMEGs; [sun4/sun4c] only */
	u_long	pmegoffset;		/* start of pmeg array (relative */
					/*  to the start of this header) */
	/* SPARC64 stuff */
	paddr_t	kphys;			/* Physical address of 4MB locked TLB */
} sparc64_cpu_kcore_hdr_t;
111 1.9 cgd
112 1.10 pk void
113 1.10 pk _kvm_freevtop(kd)
114 1.10 pk kvm_t *kd;
115 1.9 cgd {
116 1.10 pk if (kd->vmst != 0) {
117 1.10 pk _kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
118 1.10 pk kd->vmst = 0;
119 1.10 pk }
120 1.9 cgd }
121 1.9 cgd
122 1.10 pk /*
123 1.10 pk * Prepare for translation of kernel virtual addresses into offsets
124 1.10 pk * into crash dump files. We use the MMU specific goop written at the
125 1.10 pk * front of the crash dump by pmap_dumpmmu().
126 1.10 pk */
127 1.10 pk int
128 1.10 pk _kvm_initvtop(kd)
129 1.3 deraadt kvm_t *kd;
130 1.3 deraadt {
131 1.20 mrg sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
132 1.9 cgd
133 1.10 pk switch (cputyp = cpup->cputype) {
134 1.10 pk case CPU_SUN4:
135 1.19 eeh case CPU_SUN4U:
136 1.10 pk kd->nbpg = 8196;
137 1.10 pk pgshift = 13;
138 1.10 pk break;
139 1.10 pk case CPU_SUN4C:
140 1.10 pk case CPU_SUN4M:
141 1.10 pk kd->nbpg = 4096;
142 1.10 pk pgshift = 12;
143 1.10 pk break;
144 1.10 pk default:
145 1.10 pk _kvm_err(kd, kd->program, "Unsupported CPU type");
146 1.9 cgd return (-1);
147 1.9 cgd }
148 1.10 pk nptesg = NBPSG / kd->nbpg;
149 1.9 cgd return (0);
150 1.3 deraadt }
151 1.3 deraadt
152 1.7 pk /*
153 1.9 cgd * Translate a kernel virtual address to a physical address using the
154 1.9 cgd * mapping information in kd->vm. Returns the result in pa, and returns
155 1.21 simonb * the number of bytes that are contiguously available from this
156 1.9 cgd * physical address. This routine is used only for crashdumps.
157 1.9 cgd */
158 1.9 cgd int
159 1.9 cgd _kvm_kvatop(kd, va, pa)
160 1.9 cgd kvm_t *kd;
161 1.9 cgd u_long va;
162 1.9 cgd u_long *pa;
163 1.9 cgd {
164 1.10 pk if (cputyp == -1)
165 1.10 pk if (_kvm_initvtop(kd) != 0)
166 1.10 pk return (-1);
167 1.9 cgd
168 1.19 eeh switch (cputyp) {
169 1.19 eeh case CPU_SUN4:
170 1.19 eeh case CPU_SUN4C:
171 1.19 eeh return _kvm_kvatop44c(kd, va, pa);
172 1.19 eeh break;
173 1.19 eeh case CPU_SUN4M:
174 1.19 eeh return _kvm_kvatop4m(kd, va, pa);
175 1.19 eeh break;
176 1.19 eeh case CPU_SUN4U:
177 1.19 eeh default:
178 1.19 eeh return _kvm_kvatop4u(kd, va, pa);
179 1.19 eeh }
180 1.9 cgd }
181 1.9 cgd
182 1.9 cgd /*
183 1.9 cgd * (note: sun4 3-level MMU not yet supported)
184 1.9 cgd */
185 1.9 cgd int
186 1.9 cgd _kvm_kvatop44c(kd, va, pa)
187 1.1 cgd kvm_t *kd;
188 1.1 cgd u_long va;
189 1.1 cgd u_long *pa;
190 1.1 cgd {
191 1.16 perry int vr, vs, pte;
192 1.20 mrg sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
193 1.14 pk struct segmap *sp, *segmaps;
194 1.10 pk int *ptes;
195 1.14 pk int nkreg, nureg;
196 1.14 pk u_long kernbase = cpup->kernbase;
197 1.1 cgd
198 1.14 pk if (va < kernbase)
199 1.7 pk goto err;
200 1.7 pk
201 1.10 pk /*
202 1.10 pk * Layout of CPU segment:
203 1.10 pk * cpu_kcore_hdr_t;
204 1.10 pk * [alignment]
205 1.10 pk * phys_ram_seg_t[cpup->nmemseg];
206 1.14 pk * segmap[cpup->nsegmap];
207 1.10 pk * ptes[cpup->npmegs];
208 1.10 pk */
209 1.14 pk segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
210 1.10 pk ptes = (int *)((int)kd->cpu_data + cpup->pmegoffset);
211 1.14 pk nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
212 1.14 pk nureg = 256 - nkreg;
213 1.10 pk
214 1.7 pk vr = VA_VREG(va);
215 1.7 pk vs = VA_VSEG(va);
216 1.1 cgd
217 1.14 pk sp = &segmaps[(vr-nureg)*NSEGRG + vs];
218 1.7 pk if (sp->sg_npte == 0)
219 1.7 pk goto err;
220 1.11 pk if (sp->sg_pmeg == cpup->npmeg - 1) /* =seginval */
221 1.7 pk goto err;
222 1.10 pk pte = ptes[sp->sg_pmeg * nptesg + VA_VPG(va)];
223 1.7 pk if ((pte & PG_V) != 0) {
224 1.16 perry long p, off = VA_OFF(va);
225 1.7 pk
226 1.7 pk p = (pte & PG_PFNUM) << pgshift;
227 1.10 pk *pa = p + off;
228 1.7 pk return (kd->nbpg - off);
229 1.1 cgd }
230 1.7 pk err:
231 1.24 sommerfe _kvm_err(kd, 0, "invalid address (%lx)", va);
232 1.1 cgd return (0);
233 1.1 cgd }
234 1.4 deraadt
/*
 * Translate a kernel VA on a sun4m (SRMMU).  The level-3 page tables
 * live inside the dumped kernel image, so the pte must be read back
 * from the core file itself via pread().
 */
int
_kvm_kvatop4m(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	int vr, vs;
	int pte;
	off_t foff;
	struct segmap *sp, *segmaps;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	/* Only kernel addresses can be translated from the dump. */
	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	/* # of kernel regions from kernbase up to the top of the VA space. */
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;

	/* XXX - assume page tables in initial kernel DATA or BSS. */
	foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - kernbase);
	if (foff == (off_t)-1)
		return (0);

	/* Fetch the pte for this page from the core file. */
	if (pread(kd->pmfd, &pte, sizeof(pte), foff) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "cannot read pte for %lx", va);
		return (0);
	}

	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
		long p, off = VA_OFF(va);

		p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
		*pa = p + off;
		/* Contiguous bytes available: the remainder of this page. */
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
291 1.19 eeh
292 1.19 eeh /*
293 1.20 mrg * sparc64 pmap's 32-bit page table format
294 1.19 eeh */
295 1.19 eeh int
296 1.19 eeh _kvm_kvatop4u(kd, va, pa)
297 1.19 eeh kvm_t *kd;
298 1.19 eeh u_long va;
299 1.19 eeh u_long *pa;
300 1.19 eeh {
301 1.20 mrg sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
302 1.19 eeh int64_t **segmaps;
303 1.19 eeh int64_t *ptes;
304 1.19 eeh int64_t pte;
305 1.20 mrg int64_t kphys = cpup->kphys;
306 1.19 eeh u_long kernbase = cpup->kernbase;
307 1.19 eeh
308 1.19 eeh if (va < kernbase)
309 1.19 eeh goto err;
310 1.19 eeh
311 1.21 simonb /*
312 1.19 eeh * Kernel layout:
313 1.19 eeh *
314 1.19 eeh * kernbase:
315 1.19 eeh * 4MB locked TLB (text+data+BSS)
316 1.21 simonb * Random other stuff.
317 1.19 eeh */
318 1.20 mrg if (va >= kernbase && va < kernbase + 4*1024*1024)
319 1.19 eeh return (va - kernbase) + kphys;
320 1.19 eeh
321 1.20 mrg /* XXX: from sparc64/include/pmap.h */
322 1.20 mrg #define SPARC64_PTSZ (kd->nbpg/8)
323 1.20 mrg #define SPARC64_STSZ (SPARC64_PTSZ)
324 1.20 mrg #define SPARC64_PTMASK (SPARC64_PTSZ-1)
325 1.20 mrg #define SPARC64_PTSHIFT (13)
326 1.20 mrg #define SPARC64_PDSHIFT (10+SPARC64_PTSHIFT)
327 1.20 mrg #define SPARC64_STSHIFT (10+SPARC64_PDSHIFT)
328 1.20 mrg #define SPARC64_STMASK (SPARC64_STSZ-1)
329 1.20 mrg #define sparc64_va_to_seg(v) (int)((((int64_t)(v))>>SPARC64_STSHIFT)&SPARC64_STMASK)
330 1.20 mrg #define sparc64_va_to_pte(v) (int)((((int64_t)(v))>>SPARC64_PTSHIFT)&SPARC64_PTMASK)
331 1.20 mrg
332 1.20 mrg /* XXX: from sparc64/include/pte.h */
333 1.20 mrg #define SPARC64_TLB_V 0x8000000000000000LL
334 1.20 mrg #define SPARC64_TLB_PA_MASK 0x000001ffffffe000LL
335 1.20 mrg
336 1.19 eeh /*
337 1.19 eeh * Layout of CPU segment:
338 1.19 eeh * cpu_kcore_hdr_t;
339 1.19 eeh * [alignment]
340 1.19 eeh * phys_ram_seg_t[cpup->nmemseg];
341 1.19 eeh * segmap[cpup->nsegmap];
342 1.19 eeh */
343 1.20 mrg segmaps = (int64_t **)((long)kd->cpu_data + cpup->segmapoffset);
344 1.20 mrg /* XXX XXX XXX _kvm_pa2off takes u_long and returns off_t..
345 1.20 mrg should take off_t also!! */
346 1.20 mrg
347 1.20 mrg ptes = (int64_t *)(int)_kvm_pa2off(kd, (u_long)segmaps[sparc64_va_to_seg(va)]);
348 1.20 mrg pte = ptes[sparc64_va_to_pte(va)];
349 1.20 mrg if ((pte & SPARC64_TLB_V) != 0)
350 1.20 mrg return ((pte & SPARC64_TLB_PA_MASK) | (va & (kd->nbpg - 1)));
351 1.19 eeh err:
352 1.24 sommerfe _kvm_err(kd, 0, "invalid address (%lx)", va);
353 1.19 eeh return (0);
354 1.19 eeh }
355 1.19 eeh
356 1.10 pk
357 1.21 simonb /*
358 1.10 pk * Translate a physical address to a file-offset in the crash-dump.
359 1.21 simonb */
360 1.10 pk off_t
361 1.10 pk _kvm_pa2off(kd, pa)
362 1.10 pk kvm_t *kd;
363 1.10 pk u_long pa;
364 1.10 pk {
365 1.20 mrg sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
366 1.10 pk phys_ram_seg_t *mp;
367 1.10 pk off_t off;
368 1.10 pk int nmem;
369 1.10 pk
370 1.10 pk /*
371 1.10 pk * Layout of CPU segment:
372 1.10 pk * cpu_kcore_hdr_t;
373 1.10 pk * [alignment]
374 1.10 pk * phys_ram_seg_t[cpup->nmemseg];
375 1.10 pk */
376 1.10 pk mp = (phys_ram_seg_t *)((int)kd->cpu_data + cpup->memsegoffset);
377 1.10 pk off = 0;
378 1.10 pk
379 1.10 pk /* Translate (sparse) pfnum to (packed) dump offset */
380 1.10 pk for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
381 1.10 pk if (mp->start <= pa && pa < mp->start + mp->size)
382 1.10 pk break;
383 1.10 pk off += mp->size;
384 1.10 pk }
385 1.10 pk if (nmem < 0) {
386 1.24 sommerfe _kvm_err(kd, 0, "invalid address (%lx)", pa);
387 1.10 pk return (-1);
388 1.10 pk }
389 1.10 pk
390 1.10 pk return (kd->dump_off + off + pa - mp->start);
391 1.12 gwr }
392 1.12 gwr
393 1.12 gwr /*
394 1.12 gwr * Machine-dependent initialization for ALL open kvm descriptors,
395 1.12 gwr * not just those for a kernel crash dump. Some architectures
396 1.12 gwr * have to deal with these NOT being constants! (i.e. m68k)
397 1.12 gwr */
398 1.12 gwr int
399 1.12 gwr _kvm_mdopen(kd)
400 1.12 gwr kvm_t *kd;
401 1.12 gwr {
402 1.14 pk u_long max_uva;
403 1.14 pk extern struct ps_strings *__ps_strings;
404 1.12 gwr
405 1.14 pk max_uva = (u_long) (__ps_strings + 1);
406 1.14 pk kd->usrstack = max_uva;
407 1.14 pk kd->max_uva = max_uva;
408 1.14 pk kd->min_uva = 0;
409 1.12 gwr
410 1.12 gwr return (0);
411 1.4 deraadt }
412