/*	$NetBSD: kvm_sparc.c,v 1.28 2003/08/07 16:44:39 agc Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_sparc.c,v 1.28 2003/08/07 16:44:39 agc Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>
#include <machine/kcore.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"


static int cputyp = -1;
static int pgshift;
static int nptesg;	/* [sun4/sun4c] only */

#undef VA_VPG
#define VA_VPG(va)	((cputyp == CPU_SUN4C || cputyp == CPU_SUN4M) \
				? VA_SUN4C_VPG(va) \
				: VA_SUN4_VPG(va))

#undef VA_OFF
#define VA_OFF(va)	(va & (kd->nbpg - 1))

int _kvm_kvatop44c __P((kvm_t *, u_long, u_long *));
int _kvm_kvatop4m __P((kvm_t *, u_long, u_long *));
int _kvm_kvatop4u __P((kvm_t *, u_long, u_long *));

/*
 * XXX
 * taken from /sys/arch/sparc64/include/kcore.h.
 * this is the same as the sparc one, except for the kphys addition,
 * so luckily we can use this here...
 */
typedef struct sparc64_cpu_kcore_hdr {
	int	cputype;		/* CPU type associated with this dump */
	u_long	kernbase;		/* copy of KERNBASE goes here */
	int	nmemseg;		/* # of physical memory segments */
	u_long	memsegoffset;		/* start of memseg array (relative */
					/*  to the start of this header) */
	int	nsegmap;		/* # of segmaps following */
	u_long	segmapoffset;		/* start of segmap array (relative */
					/*  to the start of this header) */
	int	npmeg;			/* # of PMEGs; [sun4/sun4c] only */
	u_long	pmegoffset;		/* start of pmeg array (relative */
					/*  to the start of this header) */
	/* SPARC64 stuff */
	paddr_t	kphys;			/* Physical address of 4MB locked TLB */
} sparc64_cpu_kcore_hdr_t;

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = 0;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files. We use the MMU specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 */
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;

	switch (cputyp = cpup->cputype) {
	case CPU_SUN4:
	case CPU_SUN4U:
		kd->nbpg = 8192;
		pgshift = 13;
		break;
	case CPU_SUN4C:
	case CPU_SUN4M:
		kd->nbpg = 4096;
		pgshift = 12;
		break;
	default:
		_kvm_err(kd, kd->program, "Unsupported CPU type");
		return (-1);
	}
	nptesg = NBPSG / kd->nbpg;
	return (0);
}
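
#if 0
/*
 * Illustrative sketch, not part of the original source: a sanity check of
 * the values _kvm_initvtop() establishes above.  They satisfy
 * nbpg == 1 << pgshift, and nptesg is the number of hardware pages covered
 * by one pmeg-mapped segment; with the 256KB sparc segment size (NBPSG)
 * that is 64 pages for 4KB-page (sun4c/sun4m) dumps and 32 pages for
 * 8KB-page (sun4/sun4u) dumps.  The helper name is hypothetical.
 */
#include <assert.h>

static void
_kvm_check_mmu_params(kvm_t *kd)
{
	assert(kd->nbpg == (1 << pgshift));
	assert(nptesg == NBPSG / kd->nbpg);
}
#endif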

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crash dumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	if (cputyp == -1)
		if (_kvm_initvtop(kd) != 0)
			return (-1);

	switch (cputyp) {
	case CPU_SUN4:
	case CPU_SUN4C:
		return _kvm_kvatop44c(kd, va, pa);
		break;
	case CPU_SUN4M:
		return _kvm_kvatop4m(kd, va, pa);
		break;
	case CPU_SUN4U:
	default:
		return _kvm_kvatop4u(kd, va, pa);
	}
}
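
#if 0
/*
 * Illustrative sketch, not part of the original source: how a libkvm
 * consumer ends up in _kvm_kvatop().  When kvm_read() is applied to a dead
 * kernel, libkvm translates each kernel virtual address via _kvm_kvatop()
 * and _kvm_pa2off() before seeking in the dump file.  The file names and
 * the "_dumpmag" symbol are arbitrary examples.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <kvm.h>
#include <nlist.h>

static int
example_read_dump(const char *kernel, const char *dump)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	u_long dumpmag;
	kvm_t *kd;

	kd = kvm_openfiles(kernel, dump, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (-1);
	}

	memset(nl, 0, sizeof(nl));
	nl[0].n_name = "_dumpmag";

	if (kvm_nlist(kd, nl) != 0 ||
	    kvm_read(kd, nl[0].n_value, &dumpmag,
	    sizeof(dumpmag)) != sizeof(dumpmag)) {
		(void)kvm_close(kd);
		return (-1);
	}

	(void)printf("dumpmag = %lx\n", dumpmag);
	return (kvm_close(kd));
}
#endif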

/*
 * (note: sun4 3-level MMU not yet supported)
 */
int
_kvm_kvatop44c(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	int vr, vs, pte;
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	struct segmap *sp, *segmaps;
	int *ptes;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 *	ptes[cpup->npmegs];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	ptes = (int *)((long)kd->cpu_data + cpup->pmegoffset);
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;
	if (sp->sg_pmeg == cpup->npmeg - 1) /* =seginval */
		goto err;
	pte = ptes[sp->sg_pmeg * nptesg + VA_VPG(va)];
	if ((pte & PG_V) != 0) {
		long p, off = VA_OFF(va);

		p = (pte & PG_PFNUM) << pgshift;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

int
_kvm_kvatop4m(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	int vr, vs;
	int pte;
	off_t foff;
	struct segmap *sp, *segmaps;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;

	/* XXX - assume page tables in initial kernel DATA or BSS. */
	foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - kernbase);
	if (foff == (off_t)-1)
		return (0);

	if (pread(kd->pmfd, &pte, sizeof(pte), foff) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "cannot read pte for %lx", va);
		return (0);
	}

	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
		long p, off = VA_OFF(va);

		p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * sparc64 pmap's 32-bit page table format
 */
int
_kvm_kvatop4u(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	int64_t **segmaps;
	int64_t *ptes;
	int64_t pte;
	int64_t kphys = cpup->kphys;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Kernel layout:
	 *
	 * kernbase:
	 *	4MB locked TLB (text+data+BSS)
	 * Random other stuff.
	 */
	if (va >= kernbase && va < kernbase + 4*1024*1024) {
		/* Linear translation within the locked 4MB mapping. */
		*pa = (va - kernbase) + kphys;
		return (kd->nbpg - VA_OFF(va));
	}

/* XXX: from sparc64/include/pmap.h */
#define	SPARC64_PTSZ		(kd->nbpg/8)
#define	SPARC64_STSZ		(SPARC64_PTSZ)
#define	SPARC64_PTMASK		(SPARC64_PTSZ-1)
#define	SPARC64_PTSHIFT		(13)
#define	SPARC64_PDSHIFT		(10+SPARC64_PTSHIFT)
#define	SPARC64_STSHIFT		(10+SPARC64_PDSHIFT)
#define	SPARC64_STMASK		(SPARC64_STSZ-1)
#define	sparc64_va_to_seg(v)	(int)((((int64_t)(v))>>SPARC64_STSHIFT)&SPARC64_STMASK)
#define	sparc64_va_to_pte(v)	(int)((((int64_t)(v))>>SPARC64_PTSHIFT)&SPARC64_PTMASK)

/* XXX: from sparc64/include/pte.h */
#define	SPARC64_TLB_V			0x8000000000000000LL
#define	SPARC64_TLB_PA_MASK		0x000001ffffffe000LL

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 */
	segmaps = (int64_t **)((long)kd->cpu_data + cpup->segmapoffset);
	/* XXX XXX XXX _kvm_pa2off takes u_long and returns off_t..
	   should take off_t also!! */

	ptes = (int64_t *)(int)_kvm_pa2off(kd, (u_long)segmaps[sparc64_va_to_seg(va)]);
	pte = ptes[sparc64_va_to_pte(va)];
	if ((pte & SPARC64_TLB_V) != 0) {
		*pa = (pte & SPARC64_TLB_PA_MASK) | (va & (kd->nbpg - 1));
		return (kd->nbpg - VA_OFF(va));
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
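
#if 0
/*
 * Illustrative sketch, not part of the original source: with the 8KB page
 * size used for CPU_SUN4U dumps, SPARC64_PTSZ is 1024, so the macros above
 * decompose a kernel virtual address as
 *
 *	segment-table index = (va >> 33) & 0x3ff
 *	page-table index    = (va >> 13) & 0x3ff
 *	page offset         = va & 0x1fff
 *
 * The hypothetical helper below just restates that arithmetic.
 */
static void
sparc64_split_va(kvm_t *kd, u_long va, int *seg, int *ptei, u_long *off)
{
	*seg = sparc64_va_to_seg(va);
	*ptei = sparc64_va_to_pte(va);
	*off = va & (kd->nbpg - 1);
}
#endif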


/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t	*kd;
	u_long	pa;
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}
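
/*
 * Illustrative worked example, not part of the original source: suppose the
 * dump header lists two physical memory segments,
 *
 *	{ start = 0x00000000, size = 0x01000000 }	(16MB)
 *	{ start = 0x10000000, size = 0x01000000 }	(16MB)
 *
 * Then pa == 0x10002000 falls in the second segment, the first segment
 * contributes 0x01000000 bytes of preceding dump data, and _kvm_pa2off()
 * returns
 *
 *	kd->dump_off + 0x01000000 + (0x10002000 - 0x10000000)
 *	    == kd->dump_off + 0x01002000
 */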

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
int
_kvm_mdopen(kd)
	kvm_t	*kd;
{
	u_long max_uva;
	extern struct ps_strings *__ps_strings;

	max_uva = (u_long) (__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva  = max_uva;
	kd->min_uva  = 0;

	return (0);
}