/*	$NetBSD: kvm_sparc.c,v 1.31 2010/09/19 02:07:00 jym Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_sparc.c,v 1.31 2010/09/19 02:07:00 jym Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <sparc/pmap.h>
#include <sparc/kcore.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"


static int cputyp = -1;
static int pgshift;
static int nptesg;	/* [sun4/sun4c] only */

#undef VA_VPG
#define VA_VPG(va)	((cputyp == CPU_SUN4C || cputyp == CPU_SUN4M) \
				? VA_SUN4C_VPG(va) \
				: VA_SUN4_VPG(va))

#undef VA_OFF
#define VA_OFF(va)	(va & (kd->nbpg - 1))
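/*
 * VA_VPG selects the page-within-segment index; it differs between the
 * 8KB-page sun4 and the 4KB-page sun4c/sun4m, so pick the right variant
 * at run time from cputyp.  VA_OFF is the byte offset within a page.
 */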

int _kvm_kvatop44c(kvm_t *, u_long, u_long *);
int _kvm_kvatop4m (kvm_t *, u_long, u_long *);
int _kvm_kvatop4u (kvm_t *, u_long, u_long *);

/*
 * XXX
 * taken from /sys/arch/sparc64/include/kcore.h.
 * this is the same as the sparc one, except for the kphys addition,
 * so luckily we can use this here...
 */
typedef struct sparc64_cpu_kcore_hdr {
	int	cputype;		/* CPU type associated with this dump */
	u_long	kernbase;		/* copy of KERNBASE goes here */
	int	nmemseg;		/* # of physical memory segments */
	u_long	memsegoffset;		/* start of memseg array (relative */
					/*  to the start of this header) */
	int	nsegmap;		/* # of segmaps following */
	u_long	segmapoffset;		/* start of segmap array (relative */
					/*  to the start of this header) */
	int	npmeg;			/* # of PMEGs; [sun4/sun4c] only */
	u_long	pmegoffset;		/* start of pmeg array (relative */
					/*  to the start of this header) */
	/* SPARC64 stuff */
	paddr_t	kphys;			/* Physical address of 4MB locked TLB */
} sparc64_cpu_kcore_hdr_t;

void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != 0) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = 0;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 */
int
_kvm_initvtop(kvm_t *kd)
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;

	switch (cputyp = cpup->cputype) {
	case CPU_SUN4:
	case CPU_SUN4U:
		kd->nbpg = 8192;
		pgshift = 13;
		break;
	case CPU_SUN4C:
	case CPU_SUN4M:
		kd->nbpg = 4096;
		pgshift = 12;
		break;
	default:
		_kvm_err(kd, kd->program, "Unsupported CPU type");
		return (-1);
	}
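	/*
	 * A segment covers NBPSG bytes, so this is the number of PTEs
	 * held by one PMEG on sun4/sun4c.
	 */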
	nptesg = NBPSG / kd->nbpg;
	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * MMU data saved in the crash dump header (kd->cpu_data).  Returns the
 * result in *pa, and returns the number of bytes that are contiguously
 * available from this physical address.  This routine is used only for
 * crash dumps.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, u_long *pa)
{
	if (cputyp == -1)
		if (_kvm_initvtop(kd) != 0)
			return (-1);

	switch (cputyp) {
	case CPU_SUN4:
	case CPU_SUN4C:
		return _kvm_kvatop44c(kd, va, pa);
		break;
	case CPU_SUN4M:
		return _kvm_kvatop4m(kd, va, pa);
		break;
	case CPU_SUN4U:
	default:
		return _kvm_kvatop4u(kd, va, pa);
	}
}

/*
 * (note: sun4 3-level MMU not yet supported)
 */
int
_kvm_kvatop44c(kvm_t *kd, u_long va, u_long *pa)
{
	int vr, vs, pte;
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	struct segmap *sp, *segmaps;
	int *ptes;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 *	ptes[cpup->npmegs];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	ptes = (int *)((long)kd->cpu_data + cpup->pmegoffset);
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;
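	/*
	 * A 32-bit address space holds 256 regions of NBPRG bytes; the
	 * kernel owns the top nkreg of them, and the dumped segmap array
	 * covers only those, so kernel region numbers are biased by nureg.
	 */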

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;
	if (sp->sg_pmeg == cpup->npmeg - 1) /* =seginval */
		goto err;
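	/*
	 * The dumped PTE array holds nptesg entries per PMEG; index by
	 * PMEG number, then by page-within-segment.
	 */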
	pte = ptes[sp->sg_pmeg * nptesg + VA_VPG(va)];
	if ((pte & PG_V) != 0) {
		long p, off = VA_OFF(va);

		p = (pte & PG_PFNUM) << pgshift;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

int
_kvm_kvatop4m(kvm_t *kd, u_long va, u_long *pa)
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	int vr, vs;
	int pte;
	off_t foff;
	struct segmap *sp, *segmaps;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;

	/* XXX - assume page tables in initial kernel DATA or BSS. */
	foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - kernbase);
	if (foff == (off_t)-1)
		return (0);

	if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte), foff) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "cannot read pte for %lx", va);
		return (0);
	}

	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
		long p, off = VA_OFF(va);

		p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * sparc64 pmap's 32-bit page table format
 */
int
_kvm_kvatop4u(kvm_t *kd, u_long va, u_long *pa)
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	int64_t **segmaps;
	int64_t *ptes;
	int64_t pte;
	int64_t kphys = cpup->kphys;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Kernel layout:
	 *
	 * kernbase:
	 *	4MB locked TLB (text+data+BSS)
	 *	Random other stuff.
	 */
	if (va >= kernbase && va < kernbase + 4*1024*1024) {
		*pa = (u_long)((va - kernbase) + kphys);
		return (kernbase + 4*1024*1024 - va);
	}

/* XXX: from sparc64/include/pmap.h */
#define	SPARC64_PTSZ		(kd->nbpg/8)
#define	SPARC64_STSZ		(SPARC64_PTSZ)
#define	SPARC64_PTMASK		(SPARC64_PTSZ-1)
#define	SPARC64_PTSHIFT		(13)
#define	SPARC64_PDSHIFT		(10+SPARC64_PTSHIFT)
#define	SPARC64_STSHIFT		(10+SPARC64_PDSHIFT)
#define	SPARC64_STMASK		(SPARC64_STSZ-1)
#define	sparc64_va_to_seg(v)	(int)((((int64_t)(v))>>SPARC64_STSHIFT)&SPARC64_STMASK)
#define	sparc64_va_to_pte(v)	(int)((((int64_t)(v))>>SPARC64_PTSHIFT)&SPARC64_PTMASK)
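
/*
 * These macros describe a two-level walk of the (32-bit era) sparc64
 * kernel page tables: the upper VA bits select a segment-table slot,
 * and the next 10 bits select one of the 8-byte PTEs in that segment's
 * page table.
 */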

/* XXX: from sparc64/include/pte.h */
#define	SPARC64_TLB_V		0x8000000000000000LL
#define	SPARC64_TLB_PA_MASK	0x000001ffffffe000LL

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 */
	segmaps = (int64_t **)((long)kd->cpu_data + cpup->segmapoffset);
	/* XXX XXX XXX _kvm_pa2off takes u_long and returns off_t..
	   should take off_t also!! */

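	/*
	 * The dumped segment map appears to hold the physical addresses
	 * of the per-segment page-table pages; _kvm_pa2off turns such a
	 * physical address into an offset into the dump.
	 */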
	ptes = (int64_t *)(int)_kvm_pa2off(kd, (u_long)segmaps[sparc64_va_to_seg(va)]);
	pte = ptes[sparc64_va_to_pte(va)];
	if ((pte & SPARC64_TLB_V) != 0) {
		*pa = (u_long)((pte & SPARC64_TLB_PA_MASK) | VA_OFF(va));
		return (kd->nbpg - VA_OFF(va));
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}


/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, u_long pa)
{
	sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
int
_kvm_mdopen(kvm_t *kd)
{
	u_long max_uva;
	extern struct ps_strings *__ps_strings;

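	/*
	 * The ps_strings structure sits at the top of the user stack, so
	 * the address just past it serves as both the user stack base and
	 * the highest user virtual address.
	 */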
	max_uva = (u_long) (__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva = max_uva;
	kd->min_uva = 0;

	return (0);
}