/*	$NetBSD: kvm_m68k_cmn.c,v 1.8 1998/06/30 20:29:39 thorpej Exp $	*/

/*-
 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_m68k_cmn.c,v 1.8 1998/06/30 20:29:39 thorpej Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Common m68k machine dependent routines for kvm.
 *
 * Note: This file has to build on ALL m68k machines,
 * so do NOT include any <machine / *.h> files here.
 */

#include <sys/types.h>
#include <sys/kcore.h>

#include <unistd.h>
#include <limits.h>
#include <nlist.h>
#include <kvm.h>
#include <db.h>

#include <m68k/cpu.h>
#include <m68k/kcore.h>

#include "kvm_private.h"
#include "kvm_m68k.h"

int _kvm_cmn_initvtop __P((kvm_t *));
void _kvm_cmn_freevtop __P((kvm_t *));
int _kvm_cmn_kvatop __P((kvm_t *, u_long, u_long *));
off_t _kvm_cmn_pa2off __P((kvm_t *, u_long));

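/*
 * Ops vector for the common MD routines.  kvm_m68k.h makes this
 * visible to the m68k dispatch code, which is expected to pick an
 * ops vector to match the machine the dump came from.
 */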
struct kvm_ops _kvm_ops_cmn = {
	_kvm_cmn_initvtop,
	_kvm_cmn_freevtop,
	_kvm_cmn_kvatop,
	_kvm_cmn_pa2off };

static int vatop_030 __P((kvm_t *, u_int32_t, u_long, u_long *));
static int vatop_040 __P((kvm_t *, u_int32_t, u_long, u_long *));

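/*
 * Convert a byte address into a page number, using the page shift
 * recorded in the per-dump vmstate.
 */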
#define _kvm_btop(v, a) (((unsigned)(a)) >> (v)->pgshift)

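/*
 * Read one object at a kernel virtual address via kvm_read();
 * evaluates to non-zero if the read comes up short.
 */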
#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))

void
_kvm_cmn_freevtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
}

int
_kvm_cmn_initvtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
	return (0);
}

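/*
 * Translate a kernel virtual address into a physical address, using
 * the kernel page tables saved in the crash dump.  On success the
 * physical address is left in *pa and the number of bytes remaining
 * in its page is returned; 0 indicates failure.
 */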
int
_kvm_cmn_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int (*vtopf) __P((kvm_t *, u_int32_t, u_long, u_long *));

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	/*
	 * The 68040 and 68060 use the same translation functions,
	 * as do the 68030, 68851, and HP MMU.
	 */
	if (m->mmutype == MMU_68040 || m->mmutype == MMU_68060)
		vtopf = vatop_040;
	else
		vtopf = vatop_030;

	return ((*vtopf)(kd, m->sysseg_pa, va, pa));
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_cmn_pa2off(kd, pa)
	kvm_t *kd;
	u_long pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	phys_ram_seg_t *rsp;
	off_t off;
	int i;

	off = 0;
	rsp = m->ram_segs;
	for (i = 0; i < M68K_NPHYS_RAM_SEGS && rsp[i].size != 0; i++) {
		if (pa >= rsp[i].start &&
		    pa < (rsp[i].start + rsp[i].size)) {
			pa -= rsp[i].start;
			break;
		}
		off += rsp[i].size;
	}
	return (kd->dump_off + off + pa);
}
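/*
 * Worked example with a purely hypothetical layout: given two RAM
 * segments { start 0x00000000, size 4MB } and { start 0x08000000,
 * size 4MB }, physical address 0x08001000 falls in the second
 * segment, so off accumulates the 4MB of the first segment, pa is
 * rebased to 0x1000, and the result is dump_off + 4MB + 0x1000.
 *
 * The sketch below (deliberately under #if 0, with hypothetical
 * names) shows how the two routines above combine when kernel
 * memory is read out of a crash dump: translate the virtual
 * address, then seek to the corresponding offset in the dump file.
 */
#if 0
static ssize_t
example_read_dump(kd, va, buf, len)
	kvm_t *kd;
	u_long va;
	void *buf;
	size_t len;
{
	u_long pa;
	int cnt;

	cnt = _kvm_cmn_kvatop(kd, va, &pa);
	if (cnt == 0)
		return (-1);		/* translation failed */
	if (len > (size_t)cnt)
		len = (size_t)cnt;	/* clamp to the end of the page */
	return (pread(kd->pmfd, buf, len, _kvm_cmn_pa2off(kd, pa)));
}
#endif /* 0 */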

/*****************************************************************
 * Local stuff...
 */

static int
vatop_030(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t ste, pte;
	u_int p, offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

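	/*
	 * Index the segment table: the virtual address bits above
	 * sg_ishift select one 32-bit segment table entry.
	 */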
	addr = stpa + ((va >> m->sg_ishift) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (pread(kd->pmfd, &ste, sizeof(ste),
		    _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid segment (%x)", ste);
		return (0);
	}
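	/*
	 * The STE frame points at a page table; the within-segment bits
	 * of the virtual address (sg_pmask), converted to a page index,
	 * select the 32-bit PTE.
	 */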
	p = _kvm_btop(vm, va & m->sg_pmask);
	addr = (ste & m->sg_frame) + (p * sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &pte, sizeof(pte), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(pte))
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

static int
vatop_040(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t stpa2;
	u_int32_t ste, pte;
	u_int offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

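	/*
	 * The 68040 table walk is three levels deep: a root table entry
	 * selected by the sg40_shift1 bits, a level 2 pointer table
	 * entry selected by sg40_mask2/sg40_shift2, and finally a page
	 * table entry selected by sg40_mask3/sg40_shift3.  Each step
	 * below reads one 32-bit descriptor and follows its frame
	 * address to the next table.
	 */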
	addr = stpa + ((va >> m->sg40_shift1) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (pread(kd->pmfd, &ste, sizeof(ste),
		    _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
		    ste);
		return ((off_t)0);
	}
	stpa2 = (ste & m->sg40_addr1);
	addr = stpa2 + (((va & m->sg40_mask2) >> m->sg40_shift2) *
	    sizeof(u_int32_t));

	/*
	 * Address from level 1 STE is a physical address,
	 * so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &ste, sizeof(ste), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
		    ste);
		return ((off_t)0);
	}
	stpa2 = (ste & m->sg40_addr2);
	addr = stpa2 + (((va & m->sg40_mask3) >> m->sg40_shift3) *
	    sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &pte, sizeof(pte), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(pte))
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}