/*	$NetBSD: kvm_m68k_cmn.c,v 1.12 2003/08/07 16:44:37 agc Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_m68k_cmn.c,v 1.12 2003/08/07 16:44:37 agc Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Common m68k machine dependent routines for kvm.
 *
 * Note: This file has to build on ALL m68k machines,
 * so do NOT include any <machine / *.h> files here.
 */

#include <sys/types.h>
#include <sys/kcore.h>

#include <unistd.h>
#include <limits.h>
#include <nlist.h>
#include <kvm.h>
#include <db.h>

#include <m68k/cpu.h>
#include <m68k/kcore.h>

#include "kvm_private.h"
#include "kvm_m68k.h"

int   _kvm_cmn_initvtop __P((kvm_t *));
void  _kvm_cmn_freevtop __P((kvm_t *));
int   _kvm_cmn_kvatop   __P((kvm_t *, u_long, u_long *));
off_t _kvm_cmn_pa2off   __P((kvm_t *, u_long));

struct kvm_ops _kvm_ops_cmn = {
	_kvm_cmn_initvtop,
	_kvm_cmn_freevtop,
	_kvm_cmn_kvatop,
	_kvm_cmn_pa2off };

static int vatop_030 __P((kvm_t *, u_int32_t, u_long, u_long *));
static int vatop_040 __P((kvm_t *, u_int32_t, u_long, u_long *));

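/*
 * _kvm_btop() converts a byte address to a page number using the page
 * shift recorded in the vmstate; KREAD() reads one object of the
 * pointed-to type through kvm_read() and evaluates to non-zero on a
 * short or failed read.
 */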
#define	_kvm_btop(v, a)	(((unsigned)(a)) >> (v)->pgshift)

#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))

void
_kvm_cmn_freevtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
}

int
_kvm_cmn_initvtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
	return (0);
}

int
_kvm_cmn_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int (*vtopf) __P((kvm_t *, u_int32_t, u_long, u_long *));

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	/*
	 * The 68040 and 68060 use the same translation functions,
	 * as do the 68030, 68851, and HP MMU.
	 */
	if (m->mmutype == MMU_68040 || m->mmutype == MMU_68060)
		vtopf = vatop_040;
	else
		vtopf = vatop_030;

	return ((*vtopf)(kd, m->sysseg_pa, va, pa));
}
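
/*
 * Illustrative sketch only (not part of the original source): one way a
 * caller could consume the value returned by _kvm_cmn_kvatop(), which is
 * the number of bytes that remain valid in the page starting at *pa
 * (0 on failure).  The helper name example_read_dead() is hypothetical;
 * the real library reaches these routines through kvm_read() on a dead
 * kernel.
 */
#if 0
static ssize_t
example_read_dead(kvm_t *kd, u_long va, void *buf, size_t len)
{
	char *cp = buf;
	size_t resid = len;

	while (resid > 0) {
		u_long pa;
		int cc = _kvm_cmn_kvatop(kd, va, &pa);

		if (cc == 0)
			return (-1);	/* translation failed */
		if ((size_t)cc > resid)
			cc = resid;
		/* Physical address -> dump file offset, then raw read. */
		if (pread(kd->pmfd, cp, cc, _kvm_cmn_pa2off(kd, pa)) != cc)
			return (-1);	/* short read from the dump */
		va += cc;
		cp += cc;
		resid -= cc;
	}
	return (len);
}
#endif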

/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_cmn_pa2off(kd, pa)
	kvm_t	*kd;
	u_long	pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	phys_ram_seg_t *rsp;
	off_t off;
	int i;

	off = 0;
	rsp = m->ram_segs;
	for (i = 0; i < M68K_NPHYS_RAM_SEGS && rsp[i].size != 0; i++) {
		if (pa >= rsp[i].start &&
		    pa < (rsp[i].start + rsp[i].size)) {
			pa -= rsp[i].start;
			break;
		}
		off += rsp[i].size;
	}
	return (kd->dump_off + off + pa);
}
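
/*
 * Worked example with made-up numbers: given two RAM segments
 * { start 0x00000000, size 0x00400000 } and { start 0x01000000,
 * size 0x00200000 }, a physical address of 0x01004000 misses the first
 * segment (adding its size, 0x00400000, to `off') and falls in the
 * second, so `pa' is rebased to 0x4000 and the routine returns
 * kd->dump_off + 0x00400000 + 0x4000.
 */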

/*****************************************************************
 * Local stuff...
 */

static int
vatop_030(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t ste, pte;
	u_int p, offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

	addr = stpa + ((va >> m->sg_ishift) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (pread(kd->pmfd, &ste, sizeof(ste),
		    _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid segment (%x)", ste);
		return(0);
	}
	p = _kvm_btop(vm, va & m->sg_pmask);
	addr = (ste & m->sg_frame) + (p * sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &pte, sizeof(pte), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(pte))
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

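/*
 * vatop_040 below performs the three-level '040/'060 table walk: the root
 * table is indexed with (va >> sg40_shift1), the level-2 table with
 * ((va & sg40_mask2) >> sg40_shift2), and the page table with
 * ((va & sg40_mask3) >> sg40_shift3), each entry yielding the physical
 * base of the next level.  (Contrast vatop_030 above, which uses a
 * two-level segment/page table walk.)
 */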
static int
vatop_040(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t stpa2;
	u_int32_t ste, pte;
	u_int offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

	addr = stpa + ((va >> m->sg40_shift1) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (pread(kd->pmfd, &ste, sizeof(ste),
		    _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
				 ste);
		return (0);
	}
	stpa2 = (ste & m->sg40_addr1);
	addr = stpa2 + (((va & m->sg40_mask2) >> m->sg40_shift2) *
	    sizeof(u_int32_t));

	/*
	 * Address from level 1 STE is a physical address,
	 * so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &ste, sizeof(ste), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
				 ste);
		return (0);
	}
	stpa2 = (ste & m->sg40_addr2);
	addr = stpa2 + (((va & m->sg40_mask3) >> m->sg40_shift3) *
	    sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &pte, sizeof(pte), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(pte))
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}