/*	$NetBSD: kvm_m68k_cmn.c,v 1.8 1998/06/30 20:29:39 thorpej Exp $	*/

/*-
 * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_m68k_cmn.c,v 1.8 1998/06/30 20:29:39 thorpej Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Common m68k machine dependent routines for kvm.
 *
 * Note: This file has to build on ALL m68k machines,
 * so do NOT include any <machine / *.h> files here.
 */

#include <sys/types.h>
#include <sys/kcore.h>

#include <unistd.h>
#include <limits.h>
#include <nlist.h>
#include <kvm.h>
#include <db.h>

#include <m68k/cpu.h>
#include <m68k/kcore.h>

#include "kvm_private.h"
#include "kvm_m68k.h"

int   _kvm_cmn_initvtop __P((kvm_t *));
void  _kvm_cmn_freevtop __P((kvm_t *));
int   _kvm_cmn_kvatop   __P((kvm_t *, u_long, u_long *));
off_t _kvm_cmn_pa2off   __P((kvm_t *, u_long));

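/*
 * Ops vector picked up by the m68k front end for the ports that
 * share this common MMU handling.
 */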
struct kvm_ops _kvm_ops_cmn = {
	_kvm_cmn_initvtop,
	_kvm_cmn_freevtop,
	_kvm_cmn_kvatop,
	_kvm_cmn_pa2off };

static int vatop_030 __P((kvm_t *, u_int32_t, u_long, u_long *));
static int vatop_040 __P((kvm_t *, u_int32_t, u_long, u_long *));

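/* Convert an address to a page number using the vmstate's page shift. */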
#define	_kvm_btop(v, a)	(((unsigned)(a)) >> (v)->pgshift)

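/* Read a kernel data structure via kvm_read(); non-zero means the read failed. */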
#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))

void
_kvm_cmn_freevtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
}

int
_kvm_cmn_initvtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
	return (0);
}

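/*
 * Translate a kernel virtual address to a physical address, using the
 * MMU type recorded in the kcore header to pick the table-walk routine.
 * Returns the number of bytes left in the page on success, 0 on failure.
 */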
int
_kvm_cmn_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int (*vtopf) __P((kvm_t *, u_int32_t, u_long, u_long *));

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	/*
	 * The 68040 and 68060 use the same translation functions,
	 * as do the 68030, the 68851, and the HP MMU.
	 */
	if (m->mmutype == MMU_68040 || m->mmutype == MMU_68060)
		vtopf = vatop_040;
	else
		vtopf = vatop_030;

	return ((*vtopf)(kd, m->sysseg_pa, va, pa));
}

/*
 * Translate a physical address to a file offset in the crash dump.
 * The RAM segments recorded in the kcore header are stored back to
 * back in the dump, so the offset of a segment is the sum of the
 * sizes of the segments that precede it.
 */
off_t
_kvm_cmn_pa2off(kd, pa)
	kvm_t	*kd;
	u_long	pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	phys_ram_seg_t *rsp;
	off_t off;
	int i;

	off = 0;
	rsp = m->ram_segs;
	for (i = 0; i < M68K_NPHYS_RAM_SEGS && rsp[i].size != 0; i++) {
		if (pa >= rsp[i].start &&
		    pa < (rsp[i].start + rsp[i].size)) {
			pa -= rsp[i].start;
			break;
		}
		off += rsp[i].size;
	}
	return (kd->dump_off + off + pa);
}

/*****************************************************************
 * Local stuff...
 */

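/*
 * Two-level table walk for the 68030/68851/HP MMU case: fetch the
 * segment table entry for va from the table at physical address stpa,
 * then the page table entry it points to.  Returns the number of
 * bytes left in the page on success, 0 on failure.
 */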
static int
vatop_030(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t ste, pte;
	u_int p, offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

	addr = stpa + ((va >> m->sg_ishift) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately the kernel segment table is mapped 1-to-1,
	 * so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (pread(kd->pmfd, &ste, sizeof(ste),
		    _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid segment (%x)", ste);
		return (0);
	}
	p = _kvm_btop(vm, va & m->sg_pmask);
	addr = (ste & m->sg_frame) + (p * sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address, so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &pte, sizeof(pte), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(pte))
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

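/*
 * Table walk for the 68040/68060 case: follow the level 1 and level 2
 * descriptors from the root table at physical address stpa down to the
 * page table entry.  Returns the number of bytes left in the page on
 * success, 0 on failure.
 */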
static int
vatop_040(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t stpa2;
	u_int32_t ste, pte;
	u_int offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

	addr = stpa + ((va >> m->sg40_shift1) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately the kernel segment table is mapped 1-to-1,
	 * so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (pread(kd->pmfd, &ste, sizeof(ste),
		    _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 1 descriptor (%x)", ste);
		return (0);
	}
	stpa2 = (ste & m->sg40_addr1);
	addr = stpa2 + (((va & m->sg40_mask2) >> m->sg40_shift2) *
	    sizeof(u_int32_t));

	/*
	 * Address from level 1 STE is a physical address,
	 * so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &ste, sizeof(ste), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 2 descriptor (%x)", ste);
		return (0);
	}
	stpa2 = (ste & m->sg40_addr2);
	addr = stpa2 + (((va & m->sg40_mask3) >> m->sg40_shift3) *
	    sizeof(u_int32_t));

	/*
	 * Address from the level 2 STE is a physical address,
	 * so don't use kvm_read.
	 */
	if (pread(kd->pmfd, &pte, sizeof(pte), _kvm_cmn_pa2off(kd, addr)) !=
	    sizeof(pte))
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}