/*	$NetBSD: kvm_m68k_cmn.c,v 1.4 1997/08/15 02:22:00 mikel Exp $	*/

/*-
 * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_m68k_cmn.c,v 1.4 1997/08/15 02:22:00 mikel Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Common m68k machine dependent routines for kvm.
 *
 * Note: This file has to build on ALL m68k machines,
 * so do NOT include any <machine/*.h> files here.
 */

#include <sys/types.h>
#include <sys/kcore.h>

#include <unistd.h>
#include <limits.h>
#include <nlist.h>
#include <kvm.h>
#include <db.h>

#include <m68k/cpu.h>
#include <m68k/kcore.h>

#include "kvm_private.h"
#include "kvm_m68k.h"

int   _kvm_cmn_initvtop __P((kvm_t *));
void  _kvm_cmn_freevtop __P((kvm_t *));
int   _kvm_cmn_kvatop   __P((kvm_t *, u_long, u_long *));
off_t _kvm_cmn_pa2off   __P((kvm_t *, u_long));
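
/*
 * Operations vector exported to the m68k machine-dependent front end
 * (struct kvm_ops is declared in kvm_m68k.h); it bundles the common
 * init/free/translate routines defined in this file.
 */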
struct kvm_ops _kvm_ops_cmn = {
	_kvm_cmn_initvtop,
	_kvm_cmn_freevtop,
	_kvm_cmn_kvatop,
	_kvm_cmn_pa2off };

static int vatop_030 __P((kvm_t *, u_int32_t, u_long, u_long *));
static int vatop_040 __P((kvm_t *, u_int32_t, u_long, u_long *));
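
/*
 * _kvm_btop() truncates a byte address to a page number using the page
 * shift recorded in the vmstate.  KREAD() reads an object at a kernel
 * virtual address from the image and evaluates to non-zero on a failed
 * or short read.
 */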
#define	_kvm_btop(v, a)	(((unsigned)(a)) >> (v)->pgshift)

#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))

void
_kvm_cmn_freevtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
}

int
_kvm_cmn_initvtop(kd)
	kvm_t *kd;
{
	/* No private state information to keep. */
	return (0);
}
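
/*
 * Translate a kernel virtual address to a physical address using the MMU
 * state saved in the kcore header.  Returns the number of bytes remaining
 * in the page containing va, or 0 on error.
 */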
int
_kvm_cmn_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	int (*vtopf) __P((kvm_t *, u_int32_t, u_long, u_long *));

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	/*
	 * The 68040 and 68060 use the same translation functions,
	 * as do the 68030, 68851, and HP MMU.
	 */
	if (m->mmutype == MMU_68040 || m->mmutype == MMU_68060)
		vtopf = vatop_040;
	else
		vtopf = vatop_030;

	return ((*vtopf)(kd, m->sysseg_pa, va, pa));
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_cmn_pa2off(kd, pa)
	kvm_t	*kd;
	u_long	pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	phys_ram_seg_t *rsp;
	off_t off;
	int i;

	off = 0;
	rsp = m->ram_segs;
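	/*
	 * The dump lays the physical RAM segments out back to back after
	 * kd->dump_off, so skip past whole segments that lie below pa and
	 * keep pa's offset within its own segment.
	 */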
	for (i = 0; i < M68K_NPHYS_RAM_SEGS && rsp[i].size != 0; i++) {
		if (pa >= rsp[i].start &&
		    pa < (rsp[i].start + rsp[i].size)) {
			pa -= rsp[i].start;
			break;
		}
		off += rsp[i].size;
	}
	return (kd->dump_off + off + pa);
}

/*****************************************************************
 * Local stuff...
 */
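
/*
 * 68030/68851/HP-style two-level walk: the upper bits of va index the
 * segment table, the remaining page bits index the page table named by
 * the segment table entry, and the low bits are the offset in the page.
 */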
static int
vatop_030(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t ste, pte;
	u_int p, offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

	addr = stpa + ((va >> m->sg_ishift) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid segment (%x)", ste);
		return (0);
	}
	p = _kvm_btop(vm, va & m->sg_pmask);
	addr = (ste & m->sg_frame) + (p * sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
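
/*
 * 68040/68060-style three-level walk: the root-, pointer-, and page-table
 * indices are extracted from va with the sg40_* shifts and masks, and each
 * level's descriptor names the physical address of the next table.
 */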
static int
vatop_040(kd, stpa, va, pa)
	kvm_t *kd;
	u_int32_t stpa;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *h = kd->cpu_data;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	struct vmstate *vm = kd->vmst;
	u_long addr;
	u_int32_t stpa2;
	u_int32_t ste, pte;
	u_int p, offset;

	offset = va & vm->pgofset;

	/*
	 * We may be called before address translation is initialized.
	 * This is typically used to find the dump magic number.  This
	 * means we do not yet have the kernel page tables available,
	 * so we must do a simple relocation.
	 */
	if (va < m->relocend) {
		*pa = (va - h->kernbase) + m->reloc;
		return (h->page_size - offset);
	}

	addr = stpa + ((va >> m->sg40_shift1) * sizeof(u_int32_t));

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (stpa == m->sysseg_pa) {
		if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
			goto invalid;
	} else if (KREAD(kd, addr, &ste))
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 1 descriptor (%x)", ste);
		return (0);
	}
	stpa2 = (ste & m->sg40_addr1);
	addr = stpa2 + (((va & m->sg40_mask2) >> m->sg40_shift2) *
	    sizeof(u_int32_t));

	/*
	 * Address from level 1 STE is a physical address,
	 * so don't use kvm_read.
	 */
	if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
	    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
		goto invalid;
	if ((ste & m->sg_v) == 0) {
		_kvm_err(kd, 0, "invalid level 2 descriptor (%x)", ste);
		return (0);
	}
	stpa2 = (ste & m->sg40_addr2);
	addr = stpa2 + (((va & m->sg40_mask3) >> m->sg40_shift3) *
	    sizeof(u_int32_t));

	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
		goto invalid;
	addr = pte & m->pg_frame;
	if ((pte & m->pg_v) == 0) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr + offset;

	return (h->page_size - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}