/* NetBSD src/lib/libkvm/kvm_m68k_cmn.c, revision 1.1 (code-browser header removed) */
      1 /*	$NetBSD: kvm_m68k_cmn.c,v 1.1 1997/03/21 18:44:24 gwr Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1989, 1992, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software developed by the Computer Systems
      8  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
      9  * BG 91-66 and contributed to Berkeley.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the University of
     22  *	California, Berkeley and its contributors.
     23  * 4. Neither the name of the University nor the names of its contributors
     24  *    may be used to endorse or promote products derived from this software
     25  *    without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     37  * SUCH DAMAGE.
     38  */
     39 
     40 #if defined(LIBC_SCCS) && !defined(lint)
     41 #if 0
     42 static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
     43 #else
     44 static char *rcsid = "$NetBSD: kvm_m68k_cmn.c,v 1.1 1997/03/21 18:44:24 gwr Exp $";
     45 #endif
     46 #endif /* LIBC_SCCS and not lint */
     47 
     48 /*
     49  * Common m68k machine dependent routines for kvm.
     50  *
     51  * Note: This file has to build on ALL m68k machines,
     52  * so do NOT include any <machine/*.h> files here.
     53  */
     54 
     55 #include <sys/types.h>
     56 #include <sys/kcore.h>
     57 
     58 #include <unistd.h>
     59 #include <limits.h>
     60 #include <nlist.h>
     61 #include <kvm.h>
     62 #include <db.h>
     63 
     64 /* XXX: Avoid <machine/pte.h> etc. (see below) */
     65 typedef u_int pt_entry_t;		/* page table entry */
     66 typedef u_int st_entry_t;		/* segment table entry */
     67 
     68 #include <m68k/cpu.h>
     69 #include <m68k/kcore.h>
     70 
     71 #include "kvm_private.h"
     72 #include "kvm_m68k.h"
     73 
     74 int   _kvm_cmn_initvtop __P((kvm_t *));
     75 void  _kvm_cmn_freevtop __P((kvm_t *));
     76 int	  _kvm_cmn_kvatop   __P((kvm_t *, u_long, u_long *));
     77 off_t _kvm_cmn_pa2off   __P((kvm_t *, u_long));
     78 
/* Op vector handed to the machine-dependent m68k front-end (kvm_m68k.h). */
struct kvm_ops _kvm_ops_cmn = {
	_kvm_cmn_initvtop,	/* set up translation state (none needed here) */
	_kvm_cmn_freevtop,	/* release translation state */
	_kvm_cmn_kvatop,	/* kernel virtual address -> physical address */
	_kvm_cmn_pa2off };	/* physical address -> crash-dump file offset */
     84 
     85 static int vatop_030 __P((kvm_t *, st_entry_t *, ulong, ulong *));
     86 static int vatop_040 __P((kvm_t *, st_entry_t *, ulong, ulong *));
     87 
     88 /*
     89  * XXX: I don't like this, but until all arch/.../include files
     90  * are exported into some user-accessable place, there is no
     91  * convenient alternative to copying these definitions here.
     92  */
     93 
     94 /* Things from param.h */
     95 #define PGSHIFT	13
     96 #define NBPG	(1<<13)
     97 #define PGOFSET (NBPG-1)
     98 #define	btop(x)		(((unsigned)(x)) >> PGSHIFT)
     99 
    100 /* Things from pte.h */
    101 
    102 /* All variants */
    103 #define SG_V		 2
    104 #define	PG_NV		0x00000000
    105 #define PG_FRAME	0xffffe000
    106 
    107 /* MC68030 with MMU TCR set for 8/11/13 (bits) */
    108 #define SG3_SHIFT	24	/* a.k.a SEGSHIFT */
    109 #define SG3_FRAME	0xffffe000
    110 #define SG3_PMASK	0x00ffe000
    111 
    112 /* MC68040 with MMU set for 8K page size. */
    113 #define SG4_MASK1	0xfe000000
    114 #define SG4_SHIFT1	25
    115 #define SG4_MASK2	0x01fc0000
    116 #define SG4_SHIFT2	18
    117 #define SG4_MASK3	0x0003e000
    118 #define SG4_SHIFT3	13
    119 #define SG4_ADDR1	0xfffffe00
    120 #define SG4_ADDR2	0xffffff80
    121 
    122 
    123 
    124 #define KREAD(kd, addr, p)\
    125 	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))
    126 
    127 void
    128 _kvm_cmn_freevtop(kd)
    129 	kvm_t *kd;
    130 {
    131 	if (kd->vmst != 0)
    132 		free(kd->vmst);
    133 }
    134 
    135 int
    136 _kvm_cmn_initvtop(kd)
    137 	kvm_t *kd;
    138 {
    139 
    140 	return (0);
    141 }
    142 
    143 int
    144 _kvm_cmn_kvatop(kd, va, pa)
    145 	kvm_t *kd;
    146 	u_long va;
    147 	u_long *pa;
    148 {
    149 	register cpu_kcore_hdr_t *cpu_kh;
    150 	int (*vtopf) __P((kvm_t *, st_entry_t *, ulong, ulong *));
    151 
    152 	if (ISALIVE(kd)) {
    153 		_kvm_err(kd, 0, "vatop called in live kernel!");
    154 		return (0);
    155 	}
    156 
    157 	cpu_kh = kd->cpu_data;
    158 	switch (cpu_kh->mmutype) {
    159 
    160 	case MMU_68030:
    161 		vtopf = vatop_030;
    162 		break;
    163 
    164 	case MMU_68040:
    165 		vtopf = vatop_040;
    166 		break;
    167 
    168 	default:
    169 		_kvm_err(kd, 0, "vatop unknown MMU type!");
    170 		return (0);
    171 	}
    172 
    173 	return ((*vtopf)(kd, cpu_kh->sysseg_pa, va, pa));
    174 }
    175 
    176 /*
    177  * Translate a physical address to a file-offset in the crash-dump.
    178  */
    179 off_t
    180 _kvm_cmn_pa2off(kd, pa)
    181 	kvm_t	*kd;
    182 	u_long	pa;
    183 {
    184 	off_t		off;
    185 	phys_ram_seg_t	*rsp;
    186 	register cpu_kcore_hdr_t *cpu_kh;
    187 
    188 	cpu_kh = kd->cpu_data;
    189 	off = 0;
    190 	for (rsp = cpu_kh->ram_segs; rsp->size; rsp++) {
    191 		if (pa >= rsp->start && pa < rsp->start + rsp->size) {
    192 			pa -= rsp->start;
    193 			break;
    194 		}
    195 		off += rsp->size;
    196 	}
    197 	return(kd->dump_off + off + pa);
    198 }
    199 
    200 /*****************************************************************
    201  * Local stuff...
    202  */
    203 
    204 static int
    205 vatop_030(kd, sta, va, pa)
    206 	kvm_t *kd;
    207 	st_entry_t *sta;
    208 	u_long va;
    209 	u_long *pa;
    210 {
    211 	register cpu_kcore_hdr_t *cpu_kh;
    212 	register u_long addr;
    213 	int p, ste, pte;
    214 	int offset;
    215 
    216 	offset = va & PGOFSET;
    217 	cpu_kh = kd->cpu_data;
    218 
    219 	/*
    220 	 * If we are initializing (kernel segment table pointer not yet set)
    221 	 * then return pa == va to avoid infinite recursion.
    222 	 */
    223 	if (cpu_kh->sysseg_pa == 0) {
    224 		*pa = va + cpu_kh->kernel_pa;
    225 		return (NBPG - offset);
    226 	}
    227 
    228 	addr = (u_long)&sta[va >> SG3_SHIFT];
    229 	/*
    230 	 * Can't use KREAD to read kernel segment table entries.
    231 	 * Fortunately it is 1-to-1 mapped so we don't have to.
    232 	 */
    233 	if (sta == cpu_kh->sysseg_pa) {
    234 		if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
    235 			read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
    236 			goto invalid;
    237 	} else if (KREAD(kd, addr, &ste))
    238 		goto invalid;
    239 	if ((ste & SG_V) == 0) {
    240 		_kvm_err(kd, 0, "invalid segment (%x)", ste);
    241 		return((off_t)0);
    242 	}
    243 	p = btop(va & SG3_PMASK);
    244 	addr = (ste & SG3_FRAME) + (p * sizeof(pt_entry_t));
    245 
    246 	/*
    247 	 * Address from STE is a physical address so don't use kvm_read.
    248 	 */
    249 	if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
    250 	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
    251 		goto invalid;
    252 	addr = pte & PG_FRAME;
    253 	if (pte == PG_NV) {
    254 		_kvm_err(kd, 0, "page not valid");
    255 		return (0);
    256 	}
    257 	*pa = addr + offset;
    258 
    259 	return (NBPG - offset);
    260 invalid:
    261 	_kvm_err(kd, 0, "invalid address (%x)", va);
    262 	return (0);
    263 }
    264 
    265 static int
    266 vatop_040(kd, sta, va, pa)
    267 	kvm_t *kd;
    268 	st_entry_t *sta;
    269 	u_long va;
    270 	u_long *pa;
    271 {
    272 	register cpu_kcore_hdr_t *cpu_kh;
    273 	register u_long addr;
    274 	st_entry_t *sta2;
    275 	int p, ste, pte;
    276 	int offset;
    277 
    278 	offset = va & PGOFSET;
    279 	cpu_kh = kd->cpu_data;
    280 	/*
    281 	 * If we are initializing (kernel segment table pointer not yet set)
    282 	 * then return pa == va to avoid infinite recursion.
    283 	 */
    284 	if (cpu_kh->sysseg_pa == 0) {
    285 		*pa = va + cpu_kh->kernel_pa;
    286 		return (NBPG - offset);
    287 	}
    288 
    289 	addr = (u_long)&sta[va >> SG4_SHIFT1];
    290 	/*
    291 	 * Can't use KREAD to read kernel segment table entries.
    292 	 * Fortunately it is 1-to-1 mapped so we don't have to.
    293 	 */
    294 	if (sta == cpu_kh->sysseg_pa) {
    295 		if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
    296 			read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
    297 			goto invalid;
    298 	} else if (KREAD(kd, addr, &ste))
    299 		goto invalid;
    300 	if ((ste & SG_V) == 0) {
    301 		_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
    302 				 ste);
    303 		return((off_t)0);
    304 	}
    305 	sta2 = (st_entry_t *)(ste & SG4_ADDR1);
    306 	addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
    307 	/*
    308 	 * Address from level 1 STE is a physical address,
    309 	 * so don't use kvm_read.
    310 	 */
    311 	if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
    312 		read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
    313 		goto invalid;
    314 	if ((ste & SG_V) == 0) {
    315 		_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
    316 				 ste);
    317 		return((off_t)0);
    318 	}
    319 	sta2 = (st_entry_t *)(ste & SG4_ADDR2);
    320 	addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
    321 
    322 
    323 	/*
    324 	 * Address from STE is a physical address so don't use kvm_read.
    325 	 */
    326 	if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
    327 	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
    328 		goto invalid;
    329 	addr = pte & PG_FRAME;
    330 	if (pte == PG_NV) {
    331 		_kvm_err(kd, 0, "page not valid");
    332 		return (0);
    333 	}
    334 	*pa = addr + offset;
    335 
    336 	return (NBPG - offset);
    337 invalid:
    338 	_kvm_err(kd, 0, "invalid address (%x)", va);
    339 	return (0);
    340 }
    341