/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

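/*
 * How these MD hooks get exercised (illustrative sketch only): a libkvm
 * consumer opens a kernel/crash-dump pair and reads kernel memory with
 * kvm_read(), which on a dead kernel ends up in _kvm_kvatop() below to
 * turn each kernel virtual address into a physical (dump file) address.
 * The pathnames and error handling here are made up for illustration:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core",
 *	    NULL, O_RDONLY, errbuf);
 *	struct nlist nl[2];
 *	nl[0].n_name = "_cputyp";	(any kernel data symbol will do)
 *	nl[1].n_name = 0;
 *	(void)kvm_nlist(kd, nl);
 *	(void)kvm_read(kd, nl[0].n_value, (char *)&cputyp, sizeof(cputyp));
 *	kvm_close(kd);
 */
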
#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#define NPMEG 128

/* XXX from sparc/pmap.c */
#define MAXMEM  (128 * 1024 * 1024)     /* no more than 128 MB phys mem */
#define NPGBANK 16                      /* 2^4 pages per bank (64K / bank) */
#define BSHIFT  4                       /* log2(NPGBANK) */
#define BOFFSET (NPGBANK - 1)
#define BTSIZE  (MAXMEM / 4096 / NPGBANK)
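/*
 * HWTOSW translates a hardware page frame number (from the sparse, holey
 * physical address space seen through /dev/mem) into the page number used
 * by the contiguous crash dump image: the bank bits are remapped through
 * the pmap_stod table and the offset within the bank is kept as-is.
 */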
#define HWTOSW(pmap_stod, pg) (pmap_stod[(pg) >> BSHIFT] | ((pg) & BOFFSET))

struct vmstate {
	pmeg_t segmap[NKSEG];
	int *pmeg;
	int pmap_stod[BTSIZE];              /* dense to sparse */
};

static int cputyp;

static int pgshift, nptesg;

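/*
 * Compute the page-size-derived constants used throughout this file:
 * pgshift is log2 of the page size reported in kd->nbpg, and nptesg is
 * the number of page table entries per PMEG (MMU segment).
 */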
static void
getpgshift(kd)
	kvm_t *kd;
{
	for (pgshift = 12; (1 << pgshift) != kd->nbpg; pgshift++)
		;
	nptesg = NBPSG / kd->nbpg;
}

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		if (kd->vmst->pmeg != 0)
			free(kd->vmst->pmeg);
		free(kd->vmst);
		kd->vmst = 0;
	}
}

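/*
 * Prepare for virtual-to-physical translation.  The crash dump is expected
 * to carry the kernel segment map and the PMEG (page table) contents at the
 * tail of the image; read both back from there, then pick up cputyp and,
 * on machines that need it, the pmap_stod frame-number translation table
 * from the kernel symbol table.
 */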
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	register int i;
	register int off;
	register struct vmstate *vm;
	struct stat st;
	struct nlist nlist[3];

	if (pgshift == 0)
		getpgshift(kd);

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	vm->pmeg = (int *)_kvm_malloc(kd, NPMEG * nptesg * sizeof(int));
	if (vm->pmeg == 0) {
		free(vm);
		return (-1);
	}

	kd->vmst = vm;

	if (fstat(kd->pmfd, &st) < 0)
		return (-1);
	/*
	 * Read segment table.
	 */
	off = st.st_size - roundup(sizeof(vm->segmap), kd->nbpg);
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->segmap, sizeof(vm->segmap)) < 0) {
		_kvm_err(kd, kd->program, "cannot read segment map");
		return (-1);
	}
	/*
	 * Read PMEGs.  They immediately precede the segment map at the
	 * end of the dump image.
	 */
	off = st.st_size - roundup(sizeof(vm->segmap), kd->nbpg) -
	    roundup(NPMEG * nptesg * sizeof(int), kd->nbpg);
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->pmeg, NPMEG * nptesg * sizeof(int)) < 0) {
		_kvm_err(kd, kd->program, "cannot read PMEG table");
		return (-1);
	}
	/*
	 * Make pmap_stod be an identity map so we can bootstrap it in.
	 * We assume it's in the first contiguous chunk of physical memory.
	 */
	for (i = 0; i < BTSIZE; ++i)
		vm->pmap_stod[i] = i << 4;

	/*
	 * It's okay to do this nlist separately from the one kvm_getprocs()
	 * does, since the only time we could gain anything by combining
	 * them is if we do a kvm_getprocs() on a dead kernel, which is
	 * not too common.
	 */
	nlist[0].n_name = "_cputyp";
	nlist[1].n_name = "_pmap_stod";
	nlist[2].n_name = 0;
	(void)kvm_nlist(kd, nlist);
	if (nlist[0].n_value == 0) {
		_kvm_err(kd, kd->program, "cputyp: no such symbol");
		return (-1);
	}
	if (kvm_read(kd, (u_long)nlist[0].n_value,
	    (char *)&cputyp, sizeof(cputyp)) != sizeof(cputyp)) {
		_kvm_err(kd, kd->program, "cannot read cputyp");
		return (-1);
	}

	/*
	 * A kernel compiled only for the sun4 will not contain the symbol
	 * pmap_stod.  In that case we are happy to use the identity map
	 * initialized earlier.  If we are not on a sun4, the lack of this
	 * symbol is fatal.
	 */
	if (nlist[1].n_value != 0) {
		if (kvm_read(kd, (u_long)nlist[1].n_value,
		    (char *)vm->pmap_stod, sizeof(vm->pmap_stod))
		    != sizeof(vm->pmap_stod)) {
			_kvm_err(kd, kd->program, "cannot read pmap_stod");
			return (-1);
		}
	} else {
		if (cputyp != CPU_SUN4) {
			_kvm_err(kd, kd->program, "pmap_stod: no such symbol");
			return (-1);
		}
	}

	return (0);
}

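/* Byte offset of va within its page (uses the caller's local kd). */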
#define VA_OFF(va) ((va) & (kd->nbpg - 1))

/*
 * Translate a user virtual address to a physical address.
 * Returns the number of bytes that are contiguously available from
 * that physical address, or 0 if the translation fails.
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	int kva, pte;
	register int off, frame;
	register struct vmspace *vms = p->p_vmspace;
	struct usegmap *usp;

	if (pgshift == 0)
		getpgshift(kd);

	if ((u_long)vms < KERNBASE) {
		_kvm_err(kd, kd->program, "_kvm_uvatop: corrupt proc");
		return (0);
	}
	if (va >= KERNBASE)
		return (0);
	/*
	 * Get the PTE.  This takes two steps.  We read the
	 * base address of the table, then we index it.
	 * Note that the pte table is indexed by
	 * virtual segment rather than physical segment.
	 */
	kva = (u_long)&vms->vm_pmap.pm_segstore;
	if (kvm_read(kd, kva, (char *)&usp, 4) != 4)
		goto invalid;
	kva = (u_long)&usp->us_pte[VA_VSEG(va)];
	if (kvm_read(kd, kva, (char *)&kva, 4) != 4 || kva == 0)
		goto invalid;
	kva += sizeof(usp->us_pte[0]) * VA_VPG(va);
	if (kvm_read(kd, kva, (char *)&pte, 4) == 4 && (pte & PG_V)) {
		off = VA_OFF(va);
		/*
		 * /dev/mem adheres to the hardware model of physical memory
		 * (with holes in the address space), while crashdumps
		 * adhere to the contiguous software model.
		 */
		if (ISALIVE(kd))
			frame = pte & PG_PFNUM;
		else
			frame = HWTOSW(kd->vmst->pmap_stod, pte & PG_PFNUM);
		*pa = (frame << pgshift) | off;
		return (kd->nbpg - off);
	}
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vmst.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crashdumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register int s;
	register int pte;
	register int off;

	if (pgshift == 0)
		getpgshift(kd);

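	/*
	 * Two-level lookup: the kernel segment map gives the PMEG number
	 * for this virtual segment, and that PMEG's slot in the saved page
	 * table gives the PTE for the page within the segment.
	 */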
	if (va >= KERNBASE) {
		vm = kd->vmst;
		s = vm->segmap[VA_VSEG(va) - NUSEG];
		pte = vm->pmeg[VA_VPG(va) + nptesg * s];
		if ((pte & PG_V) != 0) {
			off = VA_OFF(va);
			*pa = (HWTOSW(vm->pmap_stod, pte & PG_PFNUM)
			       << pgshift) | off;

			return (kd->nbpg - off);
		}
	}
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}