/*	$NetBSD: kvm_sparc64.c,v 1.13 2008/01/18 16:26:09 martin Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_sparc64.c,v 1.13 2008/01/18 16:26:09 martin Exp $");
#endif
#endif /* LIBC_SCCS and not lint */
/*
 * SPARC machine-dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>
#include <machine/kcore.h>
#include <machine/vmparam.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

int _kvm_kvatop __P((kvm_t *, u_long, u_long *));

/*
 * This port keeps no private vtop state, so any vmst left behind
 * indicates an internal error.
 */
void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = 0;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU-specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 *
 * We should read in and cache the ksegs here to speed up operations...
 */
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	kd->nbpg = 0x2000;	/* sparc64 uses 8KB kernel pages */

	return (0);
}
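
/*
 * For reference, an illustrative sketch (not part of the original
 * source) of the new-format CPU segment written by pmap_dumpmmu():
 * a cpu_kcore_hdr_t (newmagic == SPARC64_KCORE_NEWMAGIC) followed,
 * at the byte offsets it records, by
 *
 *	struct cpu_kcore_4mbseg[num4mbsegs]	at off4mbsegs
 *	phys_ram_seg_t[nmemseg]			at memsegoffset
 *
 * _kvm_kvatop() below tries the wired 4MB mappings first, then the
 * per-CPU area, and finally walks the kernel page table.
 */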

/*
 * Translate a kernel virtual address to a physical address using the
 * MMU information in the dump's cpu_kcore_hdr_t.  Returns the result
 * in pa, and returns the number of bytes that are contiguously
 * available from this physical address.  This routine is used only
 * for crash dumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	u_long kernbase = cpup->kernbase;
	uint64_t *pseg, *pdir, *ptbl;
	struct cpu_kcore_4mbseg *ktlb;
	int64_t data;
	int i;

	if (va < kernbase)
		goto lose;

	/* Handle the wired 4MB TTEs and per-CPU mappings */
	if (cpup->memsegoffset > sizeof(cpu_kcore_hdr_t) &&
	    cpup->newmagic == SPARC64_KCORE_NEWMAGIC) {
		/*
		 * new format: we have a list of 4 MB mappings
		 */
		ktlb = (struct cpu_kcore_4mbseg *)
			((uintptr_t)kd->cpu_data + cpup->off4mbsegs);
		for (i = 0; i < cpup->num4mbsegs; i++) {
			uint64_t start = ktlb[i].va;
			if (va < start || va >= start + PAGE_SIZE_4M)
				continue;
			*pa = ktlb[i].pa + va - start;
			return (int)(start + PAGE_SIZE_4M - va);
		}

		if (cpup->numcpuinfos > 0) {
			/* we have per-CPU mapping info */
			uint64_t start, base;

			base = cpup->cpubase - 32*1024;
			if (va >= base && va < (base + cpup->percpusz)) {
				start = va - base;
				*pa = cpup->cpusp
				    + cpup->thiscpu*cpup->percpusz
				    + start;
				return (int)(cpup->percpusz - start);
			}
		}
	} else {
		/*
		 * old format: just a textbase/size and database/size
		 */
		if (va >= cpup->ktextbase && va <
		    (cpup->ktextbase + cpup->ktextsz)) {
			u_long vaddr;

			vaddr = va - cpup->ktextbase;
			*pa = cpup->ktextp + vaddr;
			return (int)(cpup->ktextsz - vaddr);
		}
		if (va >= cpup->kdatabase && va <
		    (cpup->kdatabase + cpup->kdatasz)) {
			u_long vaddr;

			vaddr = va - cpup->kdatabase;
			*pa = cpup->kdatap + vaddr;
			return (int)(cpup->kdatasz - vaddr);
		}
	}

	/*
	 * Parse the kernel page table: a three-level walk from the
	 * segment map through the page directory down to the 64-bit
	 * TTE for the page.
	 */
	pseg = (uint64_t *)(u_long)cpup->segmapoffset;
	if (_kvm_pread(kd, kd->pmfd, &pdir, sizeof(pdir),
		_kvm_pa2off(kd, (u_long)&pseg[va_to_seg(va)]))
		!= sizeof(pdir)) {
		_kvm_syserr(kd, 0, "could not read L1 PTE");
		goto lose;
	}

	if (!pdir) {
		_kvm_err(kd, 0, "invalid L1 PTE");
		goto lose;
	}

	if (_kvm_pread(kd, kd->pmfd, &ptbl, sizeof(ptbl),
		_kvm_pa2off(kd, (u_long)&pdir[va_to_dir(va)]))
		!= sizeof(ptbl)) {
		_kvm_syserr(kd, 0, "could not read L2 PTE");
		goto lose;
	}

	if (!ptbl) {
		_kvm_err(kd, 0, "invalid L2 PTE");
		goto lose;
	}

	if (_kvm_pread(kd, kd->pmfd, &data, sizeof(data),
		_kvm_pa2off(kd, (u_long)&ptbl[va_to_pte(va)]))
		!= sizeof(data)) {
		_kvm_syserr(kd, 0, "could not read TTE");
		goto lose;
	}

	/* The TTE valid bit is the sign bit, so a non-negative entry is invalid. */
	if (data >= 0) {
		_kvm_err(kd, 0, "invalid TTE");
		goto lose;
	}

	/*
	 * Parse and translate our TTE: keep the page offset from the
	 * virtual address and take the physical page from the TTE.
	 *
	 * XXXX -- We could support multiple page sizes.
	 */
	va = va & (kd->nbpg - 1);
	data &= TLB_PA_MASK;
	*pa = data + va;

	return (int)(kd->nbpg - va);

lose:
	*pa = (u_long)-1;
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
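
/*
 * A minimal usage sketch (hypothetical helper, not part of libkvm):
 * reading kernel memory out of a crash dump by chaining _kvm_kvatop()
 * and _kvm_pa2off(), much as kvm_read() does for dead kernels.
 */
#if 0	/* example only */
static ssize_t
example_read_dump(kvm_t *kd, u_long kva, void *buf, size_t len)
{
	char *cp = buf;
	size_t resid = len;

	while (resid > 0) {
		u_long pa;
		off_t off;
		ssize_t nr;
		/* how many bytes are physically contiguous from kva */
		int cc = _kvm_kvatop(kd, kva, &pa);

		if (cc == 0)
			return (-1);	/* untranslatable address */
		if ((size_t)cc > resid)
			cc = (int)resid;
		off = _kvm_pa2off(kd, pa);	/* file offset of pa */
		if (off == (off_t)-1)
			return (-1);
		nr = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc, off);
		if (nr != (ssize_t)cc)
			return (-1);
		cp += cc;
		kva += cc;
		resid -= cc;
	}
	return ((ssize_t)len);
}
#endif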


/*
 * Translate a physical address to a file offset in the crash dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t   *kd;
	u_long  pa;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate the (sparse) physical address to a (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}
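
/*
 * A worked example with illustrative numbers: given two memory segments
 *	{ start = 0x00000000, size = 0x01000000 }
 *	{ start = 0x10000000, size = 0x00800000 }
 * the physical address 0x10000400 lies in the second segment, so the
 * whole first segment (0x01000000 bytes) is skipped and the result is
 * dump_off + 0x01000000 + 0x400.
 */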

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g., m68k)
 */
int
_kvm_mdopen(kd)
	kvm_t	*kd;
{
	u_long max_uva;
	extern struct ps_strings *__ps_strings;

	max_uva = (u_long) (__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva  = max_uva;
	kd->min_uva  = 0;

	return (0);
}
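
/*
 * Note on the bounds above: NetBSD places the ps_strings structure at
 * the top of the user stack, so the first address past __ps_strings is
 * a convenient upper bound on user virtual addresses for this process.
 */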