/*	$NetBSD: kvm_arm.c,v 1.6 2010/09/20 23:23:16 jym Exp $	*/

/*-
 * Copyright (C) 1996 Wolfgang Solfrank.
 * Copyright (C) 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: kvm_powerpc.c,v 1.3 1997/09/19 04:00:23 thorpej Exp
 */

/*
 * arm32 machine dependent routines for kvm.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: kvm_arm.c,v 1.6 2010/09/20 23:23:16 jym Exp $");
#endif /* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/kcore.h>
#include <sys/types.h>

#include <arm/kcore.h>
#include <arm/arm32/pte.h>

#include <stdlib.h>
#include <db.h>
#include <limits.h>
#include <kvm.h>

#include <unistd.h>

#include "kvm_private.h"

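/*
 * Free any virtual-to-physical translation state hung off this
 * descriptor.
 */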
void
_kvm_freevtop(kvm_t * kd)
{
	if (kd->vmst != 0)
		free(kd->vmst);
}

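/*
 * Set up for virtual-to-physical translation.  No per-descriptor state
 * is needed here on arm32; _kvm_kvatop() walks the dumped page tables
 * directly.
 */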
int
_kvm_initvtop(kvm_t * kd)
{
	return 0;
}

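/*
 * Translate the kernel virtual address va to a physical address by
 * walking the L1/L2 page tables recorded in the crash dump.  On success
 * *pa is set and the number of bytes still valid from *pa to the end of
 * the mapping is returned; 0 is returned on failure.
 */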
int
_kvm_kvatop(kvm_t * kd, vaddr_t va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	pd_entry_t pde;
	pt_entry_t pte;
	paddr_t pde_pa, pte_pa;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}
	cpu_kh = kd->cpu_data;

	if (cpu_kh->version != 1) {
		_kvm_err(kd, 0, "unsupported kcore structure version");
		return 0;
	}
	if (cpu_kh->flags != 0) {
		_kvm_err(kd, 0, "kcore flags not supported");
		return 0;
	}
	/*
	 * work out which L1 table we need
	 */
	if (va >= (cpu_kh->UserL1TableSize << 17))
		pde_pa = cpu_kh->PAKernelL1Table;
	else
		pde_pa = cpu_kh->PAUserL1Table;

	/*
	 * work out the offset into the L1 Table
	 */
	pde_pa += ((va >> 20) * sizeof(pd_entry_t));

	if (_kvm_pread(kd, kd->pmfd, (void *) &pde, sizeof(pd_entry_t),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pd_entry_t)) {
		_kvm_syserr(kd, 0, "could not read L1 entry");
		return (0);
	}
	/*
	 * next work out what kind of record it is
	 */
	switch (pde & L1_TYPE_MASK) {
	case L1_TYPE_S:
		*pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
		return L1_S_SIZE - (va & L1_S_OFFSET);
	case L1_TYPE_C:
		pte_pa = (pde & L1_C_ADDR_MASK)
		    | ((va & 0xff000) >> 10);
		break;
	case L1_TYPE_F:
		pte_pa = (pde & L1_S_ADDR_MASK)
		    | ((va & 0xffc00) >> 8);
		break;
	default:
		_kvm_syserr(kd, 0, "L1 entry is invalid");
		return (0);
	}

	/*
	 * locate the pte and load it
	 */
	if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pt_entry_t),
	    _kvm_pa2off(kd, pte_pa)) != sizeof(pt_entry_t)) {
		_kvm_syserr(kd, 0, "could not read L2 entry");
		return (0);
	}
	switch (pte & L2_TYPE_MASK) {
	case L2_TYPE_L:
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (L2_L_SIZE - (va & L2_L_OFFSET));
	case L2_TYPE_S:
		*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
		return (L2_S_SIZE - (va & L2_S_OFFSET));
	case L2_TYPE_T:
		*pa = (pte & L2_T_FRAME) | (va & L2_T_OFFSET);
		return (L2_T_SIZE - (va & L2_T_OFFSET));
	default:
		_kvm_syserr(kd, 0, "L2 entry is invalid");
		return (0);
	}

	_kvm_err(kd, 0, "vatop not yet implemented!");
	return 0;
}

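/*
 * Convert the physical address pa to an offset within the dump file by
 * stepping through the physical RAM segments recorded in the kcore
 * header.
 */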
off_t
_kvm_pa2off(kvm_t * kd, u_long pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (void *) ((char *) (void *) cpu_kh + cpu_kh->omemsegs);

	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}
	return (kd->dump_off + off);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. arm)
 */
int
_kvm_mdopen(kvm_t * kd)
{
	uintptr_t max_uva;
	extern struct ps_strings *__ps_strings;

#if 0	/* XXX - These vary across arm machines... */
	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;
#endif
	/* This is somewhat hack-ish, but it works. */
	max_uva = (uintptr_t) (__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva = max_uva;
	kd->min_uva = 0;

	return (0);
}