/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#define NPMEG 128

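/*
 * HWTOSW translates a hardware page frame number (as found in a PTE) into
 * the dense page index used in a crash dump: the upper bits of the frame
 * number select a bank of NPGBANK pages, which is remapped through the
 * given translation table, while the page-within-bank bits are kept
 * unchanged.
 */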
/* XXX from sparc/pmap.c */
#define MAXMEM	(128 * 1024 * 1024)	/* no more than 128 MB phys mem */
#define NPGBANK	16			/* 2^4 pages per bank (64K / bank) */
#define BSHIFT	4			/* log2(NPGBANK) */
#define BOFFSET	(NPGBANK - 1)
#define BTSIZE	(MAXMEM / 4096 / NPGBANK)
#define HWTOSW(pmap_stod, pg) (pmap_stod[(pg) >> BSHIFT] | ((pg) & BOFFSET))

struct vmstate {
	pmeg_t segmap[NKSEG];
	int *pmeg;
	int pmap_stod[BTSIZE];		/* sparse (hardware) to dense (software) */
};

static int cputyp = -1;

static int pgshift, nptesg;

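/*
 * The page-within-segment field of a virtual address is laid out
 * differently on sun4 and sun4c, so VA_VPG has to dispatch on the CPU
 * type determined at run time.
 */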
#define VA_VPG(va)	(cputyp == CPU_SUN4C ? VA_SUN4C_VPG(va) : VA_SUN4_VPG(va))

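/*
 * Lazy one-time setup: derive the page shift and the number of PTEs per
 * segment from the page size recorded in the kvm descriptor, and guess
 * the CPU type from the page size (8K pages mean sun4, otherwise sun4c).
 */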
static void
_kvm_mustinit(kd)
	kvm_t *kd;
{
	if (cputyp != -1)
		return;
	for (pgshift = 12; (1 << pgshift) != kd->nbpg; pgshift++)
		;
	nptesg = NBPSG / kd->nbpg;

#if 1
	if (cputyp == -1) {
		if (kd->nbpg == 8192)
			cputyp = CPU_SUN4;
		else
			cputyp = CPU_SUN4C;
	}
#endif
}

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		if (kd->vmst->pmeg != 0)
			free(kd->vmst->pmeg);
		free(kd->vmst);
		kd->vmst = 0;
	}
}

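/*
 * Initialize the virtual-to-physical translation state: allocate the
 * vmstate structure, read the kernel segment map and the saved PMEG
 * (PTE) tables from the tail of the crash dump, and fetch the kernel's
 * physical memory translation table (pmap_stod) when it is present.
 */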
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	register int i;
	register int off;
	register struct vmstate *vm;
	struct stat st;
	struct nlist nlist[2];

	_kvm_mustinit(kd);

	if (kd->vmst == 0) {
		kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
		if (kd->vmst == 0)
			return (-1);
		kd->vmst->pmeg = (int *)_kvm_malloc(kd,
		    NPMEG * nptesg * sizeof(int));
		if (kd->vmst->pmeg == 0) {
			free(kd->vmst);
			kd->vmst = 0;
			return (-1);
		}
	}
	vm = kd->vmst;

	if (fstat(kd->pmfd, &st) < 0)
		return (-1);
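
	/*
	 * The segment map and the PMEG contents are expected at the tail
	 * of the memory image, so both offsets below are computed
	 * backwards from the end of the file.
	 */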
	/*
	 * Read segment table.
	 */
	off = st.st_size - roundup(sizeof(vm->segmap), kd->nbpg);
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->segmap, sizeof(vm->segmap)) < 0) {
		_kvm_err(kd, kd->program, "cannot read segment map");
		return (-1);
	}
	/*
	 * Read PMEGs.
	 */
	off = st.st_size - roundup(NPMEG * nptesg * sizeof(int), kd->nbpg) +
	    ((sizeof(vm->segmap) + kd->nbpg - 1) >> pgshift);
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->pmeg, NPMEG * nptesg * sizeof(int)) < 0) {
		_kvm_err(kd, kd->program, "cannot read PMEG table");
		return (-1);
	}
	/*
	 * Make pmap_stod be an identity map so we can bootstrap it in.
	 * We assume it's in the first contiguous chunk of physical memory.
	 */
	for (i = 0; i < BTSIZE; ++i)
		vm->pmap_stod[i] = i << 4;

	/*
	 * It's okay to do this nlist separately from the one kvm_getprocs()
	 * does, since the only time we could gain anything by combining
	 * them is if we do a kvm_getprocs() on a dead kernel, which is
	 * not too common.
	 */
	nlist[0].n_name = "_pmap_stod";
	nlist[1].n_name = 0;
	(void)kvm_nlist(kd, nlist);

	/*
	 * A kernel compiled only for the sun4 does not contain the symbol
	 * pmap_stod; in that case we simply use the identity map set up
	 * above.  On any other machine the lack of this symbol is fatal.
	 */
	if (nlist[0].n_value != 0) {
		if (kvm_read(kd, (u_long)nlist[0].n_value,
		    (char *)vm->pmap_stod, sizeof(vm->pmap_stod))
		    != sizeof(vm->pmap_stod)) {
			_kvm_err(kd, kd->program, "cannot read pmap_stod");
			return (-1);
		}
	} else {
		if (cputyp != CPU_SUN4) {
			_kvm_err(kd, kd->program, "pmap_stod: no such symbol");
			return (-1);
		}
	}

	return (0);
}

#define VA_OFF(va) ((va) & (kd->nbpg - 1))

/*
 * Translate a user virtual address to a physical address.  On success
 * the physical address is stored through pa and the number of bytes
 * contiguous at that address is returned; 0 means the translation failed.
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	int kva, pte;
	register int off, frame;
	register struct vmspace *vms = p->p_vmspace;
	struct usegmap *usp;

	_kvm_mustinit(kd);

	if ((u_long)vms < KERNBASE) {
		_kvm_err(kd, kd->program, "_kvm_uvatop: corrupt proc");
		return (0);
	}
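	/* Only user-space addresses (below KERNBASE) are translated here. */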
	if (va >= KERNBASE)
		return (0);
	/*
	 * Get the PTE.  This takes a few steps: we read the pointer to
	 * the user segment map, then the PTE table base for this virtual
	 * segment, and finally the PTE itself.  Note that the PTE table
	 * is indexed by virtual segment rather than physical segment.
	 */
	kva = (u_long)&vms->vm_pmap.pm_segstore;
	if (kvm_read(kd, kva, (char *)&usp, 4) != 4)
		goto invalid;
	kva = (u_long)&usp->us_pte[VA_VSEG(va)];
	if (kvm_read(kd, kva, (char *)&kva, 4) != 4 || kva == 0)
		goto invalid;
	kva += sizeof(usp->us_pte[0]) * VA_VPG(va);
	if (kvm_read(kd, kva, (char *)&pte, 4) == 4 && (pte & PG_V)) {
		off = VA_OFF(va);
		/*
		 * /dev/mem adheres to the hardware model of physical memory
		 * (with holes in the address space), while crashdumps
		 * adhere to the contiguous software model.
		 */
		if (ISALIVE(kd))
			frame = pte & PG_PFNUM;
		else
			frame = HWTOSW(kd->vmst->pmap_stod, pte & PG_PFNUM);
		*pa = (frame << pgshift) | off;
		return (kd->nbpg - off);
	}
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vmst.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crashdumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register int s;
	register int pte;
	register int off;

	_kvm_mustinit(kd);

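	/*
	 * Kernel virtual segments start at NUSEG in the segment map; the
	 * map entry names a PMEG, and the saved PMEG array holds the PTEs
	 * for the pages of that segment.
	 */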
	if (va >= KERNBASE) {
		vm = kd->vmst;
		s = vm->segmap[VA_VSEG(va) - NUSEG];
		pte = vm->pmeg[VA_VPG(va) + nptesg * s];
		if ((pte & PG_V) != 0) {
			off = VA_OFF(va);
			*pa = (HWTOSW(vm->pmap_stod, pte & PG_PFNUM)
			    << pgshift) | off;

			return (kd->nbpg - off);
		}
	}
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

#if 0
static int
getcputyp()
{
	int mib[2];
	size_t size;

	mib[0] = CTL_HW;
	mib[1] = HW_CLASS;
	size = sizeof cputyp;
	if (sysctl(mib, 2, &cputyp, &size, NULL, 0) == -1)
		return (-1);
	return (0);
}
#endif
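
/*
 * Illustrative usage sketch (not part of the library): these MD routines
 * are reached indirectly when a libkvm consumer reads from a dead kernel;
 * kvm_read() uses _kvm_kvatop() above to turn each kernel virtual address
 * into an offset within the crash dump.  The kernel image path, dump path
 * and symbol name below are examples only.
 *
 *	char errbuf[_POSIX2_LINE_MAX], buf[256];
 *	struct nlist nl[2];
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core", NULL,
 *	    O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "%s", errbuf);
 *	nl[0].n_name = "_hostname";
 *	nl[1].n_name = 0;
 *	if (kvm_nlist(kd, nl) == 0 && nl[0].n_value != 0)
 *		(void)kvm_read(kd, nl[0].n_value, buf, sizeof(buf));
 *	kvm_close(kd);
 */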