/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#define NPMEG		128

/* XXX from sparc/pmap.c */
#define MAXMEM		(128 * 1024 * 1024)	/* no more than 128 MB phys mem */
#define NPGBANK		16			/* 2^4 pages per bank (64K / bank) */
#define BSHIFT		4			/* log2(NPGBANK) */
#define BOFFSET		(NPGBANK - 1)
#define BTSIZE		(MAXMEM / 4096 / NPGBANK)
#define HWTOSW(pmap_stod, pg) (pmap_stod[(pg) >> BSHIFT] | ((pg) & BOFFSET))
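/*
 * Worked example (hypothetical numbers): hardware page 0x2345 lies in
 * bank 0x2345 >> BSHIFT == 0x234; if pmap_stod[0x234] == 0x120, then
 * HWTOSW(pmap_stod, 0x2345) == 0x120 | 0x5 == 0x125, i.e. that page's
 * position in the contiguous crashdump image.
 */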

struct vmstate {
	pmeg_t segmap[NKSEG];
	int *pmeg;
	int pmap_stod[BTSIZE];	/* dense to sparse */
};

static int cputyp;

static int pgshift, nptesg;

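/*
 * Derive the page shift and the number of PTEs per segment from the page
 * size recorded in the kvm handle.  For example (hypothetical values),
 * 4096-byte pages give pgshift == 12 and 8192-byte pages give
 * pgshift == 13; nptesg is then NBPSG divided by that page size.
 */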
static void
getpgshift(kd)
	kvm_t *kd;
{
	for (pgshift = 12; (1 << pgshift) != kd->nbpg; pgshift++)
		;
	nptesg = NBPSG / kd->nbpg;
}

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		if (kd->vmst->pmeg != 0)
			free(kd->vmst->pmeg);
		free(kd->vmst);
	}
}

int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	register int i;
	register int off;
	register struct vmstate *vm;
	struct stat st;
	struct nlist nlist[3];

	if (pgshift == 0)
		getpgshift(kd);

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	vm->pmeg = (int *)_kvm_malloc(kd, NPMEG * nptesg * sizeof(int));
	if (vm->pmeg == 0) {
		free(vm);
		return (-1);
	}

	kd->vmst = vm;

	if (fstat(kd->pmfd, &st) < 0)
		return (-1);
	/*
	 * Read segment table.
	 */
	off = st.st_size - roundup(sizeof(vm->segmap), kd->nbpg);
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->segmap, sizeof(vm->segmap)) < 0) {
		_kvm_err(kd, kd->program, "cannot read segment map");
		return (-1);
	}
	/*
	 * Read PMEGs.  The PMEG table is assumed to sit immediately below
	 * the segment map at the end of the dump.
	 */
	off = st.st_size - roundup(sizeof(vm->segmap), kd->nbpg) -
	    roundup(NPMEG * nptesg * sizeof(int), kd->nbpg);
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->pmeg, NPMEG * nptesg * sizeof(int)) < 0) {
		_kvm_err(kd, kd->program, "cannot read PMEG table");
		return (-1);
	}
	/*
	 * Make pmap_stod be an identity map so we can bootstrap it in.
	 * We assume it's in the first contiguous chunk of physical memory.
	 */
	for (i = 0; i < BTSIZE; ++i)
		vm->pmap_stod[i] = i << BSHIFT;
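	/*
	 * With the identity map in place, bank i covers hardware pages
	 * i * NPGBANK through i * NPGBANK + NPGBANK - 1, so HWTOSW() is
	 * effectively a no-op until the kernel's own pmap_stod is read
	 * in below.
	 */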

	/*
	 * It's okay to do this nlist separately from the one kvm_getprocs()
	 * does, since the only time we could gain anything by combining
	 * them is if we do a kvm_getprocs() on a dead kernel, which is
	 * not too common.
	 */
	nlist[0].n_name = "_cputyp";
	nlist[1].n_name = "_pmap_stod";
	nlist[2].n_name = 0;
	(void)kvm_nlist(kd, nlist);
	if (nlist[0].n_value == 0) {
		_kvm_err(kd, kd->program, "cputyp: no such symbol");
		return (-1);
	}
	if (kvm_read(kd, (u_long)nlist[0].n_value,
	    (char *)&cputyp, sizeof(cputyp)) != sizeof(cputyp)) {
		_kvm_err(kd, kd->program, "cannot read cputyp");
		return (-1);
	}

	/*
	 * A kernel compiled only for the sun4 will not contain the symbol
	 * pmap_stod.  Instead, we are happy to use the identity map
	 * initialized earlier.  If we are not a sun4, the lack of this
	 * symbol is fatal.
	 */
	if (nlist[1].n_value != 0) {
		if (kvm_read(kd, (u_long)nlist[1].n_value,
		    (char *)vm->pmap_stod, sizeof(vm->pmap_stod))
		    != sizeof(vm->pmap_stod)) {
			_kvm_err(kd, kd->program, "cannot read pmap_stod");
			return (-1);
		}
	} else {
		if (cputyp != CPU_SUN4) {
			_kvm_err(kd, kd->program, "pmap_stod: no such symbol");
			return (-1);
		}
	}
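	/*
	 * From here on pmap_stod is either the kernel's own bank table
	 * (read from the dump) or, for a sun4-only kernel, the identity
	 * map set up above; HWTOSW() uses it to relocate hardware page
	 * frame numbers into the contiguous crashdump image.
	 */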

	return (0);
}

#define VA_OFF(va) ((va) & (kd->nbpg - 1))
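/*
 * Note that VA_OFF() relies on kd->nbpg being a power of two, which the
 * getpgshift() loop above also assumes.
 */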

/*
 * Translate a user virtual address to a physical address.
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	int kva, pte;
	register int off, frame;
	register struct vmspace *vms = p->p_vmspace;
	struct usegmap *usp;

	if (pgshift == 0)
		getpgshift(kd);

	if ((u_long)vms < KERNBASE) {
		_kvm_err(kd, kd->program, "_kvm_uvatop: corrupt proc");
		return (0);
	}
	if (va >= KERNBASE)
		return (0);
	/*
	 * Get the PTE.  This takes two steps.  We read the
	 * base address of the table, then we index it.
	 * Note that the pte table is indexed by
	 * virtual segment rather than physical segment.
	 */
	kva = (u_long)&vms->vm_pmap.pm_segstore;
	if (kvm_read(kd, kva, (char *)&usp, 4) != 4)
		goto invalid;
	kva = (u_long)&usp->us_pte[VA_VSEG(va)];
	if (kvm_read(kd, kva, (char *)&kva, 4) != 4 || kva == 0)
		goto invalid;
	kva += sizeof(usp->us_pte[0]) * VA_VPG(va);
	if (kvm_read(kd, kva, (char *)&pte, 4) == 4 && (pte & PG_V)) {
		off = VA_OFF(va);
		/*
		 * /dev/mem adheres to the hardware model of physical memory
		 * (with holes in the address space), while crashdumps
		 * adhere to the contiguous software model.
		 */
		if (ISALIVE(kd))
			frame = pte & PG_PFNUM;
		else
			frame = HWTOSW(kd->vmst->pmap_stod, pte & PG_PFNUM);
		*pa = (frame << pgshift) | off;
		return (kd->nbpg - off);
	}
invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crashdumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register int s;
	register int pte;
	register int off;

	if (pgshift == 0)
		getpgshift(kd);

	if (va >= KERNBASE) {
		vm = kd->vmst;
		s = vm->segmap[VA_VSEG(va) - NUSEG];
		pte = vm->pmeg[VA_VPG(va) + nptesg * s];
		if ((pte & PG_V) != 0) {
			off = VA_OFF(va);
			*pa = (HWTOSW(vm->pmap_stod, pte & PG_PFNUM)
			    << pgshift) | off;

			return (kd->nbpg - off);
		}
	}
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}