/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
/* from: static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93"; */
static char *rcsid = "$Id: kvm_m68k.c,v 1.1 1994/05/09 04:09:24 cgd Exp $";
#endif /* LIBC_SCCS and not lint */

/*
 * m68k machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */
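/*
 * This file supplies the machine dependent hooks used by the machine
 * independent kvm(3) code: _kvm_initvtop() and _kvm_freevtop() set up and
 * tear down the per-descriptor translation state, while _kvm_kvatop() and
 * _kvm_uvatop() translate kernel and user virtual addresses to physical
 * addresses in a crash dump.
 */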

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

#include <machine/pte.h>

#ifndef btop
#define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
#define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
#endif

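/*
 * Cached copies of the kernel variables needed for address translation,
 * filled in by _kvm_initvtop():
 *	lowram	physical address of the first page of RAM; subtracted
 *		from physical addresses to form offsets into the core file
 *	mmutype	MMU type; -2 selects the 68040-style three-level table
 *		walk (the SG4_* macros), anything else the two-level layout
 *	Sysseg	kernel segment table pointer
 */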
struct vmstate {
	u_long lowram;
	int mmutype;
	struct ste *Sysseg;
};

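/*
 * KREAD() evaluates true (nonzero) when the kvm_read() of *p fails or
 * comes up short.
 */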
#define	KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))

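/*
 * Release the translation state allocated by _kvm_initvtop().
 */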
void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0)
		free(kd->vmst);
}

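/*
 * Initialize the translation state: allocate a vmstate and fill it in
 * from the kernel symbols _lowram, _mmutype and _Sysseg.
 */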
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	struct vmstate *vm;
	struct nlist nlist[4];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nlist[0].n_name = "_lowram";
	nlist[1].n_name = "_mmutype";
	nlist[2].n_name = "_Sysseg";
	nlist[3].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	vm->Sysseg = 0;
	if (KREAD(kd, (u_long)nlist[0].n_value, &vm->lowram)) {
		_kvm_err(kd, kd->program, "cannot read lowram");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[1].n_value, &vm->mmutype)) {
		_kvm_err(kd, kd->program, "cannot read mmutype");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[2].n_value, &vm->Sysseg)) {
		_kvm_err(kd, kd->program, "cannot read segment table");
		return (-1);
	}
	return (0);
}

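/*
 * Common table walk: translate virtual address va through the segment
 * table at sta in a crash dump.  On success the physical address is
 * stored through pa and the number of bytes remaining in the page
 * (NBPG - offset) is returned; 0 is returned on failure.
 */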
static int
_kvm_vatop(kd, sta, va, pa)
	kvm_t *kd;
	struct ste *sta;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register u_long lowram;
	register u_long addr;
	int p, ste, pte;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return((off_t)0);
	}
	vm = kd->vmst;
	offset = va & PGOFSET;
	/*
	 * If we are initializing (kernel segment table pointer not yet set)
	 * then return pa == va to avoid infinite recursion.
	 */
	if (vm->Sysseg == 0) {
		*pa = va;
		return (NBPG - offset);
	}
	lowram = vm->lowram;
	if (vm->mmutype == -2) {
		struct ste *sta2;

		addr = (u_long)&sta[va >> SG4_SHIFT1];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
				 ste);
			return((off_t)0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR1);
		addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
		/*
		 * Address from level 1 STE is a physical address,
		 * so don't use kvm_read.
		 */
		if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
				 ste);
			return((off_t)0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR2);
		addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
	} else {
		addr = (u_long)&sta[va >> SEGSHIFT];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid segment (%x)", ste);
			return((off_t)0);
		}
		p = btop(va & SG_PMASK);
		addr = (ste & SG_FRAME) + (p * sizeof(struct pte));
	}
	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
		goto invalid;
	addr = pte & PG_FRAME;
	if (pte == PG_NV) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr - lowram + offset;

	return (NBPG - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%x)", va);
	return (0);
}

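/*
 * Translate a kernel virtual address to a physical address using the
 * kernel segment table.
 */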
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	return (_kvm_vatop(kd, kd->vmst->Sysseg, va, pa));
}

/*
 * Translate a user virtual address to a physical address.
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	register struct vmspace *vms = p->p_vmspace;
	int kva;

	/*
	 * If this is a live kernel we just look it up in the kernel
	 * virtually allocated flat 4mb page table (i.e. let the kernel
	 * do the table walk).  In this way, we avoid needing to know
	 * the MMU type.
	 */
	if (ISALIVE(kd)) {
		struct pte *ptab;
		int pte, offset;

		kva = (int)&vms->vm_pmap.pm_ptab;
		if (KREAD(kd, kva, &ptab)) {
			_kvm_err(kd, 0, "invalid address (%x)", va);
			return (0);
		}
		kva = (int)&ptab[btop(va)];
		if (KREAD(kd, kva, &pte) || (pte & PG_V) == 0) {
			_kvm_err(kd, 0, "invalid address (%x)", va);
			return (0);
		}
		offset = va & PGOFSET;
		*pa = (pte & PG_FRAME) | offset;
		return (NBPG - offset);
	}
	/*
	 * Otherwise, we just walk the table ourselves.
	 */
	kva = (int)&vms->vm_pmap.pm_stab;
	if (KREAD(kd, kva, &kva)) {
		_kvm_err(kd, 0, "invalid address (%x)", va);
		return (0);
	}
	return (_kvm_vatop(kd, (struct ste *)kva, va, pa));
}
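
/*
 * Usage note (illustrative sketch, not part of this module): the routines
 * above are not called directly by applications; they are invoked by the
 * machine independent kvm(3) code when a consumer reads a crash dump.
 * The kernel and core file names below are hypothetical examples;
 * _lowram is the symbol this module itself looks up.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[2];
 *	u_long val;
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core", NULL,
 *	    O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "%s", errbuf);
 *	nl[0].n_name = "_lowram";
 *	nl[1].n_name = NULL;
 *	if (kvm_nlist(kd, nl) == 0 &&
 *	    kvm_read(kd, nl[0].n_value, &val, sizeof(val)) == sizeof(val))
 *		printf("lowram = 0x%lx\n", val);
 *	kvm_close(kd);
 *
 * For the dead kernel case, kvm_read() uses _kvm_kvatop() above to turn
 * each kernel virtual address into an offset in the core file.
 */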