/* $NetBSD: kvm_alpha.c,v 1.11 1998/03/25 00:47:20 thorpej Exp $ */

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#define __KVM_ALPHA_PRIVATE		/* see <machine/pte.h> */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>
#include <stdlib.h>

#include "kvm_private.h"

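/*
 * Machine-dependent translation state kept per kvm descriptor: the page
 * shift derived from the page size recorded in the crash dump's
 * cpu_kcore_hdr.
 */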
struct vmstate {
        vm_size_t       page_shift;
};

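/*
 * Release the translation state allocated by _kvm_initvtop(), if any.
 */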
void
_kvm_freevtop(kd)
        kvm_t *kd;
{

        if (kd->vmst != 0)
                free(kd->vmst);
}

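/*
 * Set up virtual-to-physical translation for a crash dump: allocate a
 * vmstate and compute page_shift from the page size recorded in the
 * dump's cpu_kcore_hdr.  Fails if the page size is not a power of two.
 */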
int
_kvm_initvtop(kd)
        kvm_t *kd;
{
        cpu_kcore_hdr_t *cpu_kh;
        struct vmstate *vm;

        vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
        if (vm == NULL)
                return (-1);

        cpu_kh = kd->cpu_data;

        /* Compute page_shift. */
        for (vm->page_shift = 0; (1 << vm->page_shift) < cpu_kh->page_size;
            vm->page_shift++)
                /* nothing */ ;
        if ((1 << vm->page_shift) != cpu_kh->page_size) {
                free(vm);
                return (-1);
        }

        kd->vmst = vm;
        return (0);
}

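/*
 * Translate a kernel virtual address to a physical address using the
 * page tables saved in the crash dump.  On success, *pa is filled in and
 * the number of bytes valid from va to the end of its page is returned;
 * on failure, *pa is set to -1 and 0 is returned.
 */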
int
_kvm_kvatop(kd, va, pa)
        kvm_t *kd;
        u_long va;
        u_long *pa;
{
        cpu_kcore_hdr_t *cpu_kh;
        struct vmstate *vm;
        int rv, page_off;
        alpha_pt_entry_t pte;
        off_t pteoff;

        if (ISALIVE(kd)) {
                _kvm_err(kd, 0, "vatop called in live kernel!");
                return(0);
        }

        cpu_kh = kd->cpu_data;
        vm = kd->vmst;
        page_off = va & (cpu_kh->page_size - 1);

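/*
 * The l[123]pte_index() macros from <machine/pte.h> (exposed here via
 * __KVM_ALPHA_PRIVATE above) expand in terms of PAGE_SHIFT; point that
 * at the shift computed from the dump header so the page table walk
 * below uses the dump's page size rather than a compile-time constant.
 */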
#define PAGE_SHIFT      vm->page_shift

        if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
                /*
                 * Direct-mapped address: just convert it.
                 */

                *pa = ALPHA_K0SEG_TO_PHYS(va);
                rv = cpu_kh->page_size - page_off;
        } else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
                /*
                 * Real kernel virtual address: do the translation.
                 */

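                /*
                 * The walk below follows the Alpha three-level page
                 * table saved in the dump: the header records the
                 * physical address of the level 1 table (lev1map_pa),
                 * each valid PTE gives the page frame of the next
                 * level's table, and the L3 PTE gives the page frame of
                 * the mapping itself.
                 */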
                /* Find and read the L1 PTE. */
                pteoff = cpu_kh->lev1map_pa +
                    l1pte_index(va) * sizeof(alpha_pt_entry_t);
                if (lseek(kd->pmfd, _kvm_pa2off(kd, pteoff), 0) == -1 ||
                    read(kd->pmfd, (char *)&pte, sizeof(pte)) != sizeof(pte)) {
                        _kvm_syserr(kd, 0, "could not read L1 PTE");
                        goto lose;
                }

                /* Find and read the L2 PTE. */
                if ((pte & ALPHA_PTE_VALID) == 0) {
                        _kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
                        goto lose;
                }
                pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
                    l2pte_index(va) * sizeof(alpha_pt_entry_t);
                if (lseek(kd->pmfd, _kvm_pa2off(kd, pteoff), 0) == -1 ||
                    read(kd->pmfd, (char *)&pte, sizeof(pte)) != sizeof(pte)) {
                        _kvm_syserr(kd, 0, "could not read L2 PTE");
                        goto lose;
                }

                /* Find and read the L3 PTE. */
                if ((pte & ALPHA_PTE_VALID) == 0) {
                        _kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
                        goto lose;
                }
                pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
                    l3pte_index(va) * sizeof(alpha_pt_entry_t);
                if (lseek(kd->pmfd, _kvm_pa2off(kd, pteoff), 0) == -1 ||
                    read(kd->pmfd, (char *)&pte, sizeof(pte)) != sizeof(pte)) {
                        _kvm_syserr(kd, 0, "could not read L3 PTE");
                        goto lose;
                }

                /* Fill in the PA. */
                if ((pte & ALPHA_PTE_VALID) == 0) {
                        _kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
                        goto lose;
                }
                *pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
                rv = cpu_kh->page_size - page_off;
        } else {
                /*
                 * Bogus address (not in KV space): punt.
                 */

                _kvm_err(kd, 0, "invalid kernel virtual address");
lose:
                *pa = -1;
                rv = 0;
        }

#undef PAGE_SHIFT

        return (rv);
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
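/*
 * The dumped RAM segments are laid out back to back, in the order given
 * by the phys_ram_seg_t array that follows the cpu_kcore_hdr, so the
 * offset is dump_off plus the sizes of all segments preceding the one
 * that contains pa, plus pa's offset within that segment.
 */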
off_t
_kvm_pa2off(kd, pa)
        kvm_t *kd;
        u_long pa;
{
        cpu_kcore_hdr_t *cpu_kh;
        phys_ram_seg_t *ramsegs;
        off_t off;
        int i;

        cpu_kh = kd->cpu_data;
        ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + ALIGN(sizeof *cpu_kh));

        off = 0;
        for (i = 0; i < cpu_kh->nmemsegs; i++) {
                if (pa >= ramsegs[i].start &&
                    (pa - ramsegs[i].start) < ramsegs[i].size) {
                        off += (pa - ramsegs[i].start);
                        break;
                }
                off += ramsegs[i].size;
        }

        return (kd->dump_off + off);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
int
_kvm_mdopen(kd)
        kvm_t *kd;
{

        kd->usrstack = USRSTACK;
        kd->min_uva = VM_MIN_ADDRESS;
        kd->max_uva = VM_MAXUSER_ADDRESS;

        return (0);
}
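
/*
 * Usage sketch (illustrative only; the file names below are not part of
 * this library): a consumer opens a crash dump through the standard
 * libkvm entry points, e.g.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core",
 *	    NULL, O_RDONLY, errbuf);
 *
 * and later kvm_read() calls on kernel virtual addresses are what drive
 * _kvm_kvatop() and _kvm_pa2off() above.
 */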