/* $NetBSD: kvm_alpha.c,v 1.18 2000/06/29 06:34:23 mrg Exp $ */

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#define	__KVM_ALPHA_PRIVATE		/* see <machine/pte.h> */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <limits.h>
#include <db.h>
#include <stdlib.h>

#include "kvm_private.h"

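/*
 * Machine-dependent private state for virtual-to-physical translation;
 * page_shift is derived from the page size recorded in the crash dump's
 * cpu_kcore_hdr by _kvm_initvtop() below.
 */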
struct vmstate {
	vsize_t		page_shift;
};

void
_kvm_freevtop(kd)
	kvm_t *kd;
{

	if (kd->vmst != 0)
		free(kd->vmst);
}

int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);

	cpu_kh = kd->cpu_data;

	/* Compute page_shift. */
	for (vm->page_shift = 0; (1L << vm->page_shift) < cpu_kh->page_size;
	    vm->page_shift++)
		/* nothing */ ;
	if ((1L << vm->page_shift) != cpu_kh->page_size) {
		free(vm);
		return (-1);
	}
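	/*
	 * For example, the usual 8 KB Alpha page size yields a
	 * page_shift of 13; a page_size that is not a power of two
	 * is rejected above.
	 */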

	kd->vmst = vm;
	return (0);
}

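/*
 * Translate a kernel virtual address to a physical address using the
 * kernel page tables saved in the crash dump, and return the number of
 * contiguous bytes valid starting at *pa (0 on failure).  K0SEG
 * addresses are direct-mapped; K1SEG addresses are looked up through
 * the three-level page table rooted at lev1map_pa.
 */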
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;
	alpha_pt_entry_t pte;
	u_long pteoff, page_off;
	int rv;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return(0);
	}

	cpu_kh = kd->cpu_data;
	vm = kd->vmst;
	page_off = va & (cpu_kh->page_size - 1);

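	/*
	 * The l1pte_index()/l2pte_index()/l3pte_index() macros from
	 * <machine/pte.h> are written in terms of PAGE_SHIFT, so point
	 * it at the page shift recorded for this particular dump.
	 */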
#define	PAGE_SHIFT	vm->page_shift

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		/*
		 * Direct-mapped address: just convert it.
		 */

		*pa = ALPHA_K0SEG_TO_PHYS(va);
		rv = cpu_kh->page_size - page_off;
	} else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
		/*
		 * Real kernel virtual address: do the translation.
		 */

		/* Find and read the L1 PTE. */
		pteoff = cpu_kh->lev1map_pa +
		    l1pte_index(va) * sizeof(alpha_pt_entry_t);
		if (pread(kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L1 PTE");
			goto lose;
		}

		/* Find and read the L2 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l2pte_index(va) * sizeof(alpha_pt_entry_t);
		if (pread(kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L2 PTE");
			goto lose;
		}

		/* Find and read the L3 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l3pte_index(va) * sizeof(alpha_pt_entry_t);
		if (pread(kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L3 PTE");
			goto lose;
		}

		/* Fill in the PA. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
			goto lose;
		}
		*pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
		rv = cpu_kh->page_size - page_off;
	} else {
		/*
		 * Bogus address (not in KV space): punt.
		 */

		_kvm_err(kd, 0, "invalid kernel virtual address");
 lose:
		*pa = -1;
		rv = 0;
	}

#undef PAGE_SHIFT

	return (rv);
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t *kd;
	u_long pa;
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + ALIGN(sizeof *cpu_kh));

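	/*
	 * The dump stores each physical RAM segment back to back
	 * starting at dump_off, in the order listed in the kcore
	 * header, so accumulate segment sizes until the segment
	 * containing pa is found.
	 */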
	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}

	return (kd->dump_off + off);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
int
_kvm_mdopen(kd)
	kvm_t *kd;
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}