/* $NetBSD: kvm_alpha.c,v 1.20 2001/08/05 17:51:40 matt Exp $ */

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#define	__KVM_ALPHA_PRIVATE		/* see <machine/pte.h> */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

#include <limits.h>
#include <db.h>
#include <stdlib.h>

#include "kvm_private.h"

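/*
 * Machine-dependent state cached per open kvm descriptor: the base-2
 * logarithm of the page size recorded in the crash dump's kcore header.
 */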
struct vmstate {
	vsize_t		page_shift;
};

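/*
 * Release the machine-dependent translation state allocated by
 * _kvm_initvtop(), if any.
 */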
void
_kvm_freevtop(kd)
	kvm_t *kd;
{

	if (kd->vmst != 0)
		free(kd->vmst);
}

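/*
 * Set up for kernel virtual-to-physical translation: allocate the
 * vmstate and derive page_shift from the page size recorded in the
 * dump's cpu_kcore_hdr, failing if that size is not a power of two.
 */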
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);

	cpu_kh = kd->cpu_data;

	/* Compute page_shift. */
	for (vm->page_shift = 0; (1L << vm->page_shift) < cpu_kh->page_size;
	    vm->page_shift++)
		/* nothing */ ;
	if ((1L << vm->page_shift) != cpu_kh->page_size) {
		free(vm);
		return (-1);
	}

	kd->vmst = vm;
	return (0);
}

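/*
 * Translate a kernel virtual address to a physical address using the
 * page tables captured in the crash dump.  K0SEG addresses are
 * direct-mapped; K1SEG addresses are resolved through the three-level
 * page table rooted at lev1map_pa.  Returns the number of bytes valid
 * in the page starting at *pa, or 0 on failure.
 */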
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;
	alpha_pt_entry_t pte;
	u_long pteoff, page_off;
	int rv;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	cpu_kh = kd->cpu_data;
	vm = kd->vmst;
	page_off = va & (cpu_kh->page_size - 1);

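/*
 * The l*pte_index() macros pulled in via __KVM_ALPHA_PRIVATE expand in
 * terms of PAGE_SHIFT; point it at the dump's page shift so the walk
 * below matches the crashed kernel's page size rather than a
 * compile-time constant.
 */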
#define	PAGE_SHIFT	vm->page_shift

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		/*
		 * Direct-mapped address: just convert it.
		 */

		*pa = ALPHA_K0SEG_TO_PHYS(va);
		rv = cpu_kh->page_size - page_off;
	} else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
		/*
		 * Real kernel virtual address: do the translation.
		 */

		/* Find and read the L1 PTE. */
		pteoff = cpu_kh->lev1map_pa +
		    l1pte_index(va) * sizeof(alpha_pt_entry_t);
		if (pread(kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L1 PTE");
			goto lose;
		}

		/* Find and read the L2 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l2pte_index(va) * sizeof(alpha_pt_entry_t);
		if (pread(kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L2 PTE");
			goto lose;
		}

		/* Find and read the L3 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l3pte_index(va) * sizeof(alpha_pt_entry_t);
		if (pread(kd->pmfd, &pte, sizeof(pte),
		    _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L3 PTE");
			goto lose;
		}

		/* Fill in the PA. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
			goto lose;
		}
		*pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
		rv = cpu_kh->page_size - page_off;
	} else {
		/*
		 * Bogus address (not in KV space): punt.
		 */

		_kvm_err(kd, 0, "invalid kernel virtual address");
 lose:
		*pa = -1;
		rv = 0;
	}

#undef PAGE_SHIFT

	return (rv);
}

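/*
 * A rough sketch of how the machine-independent libkvm code is expected
 * to combine _kvm_kvatop() above with _kvm_pa2off() below when reading
 * kernel memory from a crash dump (names other than those two routines
 * and kd->pmfd are illustrative only):
 *
 *	cc = _kvm_kvatop(kd, va, &pa);	// bytes valid in pa's page
 *	if (cc == 0)
 *		return (-1);
 *	cc = MIN(cc, len);
 *	pread(kd->pmfd, buf, cc, _kvm_pa2off(kd, pa));
 */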
/*
 * Translate a physical address to a file offset in the crash dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t *kd;
	u_long pa;
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + ALIGN(sizeof *cpu_kh));

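	/*
	 * The dump stores the pages of each physical RAM segment
	 * back-to-back, in the order the segments appear in the kcore
	 * header, so the file offset of `pa' is the total size of the
	 * preceding segments plus pa's offset within its own segment.
	 */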
	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}

	return (kd->dump_off + off);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants (e.g. m68k)!
 */
int
_kvm_mdopen(kd)
	kvm_t *kd;
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}