/*	$NetBSD: kvm.c,v 1.71 2002/05/26 18:33:25 wiz Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm.c	8.2 (Berkeley) 2/13/94";
#else
__RCSID("$NetBSD: kvm.c,v 1.71 2002/05/26 18:33:25 wiz Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <sys/core.h>
#include <sys/exec_aout.h>
#include <sys/kcore.h>

#include <uvm/uvm_extern.h>

#include <ctype.h>
#include <db.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <kvm.h>

#include "kvm_private.h"

static int	kvm_dbopen __P((kvm_t *));
static int	_kvm_get_header __P((kvm_t *));
static kvm_t	*_kvm_open __P((kvm_t *, const char *, const char *,
		    const char *, int, char *));
static int	clear_gap __P((kvm_t *, FILE *, int));
static off_t	Lseek __P((kvm_t *, int, off_t, int));
static ssize_t	Pread __P((kvm_t *, int, void *, size_t, off_t));

char *
kvm_geterr(kd)
	kvm_t *kd;
{
	return (kd->errbuf);
}

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}
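
/*
 * A minimal usage sketch (illustrative only, not part of the library):
 * when no program name is passed to the open routines, errors are soft
 * and end up in kd->errbuf, where the caller retrieves them with
 * kvm_geterr().  The symbol name below is hypothetical, and errx()/warnx()
 * are assumed to come from <err.h>.
 */
#if 0
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	kvm_t *kd;

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		errx(1, "kvm_openfiles: %s", errbuf);	/* open failures land in errout */
	nl[0].n_name = "_example_symbol";		/* hypothetical symbol */
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) == -1)
		warnx("kvm_nlist: %s", kvm_geterr(kd));	/* soft error text */
#endif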

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	size_t n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}

void *
_kvm_malloc(kd, n)
	kvm_t *kd;
	size_t n;
{
	void *p;

	if ((p = malloc(n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

/*
 * Wrapper around the lseek(2) system call; calls _kvm_syserr() for us
 * in the event of an error.
 */
static off_t
Lseek(kd, fd, offset, whence)
	kvm_t *kd;
	int fd;
	off_t offset;
	int whence;
{
	off_t off;

	errno = 0;

	if ((off = lseek(fd, offset, whence)) == -1 && errno != 0) {
		_kvm_syserr(kd, kd->program, "Lseek");
		return ((off_t)-1);
	}
	return (off);
}

/*
 * Wrapper around the pread(2) system call; calls _kvm_syserr() for us
 * in the event of an error.
 */
static ssize_t
Pread(kd, fd, buf, nbytes, offset)
	kvm_t *kd;
	int fd;
	void *buf;
	size_t nbytes;
	off_t offset;
{
	ssize_t rv;

	errno = 0;

	if ((rv = pread(fd, buf, nbytes, offset)) != nbytes &&
	    errno != 0)
		_kvm_syserr(kd, kd->program, "Pread");
	return (rv);
}

static kvm_t *
_kvm_open(kd, uf, mf, sf, flag, errout)
	kvm_t *kd;
	const char *uf;
	const char *mf;
	const char *sf;
	int flag;
	char *errout;
{
	struct stat st;
	int ufgiven;

	kd->db = 0;
	kd->pmfd = -1;
	kd->vmfd = -1;
	kd->swfd = -1;
	kd->nlfd = -1;
	kd->alive = KVM_ALIVE_DEAD;
	kd->procbase = 0;
	kd->procbase2 = 0;
	kd->nbpg = getpagesize();
	kd->swapspc = 0;
	kd->argspc = 0;
	kd->arglen = 0;
	kd->argbuf = 0;
	kd->argv = 0;
	kd->vmst = 0;
	kd->vm_page_buckets = 0;
	kd->kcore_hdr = 0;
	kd->cpu_dsize = 0;
	kd->cpu_data = 0;
	kd->dump_off = 0;

	if (flag & KVM_NO_FILES) {
		kd->alive = KVM_ALIVE_SYSCTL;
		return (kd);
	}

	/*
	 * Call the MD open hook.  This sets:
	 *	usrstack, min_uva, max_uva
	 */
	if (_kvm_mdopen(kd)) {
		_kvm_err(kd, kd->program, "md init failed");
		goto failed;
	}

	ufgiven = (uf != NULL);
	if (!ufgiven)
		uf = _PATH_UNIX;
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == 0)
		mf = _PATH_MEM;
	if (sf == 0)
		sf = _PATH_DRUM;

	if ((kd->pmfd = open(mf, flag, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
			_kvm_err(kd, kd->program,
			    "%s: not physical memory device", mf);
			goto failed;
		}
		if ((kd->vmfd = open(_PATH_KMEM, flag)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
			goto failed;
		}
		kd->alive = KVM_ALIVE_FILES;
		if ((kd->swfd = open(sf, flag, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", sf);
			goto failed;
		}
		/*
		 * Open kvm nlist database.  We only try to use
		 * the pre-built database if the namelist file name
		 * pointer is NULL.  If the database cannot or should
		 * not be opened, open the namelist argument so we
		 * revert to slow nlist() calls.
		 */
		if ((ufgiven || kvm_dbopen(kd) < 0) &&
		    (kd->nlfd = open(uf, O_RDONLY, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", uf);
			goto failed;
		}
	} else {
		/*
		 * This is a crash dump.
		 * Initialize the virtual address translation machinery,
		 * but first setup the namelist fd.
		 */
		if ((kd->nlfd = open(uf, O_RDONLY, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", uf);
			goto failed;
		}

		/*
		 * If there is no valid core header, fail silently here.
		 * The address translations, however, will fail without a
		 * header.  Things can be made to run by calling
		 * kvm_dump_mkheader() before doing any translation.
		 */
		if (_kvm_get_header(kd) == 0) {
			if (_kvm_initvtop(kd) < 0)
				goto failed;
		}
	}
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		(void)strncpy(errout, kd->errbuf, _POSIX2_LINE_MAX - 1);
	(void)kvm_close(kd);
	return (0);
}
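
/*
 * Illustrative open calls (a sketch, not used by the library itself):
 * passing NULL file names selects the defaults (_PATH_UNIX, _PATH_MEM,
 * _PATH_DRUM), and KVM_NO_FILES yields a handle that supports only the
 * sysctl-backed interfaces (e.g. kvm_getproc2()), not kvm_read().
 */
#if 0
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *live, *nofiles;

	/* Live kernel via /dev/mem + /dev/kmem. */
	live = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);

	/* No descriptors opened at all; see the KVM_NO_FILES check above. */
	nofiles = kvm_openfiles(NULL, NULL, NULL, KVM_NO_FILES, errbuf);
#endif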

/*
 * The kernel dump file (from savecore) contains:
 *    kcore_hdr_t kcore_hdr;
 *    kcore_seg_t cpu_hdr;
 *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
 *    kcore_seg_t mem_hdr;
 *    (memory)    mem_data; (size is mem_hdr.c_size)
 *
 * Note: khdr is padded to khdr.c_hdrsize;
 * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
 */
static int
_kvm_get_header(kd)
	kvm_t *kd;
{
	kcore_hdr_t kcore_hdr;
	kcore_seg_t cpu_hdr;
	kcore_seg_t mem_hdr;
	size_t offset;
	ssize_t sz;

	/*
	 * Read the kcore_hdr_t
	 */
	sz = Pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
	if (sz != sizeof(kcore_hdr))
		return (-1);

	/*
	 * Currently, we only support dump-files made by the current
	 * architecture...
	 */
	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
		return (-1);

	/*
	 * Currently, we only support exactly 2 segments: cpu-segment
	 * and data-segment in exactly that order.
	 */
	if (kcore_hdr.c_nseg != 2)
		return (-1);

	/*
	 * Save away the kcore_hdr.  All errors after this
	 * should do a "goto fail" to deallocate things.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
	if (kd->kcore_hdr == NULL)
		return (-1);
	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
	offset = kcore_hdr.c_hdrsize;

	/*
	 * Read the CPU segment header
	 */
	sz = Pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
	if (sz != sizeof(cpu_hdr))
		goto fail;
	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	/*
	 * Read the CPU segment DATA.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, cpu_hdr.c_size);
	if (kd->cpu_data == NULL)
		goto fail;
	sz = Pread(kd, kd->pmfd, kd->cpu_data, cpu_hdr.c_size, (off_t)offset);
	if (sz != cpu_hdr.c_size)
		goto fail;
	offset += cpu_hdr.c_size;

	/*
	 * Read the next segment header: data segment
	 */
	sz = Pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
	if (sz != sizeof(mem_hdr))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
		goto fail;

	kd->dump_off = offset;
	return (0);

fail:
	if (kd->kcore_hdr != NULL) {
		free(kd->kcore_hdr);
		kd->kcore_hdr = NULL;
	}
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}
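
/*
 * A sketch of the offset arithmetic implied by the dump layout described
 * above _kvm_get_header() (illustrative only; the variable names match
 * the locals used in that function):
 */
#if 0
	off_t off = 0;			/* kcore_hdr_t sits at offset 0 */

	off += kcore_hdr.c_hdrsize;	/* padded kernel header; cpu_hdr is here */
	off += kcore_hdr.c_seghdrsize;	/* padded segment header; cpu_data is here */
	off += cpu_hdr.c_size;		/* opaque cpu_data; mem_hdr is here */
	off += kcore_hdr.c_seghdrsize;	/* padded segment header; mem_data starts here */
	/* 'off' now equals what _kvm_get_header() stores in kd->dump_off. */
#endif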

/*
 * The format while on the dump device is: (new format)
 *	kcore_seg_t cpu_hdr;
 *	(opaque)    cpu_data; (size is cpu_hdr.c_size)
 *	kcore_seg_t mem_hdr;
 *	(memory)    mem_data; (size is mem_hdr.c_size)
 */
int
kvm_dump_mkheader(kd, dump_off)
	kvm_t *kd;
	off_t dump_off;
{
	kcore_seg_t cpu_hdr;
	size_t hdr_size;
	ssize_t sz;

	if (kd->kcore_hdr != NULL) {
		_kvm_err(kd, kd->program, "already has a dump header");
		return (-1);
	}
	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "don't use on live kernel");
		return (-1);
	}

	/*
	 * Validate new format crash dump
	 */
	sz = Pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), dump_off);
	if (sz != sizeof(cpu_hdr))
		return (-1);
	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
	    || (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
		return (0);
	}
	hdr_size = ALIGN(sizeof(cpu_hdr));

	/*
	 * Read the CPU segment.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
	if (kd->cpu_data == NULL)
		goto fail;
	sz = Pread(kd, kd->pmfd, kd->cpu_data, cpu_hdr.c_size,
	    dump_off + hdr_size);
	if (sz != cpu_hdr.c_size)
		goto fail;
	hdr_size += kd->cpu_dsize;

	/*
	 * Leave phys mem pointer at beginning of memory data
	 */
	kd->dump_off = dump_off + hdr_size;
	if (Lseek(kd, kd->pmfd, kd->dump_off, SEEK_SET) == -1)
		goto fail;

	/*
	 * Create a kcore_hdr.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
	if (kd->kcore_hdr == NULL)
		goto fail;

	kd->kcore_hdr->c_hdrsize = ALIGN(sizeof(kcore_hdr_t));
	kd->kcore_hdr->c_seghdrsize = ALIGN(sizeof(kcore_seg_t));
	kd->kcore_hdr->c_nseg = 2;
	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE, 0);

	/*
	 * Now that we have a valid header, enable translations.
	 */
	if (_kvm_initvtop(kd) == 0)
		/* Success */
		return (hdr_size);

fail:
	if (kd->kcore_hdr != NULL) {
		free(kd->kcore_hdr);
		kd->kcore_hdr = NULL;
	}
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}
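
/*
 * A savecore-style sketch of how this is used (illustrative only; the
 * device name and dump offset below are hypothetical and would normally
 * come from the dump configuration): give the handle a header so that
 * address translation works, then invalidate the on-disk dump when done.
 */
#if 0
	char errbuf[_POSIX2_LINE_MAX];
	off_t dumplo = 0;			/* hypothetical dump offset */
	kvm_t *kd;

	kd = kvm_openfiles(NULL, "/dev/rsd0b", NULL, O_RDWR, errbuf);
	if (kd != NULL && kvm_dump_mkheader(kd, dumplo) > 0) {
		/* ... read and save the dump here ... */
		(void)kvm_dump_inval(kd);	/* clear the dump magic number */
	}
#endif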

static int
clear_gap(kd, fp, size)
	kvm_t *kd;
	FILE *fp;
	int size;
{
	if (size <= 0) /* XXX - < 0 should never happen */
		return (0);
	while (size-- > 0) {
		if (fputc(0, fp) == EOF) {
			_kvm_syserr(kd, kd->program, "clear_gap");
			return (-1);
		}
	}
	return (0);
}

/*
 * Write the dump header info to 'fp'.  Note that we can't use fseek(3) here
 * because 'fp' might be a file pointer obtained by zopen().
 */
int
kvm_dump_wrtheader(kd, fp, dumpsize)
	kvm_t *kd;
	FILE *fp;
	int dumpsize;
{
	kcore_seg_t seghdr;
	long offset;
	int gap;

	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
		_kvm_err(kd, kd->program, "no valid dump header(s)");
		return (-1);
	}

	/*
	 * Write the generic header
	 */
	offset = 0;
	if (fwrite((void *)kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_hdrsize;
	gap = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the cpu header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
	seghdr.c_size = ALIGN(kd->cpu_dsize);
	if (fwrite((void *)&seghdr, sizeof(seghdr), 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	if (fwrite((void *)kd->cpu_data, kd->cpu_dsize, 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += seghdr.c_size;
	gap = seghdr.c_size - kd->cpu_dsize;
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the actual dump data segment header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
	seghdr.c_size = dumpsize;
	if (fwrite((void *)&seghdr, sizeof(seghdr), 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	return (int)offset;
}
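
/*
 * Sketch of writing a complete dump image to a stdio stream (illustrative
 * only; assumes kvm_dump_mkheader() already succeeded on 'kd', that
 * 'dumpsize' holds the size of the memory segment, and uses a hypothetical
 * output file name):
 */
#if 0
	FILE *fp;
	int hdrlen;

	fp = fopen("netbsd.core", "w");
	if (fp != NULL && (hdrlen = kvm_dump_wrtheader(kd, fp, dumpsize)) > 0) {
		/*
		 * kvm_dump_mkheader() left the memory file positioned at the
		 * start of the memory data, so the dump contents can now be
		 * copied behind the headers just written.
		 */
	}
#endif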

kvm_t *
kvm_openfiles(uf, mf, sf, flag, errout)
	const char *uf;
	const char *mf;
	const char *sf;
	int flag;
	char *errout;
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		(void)strncpy(errout, strerror(errno), _POSIX2_LINE_MAX - 1);
		return (0);
	}
	kd->program = 0;
	return (_kvm_open(kd, uf, mf, sf, flag, errout));
}

kvm_t *
kvm_open(uf, mf, sf, flag, program)
	const char *uf;
	const char *mf;
	const char *sf;
	int flag;
	const char *program;
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		if (program != NULL)
			(void)fprintf(stderr, "%s: %s\n", program,
			    strerror(errno));
		return (0);
	}
	kd->program = program;
	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
}

int
kvm_close(kd)
	kvm_t *kd;
{
	int error = 0;

	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->swfd >= 0)
		error |= close(kd->swfd);
	if (kd->db != 0)
		error |= (kd->db->close)(kd->db);
	if (kd->vmst)
		_kvm_freevtop(kd);
	kd->cpu_dsize = 0;
	if (kd->cpu_data != NULL)
		free((void *)kd->cpu_data);
	if (kd->kcore_hdr != NULL)
		free((void *)kd->kcore_hdr);
	if (kd->procbase != 0)
		free((void *)kd->procbase);
	if (kd->procbase2 != 0)
		free((void *)kd->procbase2);
	if (kd->swapspc != 0)
		free((void *)kd->swapspc);
	if (kd->argspc != 0)
		free((void *)kd->argspc);
	if (kd->argbuf != 0)
		free((void *)kd->argbuf);
	if (kd->argv != 0)
		free((void *)kd->argv);
	free((void *)kd);

	return (error == 0 ? 0 : -1);
}

/*
 * Set up state necessary to do queries on the kernel namelist
 * data base.  If the data base is out-of-date or incompatible with
 * the given executable, set up things so we revert to the standard
 * nlist call.  Only called for live kernels.
 * Return 0 on success, -1 on failure.
 */
static int
kvm_dbopen(kd)
	kvm_t *kd;
{
	DBT rec;
	size_t dbversionlen;
	struct nlist nitem;
	char dbversion[_POSIX2_LINE_MAX];
	char kversion[_POSIX2_LINE_MAX];

	kd->db = dbopen(_PATH_KVMDB, O_RDONLY, 0, DB_HASH, NULL);
	if (kd->db == 0)
		return (-1);
	/*
	 * read version out of database
	 */
	rec.data = VRS_KEY;
	rec.size = sizeof(VRS_KEY) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size > sizeof(dbversion))
		goto close;

	memcpy(dbversion, rec.data, rec.size);
	dbversionlen = rec.size;
	/*
	 * Read version string from kernel memory.
	 * Since we are dealing with a live kernel, we can call kvm_read()
	 * at this point.
	 */
	rec.data = VRS_SYM;
	rec.size = sizeof(VRS_SYM) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size != sizeof(struct nlist))
		goto close;
	memcpy(&nitem, rec.data, sizeof(nitem));
	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
	    dbversionlen)
		goto close;
	/*
	 * If they match, we win - otherwise clear out kd->db so
	 * we revert to slow nlist().
	 */
	if (memcmp(dbversion, kversion, dbversionlen) == 0)
		return (0);
close:
	(void)(kd->db->close)(kd->db);
	kd->db = 0;

	return (-1);
}

int
kvm_nlist(kd, nl)
	kvm_t *kd;
	struct nlist *nl;
{
	struct nlist *p;
	int nvalid, rv;

	/*
	 * If we can't use the data base, revert to the
	 * slow library call.
	 */
	if (kd->db == 0) {
		rv = __fdnlist(kd->nlfd, nl);
		if (rv == -1)
			_kvm_err(kd, 0, "bad namelist");
		return (rv);
	}

	/*
	 * We can use the kvm data base.  Go through each nlist entry
	 * and look it up with a db query.
	 */
	nvalid = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		int len;
		DBT rec;

		if ((len = strlen(p->n_name)) > 4096) {
			/* sanity */
			_kvm_err(kd, kd->program, "symbol too large");
			return (-1);
		}
		rec.data = (char *)p->n_name;
		rec.size = len;

		/*
		 * Make sure that n_value = 0 when the symbol isn't found
		 */
		p->n_value = 0;

		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
			continue;
		if (rec.data == 0 || rec.size != sizeof(struct nlist))
			continue;
		++nvalid;
		/*
		 * Avoid alignment issues.
		 */
		(void)memcpy(&p->n_type, &((struct nlist *)rec.data)->n_type,
		    sizeof(p->n_type));
		(void)memcpy(&p->n_value, &((struct nlist *)rec.data)->n_value,
		    sizeof(p->n_value));
	}
	/*
	 * Return the number of entries that weren't found.
	 */
	return ((p - nl) - nvalid);
}

int
kvm_dump_inval(kd)
	kvm_t *kd;
{
	struct nlist nl[2];
	u_long pa, val;

	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "clearing dump on live kernel");
		return (-1);
	}
	nl[0].n_name = "_dumpmag";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) == -1) {
		_kvm_err(kd, 0, "bad namelist");
		return (-1);
	}
	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
		return (-1);

	errno = 0;
	val = 0;
	if (pwrite(kd->pmfd, (void *)&val, sizeof(val),
	    _kvm_pa2off(kd, pa)) == -1) {
		_kvm_syserr(kd, 0, "cannot invalidate dump - pwrite");
		return (-1);
	}
	return (0);
}

ssize_t
kvm_read(kd, kva, buf, len)
	kvm_t *kd;
	u_long kva;
	void *buf;
	size_t len;
{
	int cc;
	void *cp;

	if (ISKMEM(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		cc = pread(kd->vmfd, buf, len, (off_t)kva);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short read");
		return (cc);
	} else if (ISSYSCTL(kd)) {
		_kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
		    "can't use kvm_read");
		return (-1);
	} else {
		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
			_kvm_err(kd, kd->program, "no valid dump header");
			return (-1);
		}
		cp = buf;
		while (len > 0) {
			u_long pa;
			off_t foff;

			cc = _kvm_kvatop(kd, kva, &pa);
			if (cc == 0)
				return (-1);
			if (cc > len)
				cc = len;
			foff = _kvm_pa2off(kd, pa);
			errno = 0;
			cc = pread(kd->pmfd, cp, (size_t)cc, foff);
			if (cc < 0) {
				_kvm_syserr(kd, kd->program, "kvm_read");
				break;
			}
			/*
			 * If kvm_kvatop returns a bogus value or our core
			 * file is truncated, we might wind up seeking beyond
			 * the end of the core file in which case the read will
			 * return 0 (EOF).
			 */
			if (cc == 0)
				break;
			cp = (char *)cp + cc;
			kva += cc;
			len -= cc;
		}
		return ((char *)cp - (char *)buf);
	}
	/* NOTREACHED */
}
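
/*
 * The classic namelist-plus-read pattern for reference (a sketch;
 * "_boottime" is just a commonly present kernel symbol, and struct
 * timeval comes from <sys/time.h>):
 */
#if 0
	struct nlist nl[2];
	struct timeval boottime;

	nl[0].n_name = "_boottime";
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) == 0 && nl[0].n_value != 0 &&
	    kvm_read(kd, nl[0].n_value, &boottime, sizeof(boottime)) ==
	    sizeof(boottime)) {
		/* boottime now holds the kernel's copy of the variable */
	}
#endif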

ssize_t
kvm_write(kd, kva, buf, len)
	kvm_t *kd;
	u_long kva;
	const void *buf;
	size_t len;
{
	int cc;

	if (ISKMEM(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		errno = 0;
		cc = pwrite(kd->vmfd, buf, len, (off_t)kva);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_write");
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	} else if (ISSYSCTL(kd)) {
		_kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
		    "can't use kvm_write");
		return (-1);
	} else {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}
	/* NOTREACHED */
}