/*	$NetBSD: kvm.c,v 1.79 2003/01/18 10:40:41 thorpej Exp $	*/

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm.c	8.2 (Berkeley) 2/13/94";
#else
__RCSID("$NetBSD: kvm.c,v 1.79 2003/01/18 10:40:41 thorpej Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>

#include <sys/core.h>
#include <sys/exec_aout.h>
#include <sys/kcore.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

#include <ctype.h>
#include <db.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <kvm.h>

#include "kvm_private.h"

static int	kvm_dbopen __P((kvm_t *));
static int	_kvm_get_header __P((kvm_t *));
static kvm_t	*_kvm_open __P((kvm_t *, const char *, const char *,
		    const char *, int, char *));
static int	clear_gap __P((kvm_t *, FILE *, int));
static int	open_cloexec __P((const char *, int, int));
static off_t	Lseek __P((kvm_t *, int, off_t, int));
static ssize_t	Pread __P((kvm_t *, int, void *, size_t, off_t));

char *
kvm_geterr(kd)
	kvm_t *kd;
{
	return (kd->errbuf);
}

/*
 * Report an error using printf(3)-style arguments.  "program" is
 * kd->program on hard errors, and 0 on soft errors, so that under Sun
 * error emulation only hard errors are printed out (otherwise, programs
 * like gdb would generate tons of error messages when trying to access
 * bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	size_t n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}
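
/*
 * Illustrative sketch (editor's example, not part of the library): how the
 * hard/soft error convention above is typically used.  Passing kd->program
 * makes a "hard" error that is printed to stderr immediately; passing 0
 * makes a "soft" error that is only stored in kd->errbuf for kvm_geterr().
 * The address argument "va" below is hypothetical:
 *
 *	_kvm_err(kd, kd->program, "%s: not physical memory device", mf);
 *		-- printed on stderr, prefixed with the program name
 *
 *	_kvm_err(kd, 0, "invalid address (%lx)", va);
 *		-- silent; the caller can fetch it later via kvm_geterr(kd)
 */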

void *
_kvm_malloc(kd, n)
	kvm_t *kd;
	size_t n;
{
	void *p;

	if ((p = malloc(n)) == NULL)
		_kvm_err(kd, kd->program, "%s", strerror(errno));
	return (p);
}

/*
 * Open a file setting the close on exec bit.
 */
static int
open_cloexec(fname, flags, mode)
	const char *fname;
	int flags, mode;
{
	int fd;

	if ((fd = open(fname, flags, mode)) == -1)
		return fd;
	if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1)
		goto error;

	return fd;
error:
	flags = errno;
	(void)close(fd);
	errno = flags;
	return -1;
}

/*
 * Wrapper around the lseek(2) system call; calls _kvm_syserr() for us
 * in the event of an error.
 */
static off_t
Lseek(kd, fd, offset, whence)
	kvm_t *kd;
	int fd;
	off_t offset;
	int whence;
{
	off_t off;

	errno = 0;

	if ((off = lseek(fd, offset, whence)) == -1 && errno != 0) {
		_kvm_syserr(kd, kd->program, "Lseek");
		return ((off_t)-1);
	}
	return (off);
}

/*
 * Wrapper around the pread(2) system call; calls _kvm_syserr() for us
 * in the event of an error.
 */
static ssize_t
Pread(kd, fd, buf, nbytes, offset)
	kvm_t *kd;
	int fd;
	void *buf;
	size_t nbytes;
	off_t offset;
{
	ssize_t rv;

	errno = 0;

	if ((rv = pread(fd, buf, nbytes, offset)) != nbytes &&
	    errno != 0)
		_kvm_syserr(kd, kd->program, "Pread");
	return (rv);
}

static kvm_t *
_kvm_open(kd, uf, mf, sf, flag, errout)
	kvm_t *kd;
	const char *uf;
	const char *mf;
	const char *sf;
	int flag;
	char *errout;
{
	struct stat st;
	int ufgiven;

	kd->db = 0;
	kd->pmfd = -1;
	kd->vmfd = -1;
	kd->swfd = -1;
	kd->nlfd = -1;
	kd->alive = KVM_ALIVE_DEAD;
	kd->procbase = 0;
	kd->procbase2 = 0;
	kd->lwpbase = 0;
	kd->nbpg = getpagesize();
	kd->swapspc = 0;
	kd->argspc = 0;
	kd->arglen = 0;
	kd->argbuf = 0;
	kd->argv = 0;
	kd->vmst = 0;
	kd->vm_page_buckets = 0;
	kd->kcore_hdr = 0;
	kd->cpu_dsize = 0;
	kd->cpu_data = 0;
	kd->dump_off = 0;

	if (flag & KVM_NO_FILES) {
		kd->alive = KVM_ALIVE_SYSCTL;
		return (kd);
	}

	/*
	 * Call the MD open hook.  This sets:
	 *	usrstack, min_uva, max_uva
	 */
	if (_kvm_mdopen(kd)) {
		_kvm_err(kd, kd->program, "md init failed");
		goto failed;
	}

	ufgiven = (uf != NULL);
	if (!ufgiven) {
#ifdef CPU_BOOTED_KERNEL
		/* 130 is 128 + '/' + '\0' */
		static char booted_kernel[130];
		int mib[2], rc;
		size_t len;

		mib[0] = CTL_MACHDEP;
		mib[1] = CPU_BOOTED_KERNEL;
		booted_kernel[0] = '/';
		booted_kernel[1] = '\0';
		len = sizeof(booted_kernel) - 2;
		rc = sysctl(&mib[0], 2, &booted_kernel[1], &len, NULL, 0);
		booted_kernel[sizeof(booted_kernel) - 1] = '\0';
		uf = (booted_kernel[1] == '/') ?
		    &booted_kernel[1] : &booted_kernel[0];
		if (rc != -1)
			rc = stat(uf, &st);
		if (rc != -1 && !S_ISREG(st.st_mode))
			rc = -1;
		if (rc == -1)
#endif /* CPU_BOOTED_KERNEL */
			uf = _PATH_UNIX;
	}
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == 0)
		mf = _PATH_MEM;
	if (sf == 0)
		sf = _PATH_DRUM;

	if ((kd->pmfd = open_cloexec(mf, flag, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
			_kvm_err(kd, kd->program,
			    "%s: not physical memory device", mf);
			goto failed;
		}
		if ((kd->vmfd = open_cloexec(_PATH_KMEM, flag, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
			goto failed;
		}
		kd->alive = KVM_ALIVE_FILES;
		if ((kd->swfd = open_cloexec(sf, flag, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", sf);
			goto failed;
		}
		/*
		 * Open kvm nlist database.  We only try to use
		 * the pre-built database if the namelist file name
		 * pointer is NULL.  If the database cannot or should
		 * not be opened, open the namelist argument so we
		 * revert to slow nlist() calls.
		 */
		if ((ufgiven || kvm_dbopen(kd) < 0) &&
		    (kd->nlfd = open_cloexec(uf, O_RDONLY, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", uf);
			goto failed;
		}
	} else {
		/*
		 * This is a crash dump.
		 * Initialize the virtual address translation machinery,
		 * but first setup the namelist fd.
		 */
		if ((kd->nlfd = open_cloexec(uf, O_RDONLY, 0)) < 0) {
			_kvm_syserr(kd, kd->program, "%s", uf);
			goto failed;
		}

		/*
		 * If there is no valid core header, fail silently here.
		 * Address translations, however, will fail without a
		 * header.  Things can be made to work by calling
		 * kvm_dump_mkheader() before doing any translation.
		 */
		if (_kvm_get_header(kd) == 0) {
			if (_kvm_initvtop(kd) < 0)
				goto failed;
		}
	}
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		(void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}

/*
 * The kernel dump file (from savecore) contains:
 *	kcore_hdr_t kcore_hdr;
 *	kcore_seg_t cpu_hdr;
 *	(opaque)    cpu_data;	(size is cpu_hdr.c_size)
 *	kcore_seg_t mem_hdr;
 *	(memory)    mem_data;	(size is mem_hdr.c_size)
 *
 * Note: khdr is padded to khdr.c_hdrsize;
 * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
 */
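
/*
 * Worked example (editor's illustration; the sizes are hypothetical and
 * architecture-dependent): with c_hdrsize = 512 and c_seghdrsize = 512,
 * _kvm_get_header() below walks the file as
 *
 *	offset    0:			kcore_hdr_t (padded to 512)
 *	offset  512:			cpu_hdr (padded to 512)
 *	offset 1024:			cpu_data (cpu_hdr.c_size bytes)
 *	offset 1024 + cpu_hdr.c_size:	mem_hdr (padded to 512)
 *	offset 1536 + cpu_hdr.c_size:	mem_data <- saved as kd->dump_off
 *
 * i.e. dump_off = c_hdrsize + 2 * c_seghdrsize + cpu_hdr.c_size.
 */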
static int
_kvm_get_header(kd)
	kvm_t *kd;
{
	kcore_hdr_t kcore_hdr;
	kcore_seg_t cpu_hdr;
	kcore_seg_t mem_hdr;
	size_t offset;
	ssize_t sz;

	/*
	 * Read the kcore_hdr_t
	 */
	sz = Pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
	if (sz != sizeof(kcore_hdr))
		return (-1);

	/*
	 * Currently, we only support dump-files made by the current
	 * architecture...
	 */
	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
		return (-1);

	/*
	 * Currently, we only support exactly 2 segments: cpu-segment
	 * and data-segment in exactly that order.
	 */
	if (kcore_hdr.c_nseg != 2)
		return (-1);

	/*
	 * Save away the kcore_hdr.  All errors after this
	 * should "goto fail" to deallocate things.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
	if (kd->kcore_hdr == NULL)
		return (-1);
	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
	offset = kcore_hdr.c_hdrsize;

	/*
	 * Read the CPU segment header
	 */
	sz = Pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
	if (sz != sizeof(cpu_hdr))
		goto fail;
	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	/*
	 * Read the CPU segment DATA.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, cpu_hdr.c_size);
	if (kd->cpu_data == NULL)
		goto fail;
	sz = Pread(kd, kd->pmfd, kd->cpu_data, cpu_hdr.c_size, (off_t)offset);
	if (sz != cpu_hdr.c_size)
		goto fail;
	offset += cpu_hdr.c_size;

	/*
	 * Read the next segment header: data segment
	 */
	sz = Pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
	if (sz != sizeof(mem_hdr))
		goto fail;
	offset += kcore_hdr.c_seghdrsize;

	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
		goto fail;

	kd->dump_off = offset;
	return (0);

fail:
	if (kd->kcore_hdr != NULL) {
		free(kd->kcore_hdr);
		kd->kcore_hdr = NULL;
	}
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}

/*
 * The format while on the dump device is: (new format)
 *	kcore_seg_t cpu_hdr;
 *	(opaque)    cpu_data;	(size is cpu_hdr.c_size)
 *	kcore_seg_t mem_hdr;
 *	(memory)    mem_data;	(size is mem_hdr.c_size)
 */
int
kvm_dump_mkheader(kd, dump_off)
	kvm_t *kd;
	off_t dump_off;
{
	kcore_seg_t cpu_hdr;
	size_t hdr_size;
	ssize_t sz;

	if (kd->kcore_hdr != NULL) {
		_kvm_err(kd, kd->program, "already has a dump header");
		return (-1);
	}
	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "don't use on live kernel");
		return (-1);
	}

	/*
	 * Validate new format crash dump
	 */
	sz = Pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), dump_off);
	if (sz != sizeof(cpu_hdr))
		return (-1);
	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
	    || (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
		return (0);
	}
	hdr_size = ALIGN(sizeof(cpu_hdr));

	/*
	 * Read the CPU segment.
	 */
	kd->cpu_dsize = cpu_hdr.c_size;
	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
	if (kd->cpu_data == NULL)
		goto fail;
	sz = Pread(kd, kd->pmfd, kd->cpu_data, cpu_hdr.c_size,
	    dump_off + hdr_size);
	if (sz != cpu_hdr.c_size)
		goto fail;
	hdr_size += kd->cpu_dsize;

	/*
	 * Leave phys mem pointer at beginning of memory data
	 */
	kd->dump_off = dump_off + hdr_size;
	if (Lseek(kd, kd->pmfd, kd->dump_off, SEEK_SET) == -1)
		goto fail;

	/*
	 * Create a kcore_hdr.
	 */
	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
	if (kd->kcore_hdr == NULL)
		goto fail;

	kd->kcore_hdr->c_hdrsize = ALIGN(sizeof(kcore_hdr_t));
	kd->kcore_hdr->c_seghdrsize = ALIGN(sizeof(kcore_seg_t));
	kd->kcore_hdr->c_nseg = 2;
	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE, 0);

	/*
	 * Now that we have a valid header, enable translations.
	 */
	if (_kvm_initvtop(kd) == 0)
		/* Success */
		return (hdr_size);

fail:
	if (kd->kcore_hdr != NULL) {
		free(kd->kcore_hdr);
		kd->kcore_hdr = NULL;
	}
	if (kd->cpu_data != NULL) {
		free(kd->cpu_data);
		kd->cpu_data = NULL;
		kd->cpu_dsize = 0;
	}
	return (-1);
}
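
/*
 * Usage sketch (editor's example, not part of the library): a savecore(8)-
 * style consumer opens the dump device (a block device, so _kvm_open treats
 * it as a dead kernel) and then probes for the new-format header at the
 * offset where the kernel wrote the dump.  "dumplo" and "/dev/wd0b" are
 * hypothetical here:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles(NULL, "/dev/wd0b", NULL, O_RDONLY, errbuf);
 *	int hdrlen = kvm_dump_mkheader(kd, (off_t)dumplo);
 *
 * hdrlen is -1 on error, 0 if no valid header was found at dumplo, and the
 * size of the headers skipped otherwise; after success, kvm_read() and
 * kvm_dump_wrtheader() work on the dump.
 */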

static int
clear_gap(kd, fp, size)
	kvm_t *kd;
	FILE *fp;
	int size;
{
	if (size <= 0) /* XXX - < 0 should never happen */
		return (0);
	while (size-- > 0) {
		if (fputc(0, fp) == EOF) {
			_kvm_syserr(kd, kd->program, "clear_gap");
			return (-1);
		}
	}
	return (0);
}

/*
 * Write the dump header info to 'fp'.  Note that we can't use fseek(3) here
 * because 'fp' might be a file pointer obtained by zopen().
 */
int
kvm_dump_wrtheader(kd, fp, dumpsize)
	kvm_t *kd;
	FILE *fp;
	int dumpsize;
{
	kcore_seg_t seghdr;
	long offset;
	int gap;

	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
		_kvm_err(kd, kd->program, "no valid dump header(s)");
		return (-1);
	}

	/*
	 * Write the generic header
	 */
	offset = 0;
	if (fwrite((void *)kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_hdrsize;
	gap = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the cpu header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
	seghdr.c_size = ALIGN(kd->cpu_dsize);
	if (fwrite((void *)&seghdr, sizeof(seghdr), 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	if (fwrite((void *)kd->cpu_data, kd->cpu_dsize, 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += seghdr.c_size;
	gap = seghdr.c_size - kd->cpu_dsize;
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	/*
	 * Write the actual dump data segment header
	 */
	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
	seghdr.c_size = dumpsize;
	if (fwrite((void *)&seghdr, sizeof(seghdr), 1, fp) == 0) {
		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
		return (-1);
	}
	offset += kd->kcore_hdr->c_seghdrsize;
	gap = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
	if (clear_gap(kd, fp, gap) == -1)
		return (-1);

	return (int)offset;
}
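
/*
 * Usage sketch (editor's example, not part of the library): after
 * kvm_dump_mkheader() has succeeded, a savecore(8)-style consumer can write
 * a self-contained dump file by emitting the headers above and then copying
 * the memory image.  "dumpsize", "buf", "n" and "in" are hypothetical:
 *
 *	if (kvm_dump_wrtheader(kd, fp, dumpsize) == -1)
 *		errx(1, "%s", kvm_geterr(kd));
 *	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
 *		(void)fwrite(buf, 1, n, fp);
 *
 * fp may be a stream from zopen(), which is why the gaps above are
 * zero-filled with fputc() instead of fseek().
 */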

kvm_t *
kvm_openfiles(uf, mf, sf, flag, errout)
	const char *uf;
	const char *mf;
	const char *sf;
	int flag;
	char *errout;
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
		return (0);
	}
	kd->program = 0;
	return (_kvm_open(kd, uf, mf, sf, flag, errout));
}

kvm_t *
kvm_open(uf, mf, sf, flag, program)
	const char *uf;
	const char *mf;
	const char *sf;
	int flag;
	const char *program;
{
	kvm_t *kd;

	if ((kd = malloc(sizeof(*kd))) == NULL) {
		if (program != NULL)
			(void)fprintf(stderr, "%s: %s\n", program,
			    strerror(errno));
		return (0);
	}
	kd->program = program;
	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
}
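
/*
 * Usage sketch (editor's example, not part of the library): the typical
 * consumer flow on a live system.  Passing NULL for the kernel, memory and
 * swap files selects the defaults (the booted kernel or _PATH_UNIX,
 * _PATH_MEM, _PATH_DRUM):
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	... kvm_nlist(), kvm_read(), ... 
 *	(void)kvm_close(kd);
 *
 * kvm_openfiles() reports open errors through errbuf; once kd exists,
 * later soft errors are retrieved with kvm_geterr(kd).
 */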

int
kvm_close(kd)
	kvm_t *kd;
{
	int error = 0;

	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->swfd >= 0)
		error |= close(kd->swfd);
	if (kd->db != 0)
		error |= (kd->db->close)(kd->db);
	if (kd->vmst)
		_kvm_freevtop(kd);
	kd->cpu_dsize = 0;
	if (kd->cpu_data != NULL)
		free((void *)kd->cpu_data);
	if (kd->kcore_hdr != NULL)
		free((void *)kd->kcore_hdr);
	if (kd->procbase != 0)
		free((void *)kd->procbase);
	if (kd->procbase2 != 0)
		free((void *)kd->procbase2);
	if (kd->lwpbase != 0)
		free((void *)kd->lwpbase);
	if (kd->swapspc != 0)
		free((void *)kd->swapspc);
	if (kd->argspc != 0)
		free((void *)kd->argspc);
	if (kd->argbuf != 0)
		free((void *)kd->argbuf);
	if (kd->argv != 0)
		free((void *)kd->argv);
	free((void *)kd);

	return (error);
}

/*
 * Set up state necessary to do queries on the kernel namelist
 * database.  If the database is out-of-date or incompatible with the
 * given executable, set things up so we revert to the standard nlist call.
 * Only called for live kernels.  Return 0 on success, -1 on failure.
 */
static int
kvm_dbopen(kd)
	kvm_t *kd;
{
	DBT rec;
	size_t dbversionlen;
	struct nlist nitem;
	char dbversion[_POSIX2_LINE_MAX];
	char kversion[_POSIX2_LINE_MAX];
	int fd;

	kd->db = dbopen(_PATH_KVMDB, O_RDONLY, 0, DB_HASH, NULL);
	if (kd->db == 0)
		return (-1);
	if ((fd = (*kd->db->fd)(kd->db)) >= 0) {
		if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
			(*kd->db->close)(kd->db);
			return (-1);
		}
	}
	/*
	 * read version out of database
	 */
	rec.data = VRS_KEY;
	rec.size = sizeof(VRS_KEY) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size > sizeof(dbversion))
		goto close;

	memcpy(dbversion, rec.data, rec.size);
	dbversionlen = rec.size;
	/*
	 * Read version string from kernel memory.
	 * Since we are dealing with a live kernel, we can call kvm_read()
	 * at this point.
	 */
	rec.data = VRS_SYM;
	rec.size = sizeof(VRS_SYM) - 1;
	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
		goto close;
	if (rec.data == 0 || rec.size != sizeof(struct nlist))
		goto close;
	memcpy(&nitem, rec.data, sizeof(nitem));
	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
	    dbversionlen)
		goto close;
	/*
	 * If they match, we win - otherwise clear out kd->db so
	 * we revert to slow nlist().
	 */
	if (memcmp(dbversion, kversion, dbversionlen) == 0)
		return (0);
close:
	(void)(kd->db->close)(kd->db);
	kd->db = 0;

	return (-1);
}

int
kvm_nlist(kd, nl)
	kvm_t *kd;
	struct nlist *nl;
{
	struct nlist *p;
	int nvalid, rv;

	/*
	 * If we can't use the data base, revert to the
	 * slow library call.
	 */
	if (kd->db == 0) {
		rv = __fdnlist(kd->nlfd, nl);
		if (rv == -1)
			_kvm_err(kd, 0, "bad namelist");
		return (rv);
	}

	/*
	 * We can use the kvm data base.  Go through each nlist entry
	 * and look it up with a db query.
	 */
	nvalid = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		int len;
		DBT rec;

		if ((len = strlen(p->n_name)) > 4096) {
			/* sanity */
			_kvm_err(kd, kd->program, "symbol too large");
			return (-1);
		}
		rec.data = (char *)p->n_name;
		rec.size = len;

		/*
		 * Make sure that n_value = 0 when the symbol isn't found
		 */
		p->n_value = 0;

		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
			continue;
		if (rec.data == 0 || rec.size != sizeof(struct nlist))
			continue;
		++nvalid;
		/*
		 * Avoid alignment issues.
		 */
		(void)memcpy(&p->n_type, &((struct nlist *)rec.data)->n_type,
		    sizeof(p->n_type));
		(void)memcpy(&p->n_value, &((struct nlist *)rec.data)->n_value,
		    sizeof(p->n_value));
	}
	/*
	 * Return the number of entries that weren't found.
	 */
	return ((p - nl) - nvalid);
}
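
/*
 * Usage sketch (editor's example, not part of the library): looking up a
 * kernel symbol and reading its value, the pattern most kvm consumers use
 * (kvm_dump_inval() below does the same thing for "_dumpmag").  "_nprocs"
 * is just an illustrative symbol name:
 *
 *	struct nlist nl[2];
 *	int nprocs;
 *
 *	nl[0].n_name = "_nprocs";
 *	nl[1].n_name = NULL;
 *	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
 *		errx(1, "kvm_nlist: %s", kvm_geterr(kd));
 *	if (kvm_read(kd, (u_long)nl[0].n_value, &nprocs, sizeof(nprocs)) !=
 *	    sizeof(nprocs))
 *		errx(1, "kvm_read: %s", kvm_geterr(kd));
 *
 * kvm_nlist() returns the number of symbols that were not found, so any
 * non-zero return here means the lookup failed.
 */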

int
kvm_dump_inval(kd)
	kvm_t *kd;
{
	struct nlist nl[2];
	u_long pa, val;

	if (ISALIVE(kd)) {
		_kvm_err(kd, kd->program, "clearing dump on live kernel");
		return (-1);
	}
	nl[0].n_name = "_dumpmag";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) == -1) {
		_kvm_err(kd, 0, "bad namelist");
		return (-1);
	}
	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
		return (-1);

	errno = 0;
	val = 0;
	if (pwrite(kd->pmfd, (void *)&val, sizeof(val),
	    _kvm_pa2off(kd, pa)) == -1) {
		_kvm_syserr(kd, 0, "cannot invalidate dump - pwrite");
		return (-1);
	}
	return (0);
}

ssize_t
kvm_read(kd, kva, buf, len)
	kvm_t *kd;
	u_long kva;
	void *buf;
	size_t len;
{
	int cc;
	void *cp;

	if (ISKMEM(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		cc = pread(kd->vmfd, buf, len, (off_t)kva);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short read");
		return (cc);
	} else if (ISSYSCTL(kd)) {
		_kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
		    "can't use kvm_read");
		return (-1);
	} else {
		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
			_kvm_err(kd, kd->program, "no valid dump header");
			return (-1);
		}
		cp = buf;
		while (len > 0) {
			u_long pa;
			off_t foff;

			cc = _kvm_kvatop(kd, kva, &pa);
			if (cc == 0)
				return (-1);
			if (cc > len)
				cc = len;
			foff = _kvm_pa2off(kd, pa);
			errno = 0;
			cc = pread(kd->pmfd, cp, (size_t)cc, foff);
			if (cc < 0) {
				_kvm_syserr(kd, kd->program, "kvm_read");
				break;
			}
			/*
			 * If kvm_kvatop returns a bogus value or our core
			 * file is truncated, we might wind up seeking beyond
			 * the end of the core file in which case the read will
			 * return 0 (EOF).
			 */
			if (cc == 0)
				break;
			cp = (char *)cp + cc;
			kva += cc;
			len -= cc;
		}
		return ((char *)cp - (char *)buf);
	}
	/* NOTREACHED */
}

ssize_t
kvm_write(kd, kva, buf, len)
	kvm_t *kd;
	u_long kva;
	const void *buf;
	size_t len;
{
	int cc;

	if (ISKMEM(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		errno = 0;
		cc = pwrite(kd->vmfd, buf, len, (off_t)kva);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_write");
			return (-1);
		} else if (cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	} else if (ISSYSCTL(kd)) {
		_kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
		    "can't use kvm_write");
		return (-1);
	} else {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}
	/* NOTREACHED */
}