1 /*-
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
34 #if defined(LIBC_SCCS) && !defined(lint)
35 /* from: static char sccsid[] = "@(#)kvm.c 5.18 (Berkeley) 5/7/91"; */
36 static char rcsid[] = "$Id: kvm.c,v 1.8 1993/06/01 01:35:01 cgd Exp $";
37 #endif /* LIBC_SCCS and not lint */
38
39 #include <sys/param.h>
40 #include <sys/user.h>
41 #include <sys/proc.h>
42 #include <sys/ioctl.h>
43 #include <sys/kinfo.h>
44 #include <sys/tty.h>
45 #include <sys/exec.h>
46 #include <machine/vmparam.h>
47 #include <fcntl.h>
48 #include <nlist.h>
49 #include <kvm.h>
50 #include <ndbm.h>
51 #include <limits.h>
52 #include <paths.h>
53 #include <stdio.h>
54 #include <string.h>
55
56 #ifdef SPPWAIT
57 #define NEWVM
58 #endif
59
60 #ifdef NEWVM
61 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
62 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
63 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
64 #include <vm/vm_page.h>
65 #include <vm/swap_pager.h>
66 #include <sys/kinfo_proc.h>
67 #ifdef hp300
68 #include <hp300/hp300/pte.h>
69 #endif
70 #else /* NEWVM */
71 #include <machine/pte.h>
72 #include <sys/vmmac.h>
73 #include <sys/text.h>
74 #endif /* NEWVM */
75
76 /*
77 * files
78 */
79 static const char *unixf, *memf, *kmemf, *swapf;
80 static int unixx, mem, kmem, swap;
81 static DBM *db;
82 /*
83 * flags
84 */
85 static int deadkernel;
86 static int kvminit = 0;
87 static int kvmfilesopen = 0;
88 /*
89 * state
90 */
91 static struct kinfo_proc *kvmprocbase, *kvmprocptr;
92 static int kvmnprocs;
93 /*
94 * u. buffer
95 */
96 static union {
97 struct user user;
98 char upages[UPAGES][NBPG];
99 } user;
100
101 #ifdef NEWVM
102 struct swapblk {
103 long offset; /* offset in swap device */
104 long size; /* remaining size of block in swap device */
105 };
106 #endif
107 /*
108 * random other stuff
109 */
110 #ifndef NEWVM
111 static struct pte *Usrptmap, *usrpt;
112 static struct pte *Sysmap;
113 static int Syssize;
114 #endif
115 static int dmmin, dmmax;
116 static int pcbpf;
117 static int nswap;
118 static char *tmp;
119 #if defined(hp300)
120 static int lowram;
121 static struct ste *Sysseg;
122 #endif
123 #if defined(i386)
124 static struct pde *PTD;
125 #endif
126
127 #define basename(cp) ((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
128 #define MAXSYMSIZE 256
129
130 #if defined(hp300)
131 #define pftoc(f) ((f) - lowram)
132 #define iskva(v) (1)
133 #endif
134
135 #ifndef pftoc
136 #define pftoc(f) (f)
137 #endif
138 #ifndef iskva
139 #define iskva(v) ((u_long)(v) & KERNBASE)
140 #endif
141
142 static struct nlist nl[] = {
143 { "_Usrptmap" },
144 #define X_USRPTMAP 0
145 { "_usrpt" },
146 #define X_USRPT 1
147 { "_nswap" },
148 #define X_NSWAP 2
149 { "_dmmin" },
150 #define X_DMMIN 3
151 { "_dmmax" },
152 #define X_DMMAX 4
153 { "_vm_page_buckets" },
154 #define X_VM_PAGE_BUCKETS 5
155 { "_vm_page_hash_mask" },
156 #define X_VM_PAGE_HASH_MASK 6
157 { "_page_shift" },
158 #define X_PAGE_SHIFT 7
159 /*
160 * everything here and down, only if a dead kernel
161 */
162 { "_Sysmap" },
163 #define X_SYSMAP 8
164 #define X_DEADKERNEL X_SYSMAP
165 { "_Syssize" },
166 #define X_SYSSIZE 9
167 { "_allproc" },
168 #define X_ALLPROC 10
169 { "_zombproc" },
170 #define X_ZOMBPROC 11
171 { "_nproc" },
172 #define X_NPROC 12
173 #define X_LAST 12
174 #if defined(hp300)
175 { "_Sysseg" },
176 #define X_SYSSEG (X_LAST+1)
177 { "_lowram" },
178 #define X_LOWRAM (X_LAST+2)
179 #endif
180 #if defined(i386)
181 { "_IdlePTD" },
182 #define X_IdlePTD (X_LAST+1)
183 #endif
184 { "" },
185 };
186
187 static off_t Vtophys();
188 static void klseek(), seterr(), setsyserr(), vstodb();
189 static int getkvars(), kvm_doprocs(), kvm_init();
190 #ifdef NEWVM
191 static int vatosw();
192 static int findpage();
193 #endif
194
195 /*
196 * returns 0 if files were opened now,
197 * 1 if files were already opened,
198 * -1 if files could not be opened.
199 */
200 kvm_openfiles(uf, mf, sf)
201 const char *uf, *mf, *sf;
202 {
203 if (kvmfilesopen)
204 return (1);
205 unixx = mem = kmem = swap = -1;
206 unixf = (uf == NULL) ? _PATH_UNIX : uf;
207 memf = (mf == NULL) ? _PATH_MEM : mf;
208
209 if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
210 setsyserr("can't open %s", unixf);
211 goto failed;
212 }
213 if ((mem = open(memf, O_RDONLY, 0)) == -1) {
214 setsyserr("can't open %s", memf);
215 goto failed;
216 }
217 if (sf != NULL)
218 swapf = sf;
219 if (mf != NULL) {
220 deadkernel++;
221 kmemf = mf;
222 kmem = mem;
223 swap = -1;
224 } else {
225 kmemf = _PATH_KMEM;
226 if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
227 setsyserr("can't open %s", kmemf);
228 goto failed;
229 }
230 swapf = (sf == NULL) ? _PATH_DRUM : sf;
231 /*
232 * live kernel - avoid looking up nlist entries
233 * past X_DEADKERNEL.
234 */
235 nl[X_DEADKERNEL].n_name = "";
236 }
237 if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
238 seterr("can't open %s", swapf);
239 goto failed;
240 }
241 kvmfilesopen++;
242 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
243 return (-1);
244 return (0);
245 failed:
246 kvm_close();
247 return (-1);
248 }
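
/*
 * Illustrative calling sequence for a ps(1)-style consumer of this
 * library.  This sketch is not part of the library; error handling is
 * abbreviated and the KINFO_PROC_ALL selector is only a placeholder for
 * whatever kinfo selector the caller actually wants.
 *
 *	struct proc *p;
 *
 *	if (kvm_openfiles(NULL, NULL, NULL) == -1) {
 *		fprintf(stderr, "%s\n", kvm_geterr());
 *		exit(1);
 *	}
 *	if (kvm_getprocs(KINFO_PROC_ALL, 0) == -1) {
 *		fprintf(stderr, "%s\n", kvm_geterr());
 *		exit(1);
 *	}
 *	while ((p = kvm_nextproc()) != NULL)
 *		printf("%d\t%s\n", p->p_pid, kvm_getargs(p, kvm_getu(p)));
 *	kvm_close();
 */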
249
250 static
251 kvm_init(uf, mf, sf)
252 char *uf, *mf, *sf;
253 {
254 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
255 return (-1);
256 if (getkvars() == -1)
257 return (-1);
258 kvminit = 1;
259
260 return (0);
261 }
262
263 kvm_close()
264 {
265 if (unixx != -1) {
266 close(unixx);
267 unixx = -1;
268 }
269 if (kmem != -1) {
270 if (kmem != mem)
271 close(kmem);
272 /* otherwise kmem is a copy of mem, and will be closed below */
273 kmem = -1;
274 }
275 if (mem != -1) {
276 close(mem);
277 mem = -1;
278 }
279 if (swap != -1) {
280 close(swap);
281 swap = -1;
282 }
283 if (db != NULL) {
284 dbm_close(db);
285 db = NULL;
286 }
287 kvminit = 0;
288 kvmfilesopen = 0;
289 deadkernel = 0;
290 #ifndef NEWVM
291 if (Sysmap) {
292 free(Sysmap);
293 Sysmap = NULL;
294 }
295 #endif
296 }
297
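/*
 * Look up the given symbols.  For a live kernel, first try the dbm(3)
 * cache kvm_<kernelname> under _PATH_VARRUN, provided its stored
 * "VERSION" entry still matches the running kernel's _version string;
 * if the cache is missing or stale (or the kernel is dead), fall back
 * to a full nlist(3) of the kernel image.  Returns -1 on failure,
 * otherwise the number of symbols that could not be found.
 */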
298 kvm_nlist(nl)
299 struct nlist *nl;
300 {
301 datum key, data;
302 char dbname[MAXPATHLEN];
303 char dbversion[_POSIX2_LINE_MAX];
304 char kversion[_POSIX2_LINE_MAX];
305 int dbversionlen;
306 char symbuf[MAXSYMSIZE];
307 struct nlist nbuf, *n;
308 int num, did;
309
310 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
311 return (-1);
312 if (deadkernel)
313 goto hard2;
314 /*
315 * initialize key datum
316 */
317 key.dptr = symbuf;
318
319 if (db != NULL)
320 goto win; /* off to the races */
321 /*
322 * open database
323 */
324 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
325 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
326 goto hard2;
327 /*
328 * read version out of database
329 */
330 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
331 key.dsize = (sizeof ("VERSION") - 1);
332 data = dbm_fetch(db, key);
333 if (data.dptr == NULL)
334 goto hard1;
335 bcopy(data.dptr, dbversion, data.dsize);
336 dbversionlen = data.dsize;
337 /*
338 * read version string from kernel memory
339 */
340 bcopy("_version", symbuf, sizeof ("_version")-1);
341 key.dsize = (sizeof ("_version")-1);
342 data = dbm_fetch(db, key);
343 if (data.dptr == NULL)
344 goto hard1;
345 if (data.dsize != sizeof (struct nlist))
346 goto hard1;
347 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
348 lseek(kmem, nbuf.n_value, 0);
349 if (read(kmem, kversion, dbversionlen) != dbversionlen)
350 goto hard1;
351 /*
352 * if they match, we win - otherwise do it the hard way
353 */
354 if (bcmp(dbversion, kversion, dbversionlen) != 0)
355 goto hard1;
356 /*
357 * getem from the database.
358 */
359 win:
360 num = did = 0;
361 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
362 int len;
363 /*
364 * clear out fields from users buffer
365 */
366 n->n_type = 0;
367 n->n_other = 0;
368 n->n_desc = 0;
369 n->n_value = 0;
370 /*
371 * query db
372 */
373 		if ((len = strlen(n->n_name)) >= MAXSYMSIZE) {
374 seterr("symbol too large");
375 return (-1);
376 }
377 (void)strcpy(symbuf, n->n_name);
378 key.dsize = len;
379 data = dbm_fetch(db, key);
380 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
381 continue;
382 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
383 n->n_value = nbuf.n_value;
384 n->n_type = nbuf.n_type;
385 n->n_desc = nbuf.n_desc;
386 n->n_other = nbuf.n_other;
387 did++;
388 }
389 return (num - did);
390 hard1:
391 dbm_close(db);
392 db = NULL;
393 hard2:
394 num = nlist(unixf, nl);
395 if (num == -1)
396 seterr("nlist (hard way) failed");
397 return (num);
398 }
399
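/*
 * Build the process list selected by (what, arg).  On a live kernel this
 * is done with getkerninfo(2) into the (re)allocated kvmprocbase buffer;
 * on a dead kernel kvm_doprocs() below walks the allproc and zombproc
 * chains instead.  Returns the number of kinfo_proc entries gathered
 * (and rewinds the kvm_nextproc() cursor), or -1 on error.
 */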
400 kvm_getprocs(what, arg)
401 int what, arg;
402 {
403 static int ocopysize = -1;
404
405 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
406 		return (-1);
407 if (!deadkernel) {
408 int ret, copysize;
409
410 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
411 setsyserr("can't get estimate for kerninfo");
412 return (-1);
413 }
414 copysize = ret;
415 if (copysize > ocopysize || !kvmprocbase) {
416 if (ocopysize == -1 || !kvmprocbase)
417 kvmprocbase =
418 (struct kinfo_proc *)malloc(copysize);
419 else
420 kvmprocbase =
421 (struct kinfo_proc *)realloc(kvmprocbase,
422 copysize);
423 if (!kvmprocbase) {
424 seterr("out of memory");
425 return (-1);
426 }
427 }
428 ocopysize = copysize;
429 		if ((ret = getkerninfo(what, kvmprocbase, &copysize,
430 arg)) == -1) {
431 setsyserr("can't get proc list");
432 return (-1);
433 }
434 if (copysize % sizeof (struct kinfo_proc)) {
435 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
436 copysize, sizeof (struct kinfo_proc));
437 return (-1);
438 }
439 kvmnprocs = copysize / sizeof (struct kinfo_proc);
440 } else {
441 int nproc;
442
443 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
444 sizeof (int)) != sizeof (int)) {
445 seterr("can't read nproc");
446 return (-1);
447 }
448 if ((kvmprocbase = (struct kinfo_proc *)
449 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
450 seterr("out of memory (addr: %x nproc = %d)",
451 nl[X_NPROC].n_value, nproc);
452 return (-1);
453 }
454 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
455 		kvmprocbase = (struct kinfo_proc *)realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
456 }
457 kvmprocptr = kvmprocbase;
458
459 return (kvmnprocs);
460 }
461
462 /*
463 * XXX - should NOT give up so easily - especially since the kernel
464 * may be corrupt (it died). Should gather as much information as possible.
465 * Follows proc ptrs instead of reading table since table may go
466 * away soon.
467 */
468 static
469 kvm_doprocs(what, arg, buff)
470 int what, arg;
471 char *buff;
472 {
473 struct proc *p, proc;
474 register char *bp = buff;
475 int i = 0;
476 int doingzomb = 0;
477 struct eproc eproc;
478 struct pgrp pgrp;
479 struct session sess;
480 struct tty tty;
481 #ifndef NEWVM
482 struct text text;
483 #endif
484
485 /* allproc */
486 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
487 sizeof (struct proc *)) != sizeof (struct proc *)) {
488 seterr("can't read allproc");
489 return (-1);
490 }
491
492 again:
493 for (; p; p = proc.p_nxt) {
494 if (kvm_read(p, &proc, sizeof (struct proc)) !=
495 sizeof (struct proc)) {
496 seterr("can't read proc at %x", p);
497 return (-1);
498 }
499 #ifdef NEWVM
500 if (kvm_read(proc.p_cred, &eproc.e_pcred,
501 sizeof (struct pcred)) == sizeof (struct pcred))
502 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
503 sizeof (struct ucred));
504 switch(ki_op(what)) {
505
506 case KINFO_PROC_PID:
507 if (proc.p_pid != (pid_t)arg)
508 continue;
509 break;
510
511
512 case KINFO_PROC_UID:
513 if (eproc.e_ucred.cr_uid != (uid_t)arg)
514 continue;
515 break;
516
517 case KINFO_PROC_RUID:
518 if (eproc.e_pcred.p_ruid != (uid_t)arg)
519 continue;
520 break;
521 }
522 #else
523 switch(ki_op(what)) {
524
525 case KINFO_PROC_PID:
526 if (proc.p_pid != (pid_t)arg)
527 continue;
528 break;
529
530
531 case KINFO_PROC_UID:
532 if (proc.p_uid != (uid_t)arg)
533 continue;
534 break;
535
536 case KINFO_PROC_RUID:
537 if (proc.p_ruid != (uid_t)arg)
538 continue;
539 break;
540 }
541 #endif
542 /*
543 * gather eproc
544 */
545 eproc.e_paddr = p;
546 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
547 sizeof (struct pgrp)) {
548 seterr("can't read pgrp at %x", proc.p_pgrp);
549 return (-1);
550 }
551 eproc.e_sess = pgrp.pg_session;
552 eproc.e_pgid = pgrp.pg_id;
553 eproc.e_jobc = pgrp.pg_jobc;
554 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
555 != sizeof (struct session)) {
556 seterr("can't read session at %x", pgrp.pg_session);
557 return (-1);
558 }
559 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
560 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
561 != sizeof (struct tty)) {
562 seterr("can't read tty at %x", sess.s_ttyp);
563 return (-1);
564 }
565 eproc.e_tdev = tty.t_dev;
566 eproc.e_tsess = tty.t_session;
567 if (tty.t_pgrp != NULL) {
568 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
569 pgrp)) != sizeof (struct pgrp)) {
570 seterr("can't read tpgrp at &x",
571 tty.t_pgrp);
572 return (-1);
573 }
574 eproc.e_tpgid = pgrp.pg_id;
575 } else
576 eproc.e_tpgid = -1;
577 } else
578 eproc.e_tdev = NODEV;
579 if (proc.p_wmesg)
580 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
581 #ifdef NEWVM
582 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
583 sizeof (struct vmspace));
584 eproc.e_xsize = eproc.e_xrssize =
585 eproc.e_xccount = eproc.e_xswrss = 0;
586 #else
587 if (proc.p_textp) {
588 kvm_read(proc.p_textp, &text, sizeof (text));
589 eproc.e_xsize = text.x_size;
590 eproc.e_xrssize = text.x_rssize;
591 eproc.e_xccount = text.x_ccount;
592 eproc.e_xswrss = text.x_swrss;
593 } else {
594 eproc.e_xsize = eproc.e_xrssize =
595 eproc.e_xccount = eproc.e_xswrss = 0;
596 }
597 #endif
598
599 switch(ki_op(what)) {
600
601 case KINFO_PROC_PGRP:
602 if (eproc.e_pgid != (pid_t)arg)
603 continue;
604 break;
605
606 case KINFO_PROC_TTY:
607 if ((proc.p_flag&SCTTY) == 0 ||
608 eproc.e_tdev != (dev_t)arg)
609 continue;
610 break;
611 }
612
613 i++;
614 bcopy(&proc, bp, sizeof (struct proc));
615 bp += sizeof (struct proc);
616 bcopy(&eproc, bp, sizeof (struct eproc));
617 bp+= sizeof (struct eproc);
618 }
619 if (!doingzomb) {
620 /* zombproc */
621 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
622 sizeof (struct proc *)) != sizeof (struct proc *)) {
623 seterr("can't read zombproc");
624 return (-1);
625 }
626 doingzomb = 1;
627 goto again;
628 }
629
630 return (i);
631 }
632
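/*
 * Cursor-style access to the list built by kvm_getprocs():
 * kvm_nextproc() returns successive entries (as struct proc pointers
 * into the kinfo_proc array), kvm_geteproc() returns the eproc half of
 * an entry, kvm_setproc() rewinds the cursor, and kvm_freeprocs()
 * releases the list.  A minimal (hypothetical) walk might look like:
 *
 *	struct proc *p;
 *
 *	kvm_setproc();
 *	while ((p = kvm_nextproc()) != NULL)
 *		printf("pid %d pgid %d\n", p->p_pid,
 *		    kvm_geteproc(p)->e_pgid);
 */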
633 struct proc *
634 kvm_nextproc()
635 {
636
637 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
638 return (NULL);
639 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
640 seterr("end of proc list");
641 return (NULL);
642 }
643 return((struct proc *)(kvmprocptr++));
644 }
645
646 struct eproc *
647 kvm_geteproc(p)
648 const struct proc *p;
649 {
650 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
651 }
652
653 kvm_setproc()
654 {
655 kvmprocptr = kvmprocbase;
656 }
657
658 kvm_freeprocs()
659 {
660
661 if (kvmprocbase) {
662 free(kvmprocbase);
663 kvmprocbase = NULL;
664 }
665 }
666
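/*
 * Fetch the u-area (struct user) of a process into the static "user"
 * buffer.  A resident process's u pages are read through the kernel's
 * address translation; a swapped-out process is read from the swap
 * device (located with vatosw() under NEWVM).  Returns NULL for
 * zombies and on any read error.
 */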
667 #ifdef NEWVM
668 struct user *
669 kvm_getu(p)
670 const struct proc *p;
671 {
672 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
673 register int i;
674 register char *up;
675 u_int vaddr;
676 struct swapblk swb;
677
678 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
679 return (NULL);
680 if (p->p_stat == SZOMB) {
681 seterr("zombie process");
682 return (NULL);
683 }
684
685 if ((p->p_flag & SLOAD) == 0) {
686 vm_offset_t maddr;
687
688 if (swap < 0) {
689 seterr("no swap");
690 return (NULL);
691 }
692 /*
693 * Costly operation, better set enable_swap to zero
694 * in vm/vm_glue.c, since paging of user pages isn't
695 * done yet anyway.
696 */
697 		if (vatosw(p, USRSTACK, &maddr, &swb) == 0)
698 return NULL;
699
700 if (maddr == 0 && swb.size < UPAGES * NBPG)
701 return NULL;
702
703 for (i = 0; i < UPAGES; i++) {
704 if (maddr) {
705 (void) lseek(mem, maddr + i * NBPG, 0);
706 if (read(mem,
707 (char *)user.upages[i], NBPG) != NBPG) {
708 seterr(
709 "can't read u for pid %d from %s",
710 p->p_pid, swapf);
711 return NULL;
712 }
713 } else {
714 (void) lseek(swap, swb.offset + i * NBPG, 0);
715 if (read(swap,
716 (char *)user.upages[i], NBPG) != NBPG) {
717 seterr(
718 "can't read u for pid %d from %s",
719 p->p_pid, swapf);
720 return NULL;
721 }
722 }
723 }
724 return(&user.user);
725 }
726 /*
727 * Read u-area one page at a time for the benefit of post-mortems
728 */
729 up = (char *) p->p_addr;
730 for (i = 0; i < UPAGES; i++) {
731 klseek(kmem, (long)up, 0);
732 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
733 seterr("cant read page %x of u of pid %d from %s",
734 up, p->p_pid, kmemf);
735 return(NULL);
736 }
737 up += CLBYTES;
738 }
739 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
740
741 kp->kp_eproc.e_vm.vm_rssize =
742 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
743 return(&user.user);
744 }
745 #else
746 struct user *
747 kvm_getu(p)
748 const struct proc *p;
749 {
750 struct pte *pteaddr, apte;
751 struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
752 register int i;
753 int ncl;
754
755 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
756 return (NULL);
757 if (p->p_stat == SZOMB) {
758 seterr("zombie process");
759 return (NULL);
760 }
761 if ((p->p_flag & SLOAD) == 0) {
762 if (swap < 0) {
763 seterr("no swap");
764 return (NULL);
765 }
766 (void) lseek(swap, (long)dtob(p->p_swaddr), 0);
767 if (read(swap, (char *)&user.user, sizeof (struct user)) !=
768 sizeof (struct user)) {
769 seterr("can't read u for pid %d from %s",
770 p->p_pid, swapf);
771 return (NULL);
772 }
773 pcbpf = 0;
774 argaddr0 = 0;
775 argaddr1 = 0;
776 return (&user.user);
777 }
778 pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
779 klseek(kmem, (long)pteaddr, 0);
780 if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
781 seterr("can't read indir pte to get u for pid %d from %s",
782 p->p_pid, kmemf);
783 return (NULL);
784 }
785 lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
786 if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
787 seterr("can't read page table for u of pid %d from %s",
788 p->p_pid, memf);
789 return (NULL);
790 }
791 if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
792 argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
793 else
794 argaddr0 = 0;
795 if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
796 argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
797 else
798 argaddr1 = 0;
799 pcbpf = arguutl[CLSIZE*2].pg_pfnum;
800 ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
801 while (--ncl >= 0) {
802 i = ncl * CLSIZE;
803 lseek(mem,
804 (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
805 if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
806 seterr("can't read page %d of u of pid %d from %s",
807 arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
808 return(NULL);
809 }
810 }
811 return (&user.user);
812 }
813 #endif
814
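/*
 * Read up to len bytes at virtual address addr in process p's address
 * space into buf, never crossing a CLBYTES boundary.  On the i386 the
 * process's page directory and page table are consulted first; if the
 * page is not resident, vatosw() is used to find the data either in
 * physical memory or in a swap block.  Returns the number of bytes
 * actually read, 0 on failure.
 */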
815 int
816 kvm_procread(p, addr, buf, len)
817 const struct proc *p;
818 const unsigned addr, buf, len;
819 {
820 register struct kinfo_proc *kp = (struct kinfo_proc *) p;
821 struct swapblk swb;
822 vm_offset_t swaddr = 0, memaddr = 0;
823 unsigned real_len;
824
825 real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));
826
827 #if defined(hp300)
828 /*
829 * XXX DANGER WILL ROBINSON -- i have *no* idea to what extent this
830 * works... -- cgd
831 */
832 BREAK HERE!!!
833 #endif
834 #if defined(i386)
835 if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
836 struct pde pde;
837
838 klseek(kmem,
839 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);
840
841 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
842 && pde.pd_v) {
843
844 struct pte pte;
845
846 if (lseek(mem, (long)ctob(pde.pd_pfnum) +
847 (ptei(addr) * sizeof pte), 0) == -1)
848 seterr("kvm_procread: lseek");
849 if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
850 if (pte.pg_v) {
851 memaddr = (long)ctob(pte.pg_pfnum) +
852 (addr % (1 << PGSHIFT));
853 }
854 } else {
855 seterr("kvm_procread: read");
856 }
857 }
858 }
859 #endif /* i386 */
860
861 swb.size = 0; /* XXX */
862 if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
863 if (memaddr != 0) {
864 memaddr += addr & CLOFSET;
865 } else {
866 			swb.offset += addr & CLOFSET;
867 swb.size -= addr & CLOFSET;
868 if (swb.size >= real_len)
869 swaddr = swb.offset;
870 }
871 }
872
873 if (memaddr) {
874 if (lseek(mem, memaddr, 0) == -1)
875 seterr("kvm_getu: lseek");
876 real_len = read(mem, (char *)buf, real_len);
877 if (real_len == -1) {
878 real_len = 0;
879 seterr("kvm_procread: read");
880 }
881 } else if (swaddr) {
882 if (lseek(swap, swaddr, 0) == -1)
883 seterr("kvm_getu: lseek");
884 real_len = read(swap, (char *)buf, real_len);
885 if (real_len == -1) {
886 real_len = 0;
887 seterr("kvm_procread: read");
888 }
889 } else
890 real_len = 0;
891
892 return real_len;
893 }
894
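/*
 * Read a NUL-terminated string from the process's address space, one
 * byte at a time via kvm_procread() (see the XXX below), storing at
 * most len bytes.  Returns the number of bytes stored, not counting
 * the terminating NUL.
 */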
895 int
896 kvm_procreadstr(p, addr, buf, len)
897 const struct proc *p;
898 const unsigned addr, buf;
899 unsigned len;
900 {
901 int done;
902 char a, *bp = (char *) buf;
903
904 /* XXX -- should be optimized */
905
906 done = 0;
907 while (len && kvm_procread(p, addr+done, &a, 1) == 1) {
908 *bp++ = a;
909 if (a == '\0')
910 return done;
911 done++;
912 len--;
913 }
914 return done;
915 }
916
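/*
 * Reconstruct a process's command line by reading its ps_strings
 * structure at PS_STRINGS and then each saved argument string.  If the
 * arguments cannot be read, or the result looks unprintable, the
 * command name is substituted as "(p_comm)".  The string lives in a
 * static buffer that is overwritten on the next call.
 */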
917 char *
918 kvm_getargs(p, up)
919 const struct proc *p;
920 const struct user *up;
921 {
922 static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
923 register char *cp, *acp;
924 int left, rv;
925 struct ps_strings arginfo;
926
927 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
928 goto retucomm;
929
930 if (kvm_procread(p, PS_STRINGS, &arginfo, sizeof(arginfo)) !=
931 sizeof(arginfo))
932 goto bad;
933
934 cp = cmdbuf;
935 acp = arginfo.ps_argvstr;
936 left = ARG_MAX + 1;
937 while (arginfo.ps_nargvstr--) {
938 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
939 acp += rv + 1;
940 left -= rv + 1;
941 cp += rv;
942 *cp++ = ' ';
943 *cp = '\0';
944 } else
945 goto bad;
946 }
947 cp-- ; *cp = '\0';
948
949 if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
950 (void) strcat(cmdbuf, " (");
951 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
952 (void) strcat(cmdbuf, ")");
953 }
954 return (cmdbuf);
955
956 bad:
957 seterr("error locating command name for pid %d", p->p_pid);
958 retucomm:
959 (void) strcpy(cmdbuf, "(");
960 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
961 (void) strcat(cmdbuf, ")");
962 return (cmdbuf);
963 }
964
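/*
 * Like kvm_getargs(), but collects the environment strings described
 * by ps_envstr/ps_nenvstr; returns an empty string if they cannot
 * be read.
 */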
965 char *
966 kvm_getenv(p, up)
967 const struct proc *p;
968 const struct user *up;
969 {
970 static char envbuf[ARG_MAX + 1];
971 register char *cp, *acp;
972 int left, rv;
973 struct ps_strings arginfo;
974
975 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
976 goto retemptyenv;
977
978 if (kvm_procread(p, PS_STRINGS, &arginfo, sizeof(arginfo)) !=
979 sizeof(arginfo))
980 goto bad;
981
982 cp = envbuf;
983 acp = arginfo.ps_envstr;
984 left = ARG_MAX + 1;
985 while (arginfo.ps_nenvstr--) {
986 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
987 acp += rv + 1;
988 left -= rv + 1;
989 cp += rv;
990 *cp++ = ' ';
991 *cp = '\0';
992 } else
993 goto bad;
994 }
995 cp-- ; *cp = '\0';
996 return (envbuf);
997
998 bad:
999 seterr("error locating environment for pid %d", p->p_pid);
1000 retemptyenv:
1001 envbuf[0] = '\0';
1002 return (envbuf);
1003 }
1004
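/*
 * Resolve the symbols in nl[] and read the variables needed for address
 * translation.  For a dead kernel this also loads the structures that
 * klseek()/Vtophys() rely on: the system page map (Sysmap) for the old
 * VM code, the segment table (Sysseg) on the hp300, or the page
 * directory (PTD) on the i386.  nswap, dmmin and dmmax are read in
 * all cases.
 */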
1005 static
1006 getkvars()
1007 {
1008 if (kvm_nlist(nl) == -1)
1009 return (-1);
1010 if (deadkernel) {
1011 /* We must do the sys map first because klseek uses it */
1012 long addr;
1013
1014 #ifndef NEWVM
1015 Syssize = nl[X_SYSSIZE].n_value;
1016 Sysmap = (struct pte *)
1017 calloc((unsigned) Syssize, sizeof (struct pte));
1018 if (Sysmap == NULL) {
1019 seterr("out of space for Sysmap");
1020 return (-1);
1021 }
1022 addr = (long) nl[X_SYSMAP].n_value;
1023 addr &= ~KERNBASE;
1024 (void) lseek(kmem, addr, 0);
1025 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1026 != Syssize * sizeof (struct pte)) {
1027 seterr("can't read Sysmap");
1028 return (-1);
1029 }
1030 #endif
1031 #if defined(hp300)
1032 addr = (long) nl[X_LOWRAM].n_value;
1033 (void) lseek(kmem, addr, 0);
1034 if (read(kmem, (char *) &lowram, sizeof (lowram))
1035 != sizeof (lowram)) {
1036 seterr("can't read lowram");
1037 return (-1);
1038 }
1039 lowram = btop(lowram);
1040 Sysseg = (struct ste *) malloc(NBPG);
1041 if (Sysseg == NULL) {
1042 seterr("out of space for Sysseg");
1043 return (-1);
1044 }
1045 addr = (long) nl[X_SYSSEG].n_value;
1046 (void) lseek(kmem, addr, 0);
1047 read(kmem, (char *)&addr, sizeof(addr));
1048 (void) lseek(kmem, (long)addr, 0);
1049 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1050 seterr("can't read Sysseg");
1051 return (-1);
1052 }
1053 #endif
1054 #if defined(i386)
1055 PTD = (struct pde *) malloc(NBPG);
1056 if (PTD == NULL) {
1057 seterr("out of space for PTD");
1058 return (-1);
1059 }
1060 addr = (long) nl[X_IdlePTD].n_value;
1061 (void) lseek(kmem, addr, 0);
1062 read(kmem, (char *)&addr, sizeof(addr));
1063 (void) lseek(kmem, (long)addr, 0);
1064 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1065 seterr("can't read PTD");
1066 return (-1);
1067 }
1068 #endif
1069 }
1070 #ifndef NEWVM
1071 usrpt = (struct pte *)nl[X_USRPT].n_value;
1072 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1073 #endif
1074 	if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (nswap)) !=
1075 	    sizeof (nswap)) {
1076 		seterr("can't read nswap");
1077 		return (-1);
1078 	}
1079 	if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (dmmin)) !=
1080 	    sizeof (dmmin)) {
1081 		seterr("can't read dmmin");
1082 		return (-1);
1083 	}
1084 	if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (dmmax)) !=
1085 	    sizeof (dmmax)) {
1086 		seterr("can't read dmmax");
1087 		return (-1);
1088 	}
1089 return (0);
1090 }
1091
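/*
 * Read len bytes of kernel data at loc into buf.  Addresses that
 * iskva() recognizes as kernel virtual go through kmem (translated by
 * klseek() for dead kernels); anything else is treated as a physical
 * address and read from mem.  Returns len on success, -1 on error.
 */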
1092 kvm_read(loc, buf, len)
1093 void *loc;
1094 void *buf;
1095 {
1096 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1097 return (-1);
1098 if (iskva(loc)) {
1099 klseek(kmem, (off_t) loc, 0);
1100 if (read(kmem, buf, len) != len) {
1101 seterr("error reading kmem at %x", loc);
1102 return (-1);
1103 }
1104 } else {
1105 lseek(mem, (off_t) loc, 0);
1106 if (read(mem, buf, len) != len) {
1107 seterr("error reading mem at %x", loc);
1108 return (-1);
1109 }
1110 }
1111 return (len);
1112 }
1113
1114 static void
1115 klseek(fd, loc, off)
1116 int fd;
1117 off_t loc;
1118 int off;
1119 {
1120
1121 if (deadkernel) {
1122 if ((loc = Vtophys(loc)) == -1)
1123 return;
1124 }
1125 (void) lseek(fd, (off_t)loc, off);
1126 }
1127
1128 #ifndef NEWVM
1129 /*
1130 * Given a base/size pair in virtual swap area,
1131 * return a physical base/size pair which is the
1132 * (largest) initial, physically contiguous block.
1133 */
1134 static void
1135 vstodb(vsbase, vssize, dmp, dbp, rev)
1136 register int vsbase;
1137 int vssize;
1138 struct dmap *dmp;
1139 register struct dblock *dbp;
1140 {
1141 register int blk = dmmin;
1142 register swblk_t *ip = dmp->dm_map;
1143
1144 vsbase = ctod(vsbase);
1145 vssize = ctod(vssize);
1146 if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1147 /*panic("vstodb")*/;
1148 while (vsbase >= blk) {
1149 vsbase -= blk;
1150 if (blk < dmmax)
1151 blk *= 2;
1152 ip++;
1153 }
1154 if (*ip <= 0 || *ip + blk > nswap)
1155 /*panic("vstodb")*/;
1156 dbp->db_size = MIN(vssize, blk - vsbase);
1157 dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1158 }
1159 #endif
1160
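/*
 * Translate a kernel virtual address into the corresponding offset in
 * the core file, using the translation structures loaded by getkvars():
 * the hp300 segment/page tables or the i386 page directory under NEWVM,
 * the saved Sysmap otherwise.  Only used by klseek() on dead kernels.
 */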
1161 #ifdef NEWVM
1162 static off_t
1163 Vtophys(loc)
1164 u_long loc;
1165 {
1166 off_t newloc = (off_t) -1;
1167 #ifdef hp300
1168 int p, ste, pte;
1169
1170 ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
1171 if ((ste & SG_V) == 0) {
1172 seterr("vtophys: segment not valid");
1173 return((off_t) -1);
1174 }
1175 p = btop(loc & SG_PMASK);
1176 newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
1177 (void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
1178 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1179 seterr("vtophys: cannot locate pte");
1180 return((off_t) -1);
1181 }
1182 newloc = pte & PG_FRAME;
1183 if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
1184 seterr("vtophys: page not valid");
1185 return((off_t) -1);
1186 }
1187 newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
1188 #endif
1189 #ifdef i386
1190 struct pde pde;
1191 struct pte pte;
1192 int p;
1193
1194 pde = PTD[loc >> PD_SHIFT];
1195 if (pde.pd_v == 0) {
1196 seterr("vtophys: page directory entry not valid");
1197 return((off_t) -1);
1198 }
1199 p = btop(loc & PT_MASK);
1200 newloc = pde.pd_pfnum + (p * sizeof(struct pte));
1201 (void) lseek(kmem, (long)newloc, 0);
1202 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1203 seterr("vtophys: cannot obtain desired pte");
1204 return((off_t) -1);
1205 }
1206 newloc = pte.pg_pfnum;
1207 if (pte.pg_v == 0) {
1208 seterr("vtophys: page table entry not valid");
1209 return((off_t) -1);
1210 }
1211 newloc += (loc & PGOFSET);
1212 #endif
1213 return((off_t) newloc);
1214 }
1215 #else
1216 static off_t
1217 Vtophys(loc)
1218 long loc;
1219 {
1220 int p;
1221 off_t newloc;
1222 register struct pte *pte;
1223
1224 newloc = loc & ~KERNBASE;
1225 p = btop(newloc);
1226 #if defined(vax) || defined(tahoe)
1227 if ((loc & KERNBASE) == 0) {
1228 seterr("vtophys: translating non-kernel address");
1229 return((off_t) -1);
1230 }
1231 #endif
1232 if (p >= Syssize) {
1233 seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
1234 return((off_t) -1);
1235 }
1236 pte = &Sysmap[p];
1237 if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
1238 seterr("vtophys: page not valid");
1239 return((off_t) -1);
1240 }
1241 #if defined(hp300)
1242 if (pte->pg_pfnum < lowram) {
1243 seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
1244 return((off_t) -1);
1245 }
1246 #endif
1247 loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
1248 return(loc);
1249 }
1250 #endif
1251
1252
1253 #ifdef NEWVM
1254 /*
1255 * locate address of unwired or swapped page
1256 */
1257
1258 #define DEBUG 0
1259
1260 #define KREAD(off, addr, len) \
1261 (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1262
1263
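/*
 * Translate a user virtual address of process p into either a physical
 * address (*maddr, if the page is resident) or a swap block (*swb): walk
 * the process's vm_map entries to the covering entry, follow the
 * object's shadow chain looking each (object, offset) up in the page
 * hash via findpage(), and finally consult the swap pager's block array.
 * Returns 1 on success, 0 on failure (with an error string set).
 */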
1264 static int
1265 vatosw(p, vaddr, maddr, swb)
1266 struct proc *p ;
1267 vm_offset_t vaddr;
1268 vm_offset_t *maddr;
1269 struct swapblk *swb;
1270 {
1271 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
1272 vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
1273 struct vm_object vm_object;
1274 struct vm_map_entry vm_entry;
1275 struct pager_struct pager;
1276 struct swpager swpager;
1277 struct swblock swblock;
1278 long addr, off;
1279 int i;
1280
1281 if (p->p_pid == 0 || p->p_pid == 2)
1282 return 0;
1283
1284 addr = (long)mp->header.next;
1285 for (i = 0; i < mp->nentries; i++) {
1286 /* Weed through map entries until vaddr in range */
1287 if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
1288 setsyserr("vatosw: read vm_map_entry");
1289 return 0;
1290 }
1291 if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
1292 (vm_entry.object.vm_object != 0))
1293 break;
1294
1295 addr = (long)vm_entry.next;
1296 }
1297 if (i == mp->nentries) {
1298 seterr("%u: map not found\n", p->p_pid);
1299 return 0;
1300 }
1301
1302 if (vm_entry.is_a_map || vm_entry.is_sub_map) {
1303 seterr("%u: Is a map\n", p->p_pid);
1304 return 0;
1305 }
1306
1307 /* Locate memory object */
1308 off = (vaddr - vm_entry.start) + vm_entry.offset;
1309 addr = (long)vm_entry.object.vm_object;
1310 while (1) {
1311 if (!KREAD(addr, &vm_object, sizeof vm_object)) {
1312 setsyserr("vatosw: read vm_object");
1313 return 0;
1314 }
1315
1316 #if DEBUG
1317 fprintf(stderr, "%u: find page: object %#x offset %x\n",
1318 p->p_pid, addr, off);
1319 #endif
1320
1321 /* Lookup in page queue */
1322 if (findpage(addr, off, maddr))
1323 return 1;
1324
1325 if (vm_object.shadow == 0)
1326 break;
1327
1328 #if DEBUG
1329 fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
1330 p->p_pid, addr, off, vm_object.shadow_offset);
1331 #endif
1332
1333 addr = (long)vm_object.shadow;
1334 off += vm_object.shadow_offset;
1335 }
1336
1337 if (!vm_object.pager) {
1338 seterr("%u: no pager\n", p->p_pid);
1339 return 0;
1340 }
1341
1342 /* Find address in swap space */
1343 if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
1344 setsyserr("vatosw: read pager");
1345 return 0;
1346 }
1347 if (pager.pg_type != PG_SWAP) {
1348 seterr("%u: weird pager\n", p->p_pid);
1349 return 0;
1350 }
1351
1352 /* Get swap pager data */
1353 if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
1354 setsyserr("vatosw: read swpager");
1355 return 0;
1356 }
1357
1358 off += vm_object.paging_offset;
1359
1360 /* Read swap block array */
1361 if (!KREAD((long)swpager.sw_blocks +
1362 (off/dbtob(swpager.sw_bsize)) * sizeof swblock,
1363 &swblock, sizeof swblock)) {
1364 setsyserr("vatosw: read swblock");
1365 return 0;
1366 }
1367 swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
1368 swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
1369 return 1;
1370 }
1371
1372
1373 #define atop(x) (((unsigned)(x)) >> page_shift)
1374 #define vm_page_hash(object, offset) \
1375 (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1376
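/*
 * Look for a resident page belonging to (object, offset): hash into the
 * kernel's vm_page_buckets, mirroring the kernel's own page lookup, and
 * walk the bucket's hash chain.  On a hit the page's physical address
 * is returned through *maddr and 1 is returned; otherwise 0.
 */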
1377 static int
1378 findpage(object, offset, maddr)
1379 long object;
1380 long offset;
1381 vm_offset_t *maddr;
1382 {
1383 static long vm_page_hash_mask;
1384 static long vm_page_buckets;
1385 static long page_shift;
1386 queue_head_t bucket;
1387 struct vm_page mem;
1388 long addr, baddr;
1389
1390 if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
1391 &vm_page_hash_mask, sizeof (long))) {
1392 seterr("can't read vm_page_hash_mask");
1393 return 0;
1394 }
1395 if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
1396 &page_shift, sizeof (long))) {
1397 seterr("can't read page_shift");
1398 return 0;
1399 }
1400 if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
1401 &vm_page_buckets, sizeof (long))) {
1402 seterr("can't read vm_page_buckets");
1403 return 0;
1404 }
1405
1406 baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
1407 if (!KREAD(baddr, &bucket, sizeof (bucket))) {
1408 seterr("can't read vm_page_bucket");
1409 return 0;
1410 }
1411
1412 addr = (long)bucket.next;
1413 while (addr != baddr) {
1414 if (!KREAD(addr, &mem, sizeof (mem))) {
1415 seterr("can't read vm_page");
1416 return 0;
1417 }
1418 if ((long)mem.object == object && mem.offset == offset) {
1419 *maddr = (long)mem.phys_addr;
1420 return 1;
1421 }
1422 addr = (long)mem.hashq.next;
1423 }
1424 return 0;
1425 }
1426 #endif /* NEWVM */
1427
1428 #include <varargs.h>
1429 static char errbuf[_POSIX2_LINE_MAX];
1430
1431 static void
1432 seterr(va_alist)
1433 va_dcl
1434 {
1435 char *fmt;
1436 va_list ap;
1437
1438 va_start(ap);
1439 fmt = va_arg(ap, char *);
1440 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1441 #if DEBUG
1442 (void) vfprintf(stderr, fmt, ap);
1443 #endif
1444 va_end(ap);
1445 }
1446
1447 static void
1448 setsyserr(va_alist)
1449 va_dcl
1450 {
1451 char *fmt, *cp;
1452 va_list ap;
1453 extern int errno;
1454
1455 va_start(ap);
1456 fmt = va_arg(ap, char *);
1457 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1458 for (cp=errbuf; *cp; cp++)
1459 ;
1460 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1461 va_end(ap);
1462 }
1463
1464 char *
1465 kvm_geterr()
1466 {
1467 return (errbuf);
1468 }
1469