kvm.c revision 1.10 1 /*-
2 * Copyright (c) 1993 Christopher G. Demetriou
3 * Copyright (c) 1989 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #if defined(LIBC_SCCS) && !defined(lint)
36 /* from: static char sccsid[] = "@(#)kvm.c 5.18 (Berkeley) 5/7/91"; */
37 static char rcsid[] = "$Id: kvm.c,v 1.10 1993/06/15 07:16:06 deraadt Exp $";
38 #endif /* LIBC_SCCS and not lint */
39
40 #include <sys/param.h>
41 #include <sys/user.h>
42 #include <sys/proc.h>
43 #include <sys/ioctl.h>
44 #include <sys/kinfo.h>
45 #include <sys/tty.h>
46 #include <sys/exec.h>
47 #include <machine/vmparam.h>
48 #include <fcntl.h>
49 #include <nlist.h>
50 #include <kvm.h>
51 #include <ndbm.h>
52 #include <limits.h>
53 #include <paths.h>
54 #include <stdio.h>
55 #include <string.h>
56
57 #ifdef SPPWAIT
58 #define NEWVM
59 #endif
60
61 #ifdef NEWVM
62 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
63 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
64 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
65 #include <vm/vm_page.h>
66 #include <vm/swap_pager.h>
67 #include <sys/kinfo_proc.h>
68 #ifdef hp300
69 #include <hp300/hp300/pte.h>
70 #endif
71 #else /* NEWVM */
72 #include <machine/pte.h>
73 #include <sys/vmmac.h>
74 #include <sys/text.h>
75 #endif /* NEWVM */
76
/*
 * files: pathnames and descriptors for the kernel image (unixf/unixx),
 * physical memory (memf/mem), kernel virtual memory (kmemf/kmem) and
 * the swap device (swapf/swap).
 */
static const char *unixf, *memf, *kmemf, *swapf;
static int unixx, mem, kmem, swap;
static DBM *db;				/* kvm_mkdb symbol database */
/*
 * flags
 */
static int deadkernel;			/* post-mortem on a core, not /dev/kmem */
static int kvminit = 0;			/* getkvars() has completed */
static int kvmfilesopen = 0;		/* kvm_openfiles() has completed */
/*
 * state: snapshot of the process table from the last kvm_getprocs(),
 * and the cursor kvm_nextproc() walks through it.
 */
static struct kinfo_proc *kvmprocbase, *kvmprocptr;
static int kvmnprocs;
/*
 * u. buffer -- the target process's u-area is read page at a time
 * into this union.
 */
static union {
	struct user user;
	char upages[UPAGES][NBPG];
} user;

#ifdef NEWVM
/* a contiguous run of blocks on the swap device */
struct swapblk {
	long offset;		/* offset in swap device */
	long size;		/* remaining size of block in swap device */
};
#endif
/*
 * random other stuff
 */
#ifndef NEWVM
static struct pte *Usrptmap, *usrpt;
static struct pte *Sysmap;
static int Syssize;
#endif
static int dmmin, dmmax;	/* swap interleaving parameters */
static int pcbpf;
static int nswap;
static char *tmp;
#if defined(hp300)
static int lowram;		/* hp300 RAM does not start at physical 0 */
static struct ste *Sysseg;
#endif
#if defined(i386)
static struct pde *PTD;		/* dead kernel's page directory */
#endif

#define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
#define MAXSYMSIZE	256

#if defined(hp300)
#define pftoc(f)	((f) - lowram)
#define iskva(v)	(1)
#endif

#ifndef pftoc
#define pftoc(f)	(f)
#endif
#ifndef iskva
#define iskva(v)	((u_long)(v) & KERNBASE)
#endif

/*
 * Kernel symbols looked up at initialization; the X_* constants
 * index this table.
 */
static struct nlist nl[] = {
	{ "_Usrptmap" },
#define X_USRPTMAP	0
	{ "_usrpt" },
#define X_USRPT		1
	{ "_nswap" },
#define X_NSWAP		2
	{ "_dmmin" },
#define X_DMMIN		3
	{ "_dmmax" },
#define X_DMMAX		4
	{ "_vm_page_buckets" },
#define X_VM_PAGE_BUCKETS	5
	{ "_vm_page_hash_mask" },
#define X_VM_PAGE_HASH_MASK	6
	{ "_page_shift" },
#define X_PAGE_SHIFT	7
	/*
	 * everything here and down, only if a dead kernel
	 */
	{ "_Sysmap" },
#define X_SYSMAP	8
#define X_DEADKERNEL	X_SYSMAP
	{ "_Syssize" },
#define X_SYSSIZE	9
	{ "_allproc" },
#define X_ALLPROC	10
	{ "_zombproc" },
#define X_ZOMBPROC	11
	{ "_nproc" },
#define X_NPROC		12
#define X_LAST		12
#if defined(hp300)
	{ "_Sysseg" },
#define X_SYSSEG	(X_LAST+1)
	{ "_lowram" },
#define X_LOWRAM	(X_LAST+2)
#endif
#if defined(i386)
	{ "_IdlePTD" },
#define X_IdlePTD	(X_LAST+1)
#endif
	{ "" },
};

static off_t Vtophys();
static void klseek(), seterr(), setsyserr(), vstodb();
static int getkvars(), kvm_doprocs(), kvm_init();
#ifdef NEWVM
static int vatosw();
static int findpage();
#endif
195
/*
 * Open the kernel image (uf), memory file (mf) and swap file (sf),
 * substituting the running-system defaults for NULL arguments.
 *
 * returns 0 if files were opened now,
 * 1 if files were already opened,
 * -1 if files could not be opened.
 */
kvm_openfiles(uf, mf, sf)
	const char *uf, *mf, *sf;
{
	if (kvmfilesopen)
		return (1);
	unixx = mem = kmem = swap = -1;
	unixf = (uf == NULL) ? _PATH_UNIX : uf;
	memf = (mf == NULL) ? _PATH_MEM : mf;

	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", unixf);
		goto failed;
	}
	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", memf);
		goto failed;
	}
	if (sf != NULL)
		swapf = sf;
	if (mf != NULL) {
		/*
		 * An explicit memory file means post-mortem on a dead
		 * kernel: "kernel virtual" reads go through the same
		 * descriptor as physical reads (translated by klseek).
		 */
		deadkernel++;
		kmemf = mf;
		kmem = mem;
		swap = -1;
	} else {
		kmemf = _PATH_KMEM;
		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
			setsyserr("can't open %s", kmemf);
			goto failed;
		}
		swapf = (sf == NULL) ? _PATH_DRUM : sf;
		/*
		 * live kernel - avoid looking up nlist entries
		 * past X_DEADKERNEL.
		 */
		nl[X_DEADKERNEL].n_name = "";
	}
	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
		seterr("can't open %s", swapf);
		goto failed;
	}
	kvmfilesopen++;
	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
		return (-1);
	return (0);
failed:
	kvm_close();
	return (-1);
}
250
251 static
252 kvm_init(uf, mf, sf)
253 char *uf, *mf, *sf;
254 {
255 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
256 return (-1);
257 if (getkvars() == -1)
258 return (-1);
259 kvminit = 1;
260
261 return (0);
262 }
263
/*
 * Close every descriptor opened by kvm_openfiles() and reset the
 * library to its unopened state.
 */
kvm_close()
{
	if (unixx != -1) {
		close(unixx);
		unixx = -1;
	}
	if (kmem != -1) {
		if (kmem != mem)
			close(kmem);
		/* otherwise kmem is a copy of mem, and will be closed below */
		kmem = -1;
	}
	if (mem != -1) {
		close(mem);
		mem = -1;
	}
	if (swap != -1) {
		close(swap);
		swap = -1;
	}
	if (db != NULL) {
		dbm_close(db);
		db = NULL;
	}
	kvminit = 0;
	kvmfilesopen = 0;
	deadkernel = 0;
#ifndef NEWVM
	if (Sysmap) {
		free(Sysmap);
		Sysmap = NULL;
	}
#endif
}
298
299 kvm_nlist(nl)
300 struct nlist *nl;
301 {
302 datum key, data;
303 char dbname[MAXPATHLEN];
304 char dbversion[_POSIX2_LINE_MAX];
305 char kversion[_POSIX2_LINE_MAX];
306 int dbversionlen;
307 char symbuf[MAXSYMSIZE];
308 struct nlist nbuf, *n;
309 int num, did;
310
311 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
312 return (-1);
313 if (deadkernel)
314 goto hard2;
315 /*
316 * initialize key datum
317 */
318 key.dptr = symbuf;
319
320 if (db != NULL)
321 goto win; /* off to the races */
322 /*
323 * open database
324 */
325 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
326 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
327 goto hard2;
328 /*
329 * read version out of database
330 */
331 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
332 key.dsize = (sizeof ("VERSION") - 1);
333 data = dbm_fetch(db, key);
334 if (data.dptr == NULL)
335 goto hard1;
336 bcopy(data.dptr, dbversion, data.dsize);
337 dbversionlen = data.dsize;
338 /*
339 * read version string from kernel memory
340 */
341 bcopy("_version", symbuf, sizeof ("_version")-1);
342 key.dsize = (sizeof ("_version")-1);
343 data = dbm_fetch(db, key);
344 if (data.dptr == NULL)
345 goto hard1;
346 if (data.dsize != sizeof (struct nlist))
347 goto hard1;
348 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
349 lseek(kmem, nbuf.n_value, 0);
350 if (read(kmem, kversion, dbversionlen) != dbversionlen)
351 goto hard1;
352 /*
353 * if they match, we win - otherwise do it the hard way
354 */
355 if (bcmp(dbversion, kversion, dbversionlen) != 0)
356 goto hard1;
357 /*
358 * getem from the database.
359 */
360 win:
361 num = did = 0;
362 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
363 int len;
364 /*
365 * clear out fields from users buffer
366 */
367 n->n_type = 0;
368 n->n_other = 0;
369 n->n_desc = 0;
370 n->n_value = 0;
371 /*
372 * query db
373 */
374 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
375 seterr("symbol too large");
376 return (-1);
377 }
378 (void)strcpy(symbuf, n->n_name);
379 key.dsize = len;
380 data = dbm_fetch(db, key);
381 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
382 continue;
383 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
384 n->n_value = nbuf.n_value;
385 n->n_type = nbuf.n_type;
386 n->n_desc = nbuf.n_desc;
387 n->n_other = nbuf.n_other;
388 did++;
389 }
390 return (num - did);
391 hard1:
392 dbm_close(db);
393 db = NULL;
394 hard2:
395 num = nlist(unixf, nl);
396 if (num == -1)
397 seterr("nlist (hard way) failed");
398 return (num);
399 }
400
401 kvm_getprocs(what, arg)
402 int what, arg;
403 {
404 static int ocopysize = -1;
405
406 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
407 return (NULL);
408 if (!deadkernel) {
409 int ret, copysize;
410
411 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
412 setsyserr("can't get estimate for kerninfo");
413 return (-1);
414 }
415 copysize = ret;
416 if (copysize > ocopysize || !kvmprocbase) {
417 if (ocopysize == -1 || !kvmprocbase)
418 kvmprocbase =
419 (struct kinfo_proc *)malloc(copysize);
420 else
421 kvmprocbase =
422 (struct kinfo_proc *)realloc(kvmprocbase,
423 copysize);
424 if (!kvmprocbase) {
425 seterr("out of memory");
426 return (-1);
427 }
428 }
429 ocopysize = copysize;
430 if ((ret = getkerninfo(what, kvmprocbase, ©size,
431 arg)) == -1) {
432 setsyserr("can't get proc list");
433 return (-1);
434 }
435 if (copysize % sizeof (struct kinfo_proc)) {
436 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
437 copysize, sizeof (struct kinfo_proc));
438 return (-1);
439 }
440 kvmnprocs = copysize / sizeof (struct kinfo_proc);
441 } else {
442 int nproc;
443
444 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
445 sizeof (int)) != sizeof (int)) {
446 seterr("can't read nproc");
447 return (-1);
448 }
449 if ((kvmprocbase = (struct kinfo_proc *)
450 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
451 seterr("out of memory (addr: %x nproc = %d)",
452 nl[X_NPROC].n_value, nproc);
453 return (-1);
454 }
455 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
456 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
457 }
458 kvmprocptr = kvmprocbase;
459
460 return (kvmnprocs);
461 }
462
463 /*
464 * XXX - should NOT give up so easily - especially since the kernel
465 * may be corrupt (it died). Should gather as much information as possible.
466 * Follows proc ptrs instead of reading table since table may go
467 * away soon.
468 */
469 static
470 kvm_doprocs(what, arg, buff)
471 int what, arg;
472 char *buff;
473 {
474 struct proc *p, proc;
475 register char *bp = buff;
476 int i = 0;
477 int doingzomb = 0;
478 struct eproc eproc;
479 struct pgrp pgrp;
480 struct session sess;
481 struct tty tty;
482 #ifndef NEWVM
483 struct text text;
484 #endif
485
486 /* allproc */
487 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
488 sizeof (struct proc *)) != sizeof (struct proc *)) {
489 seterr("can't read allproc");
490 return (-1);
491 }
492
493 again:
494 for (; p; p = proc.p_nxt) {
495 if (kvm_read(p, &proc, sizeof (struct proc)) !=
496 sizeof (struct proc)) {
497 seterr("can't read proc at %x", p);
498 return (-1);
499 }
500 #ifdef NEWVM
501 if (kvm_read(proc.p_cred, &eproc.e_pcred,
502 sizeof (struct pcred)) == sizeof (struct pcred))
503 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
504 sizeof (struct ucred));
505 switch(ki_op(what)) {
506
507 case KINFO_PROC_PID:
508 if (proc.p_pid != (pid_t)arg)
509 continue;
510 break;
511
512
513 case KINFO_PROC_UID:
514 if (eproc.e_ucred.cr_uid != (uid_t)arg)
515 continue;
516 break;
517
518 case KINFO_PROC_RUID:
519 if (eproc.e_pcred.p_ruid != (uid_t)arg)
520 continue;
521 break;
522 }
523 #else
524 switch(ki_op(what)) {
525
526 case KINFO_PROC_PID:
527 if (proc.p_pid != (pid_t)arg)
528 continue;
529 break;
530
531
532 case KINFO_PROC_UID:
533 if (proc.p_uid != (uid_t)arg)
534 continue;
535 break;
536
537 case KINFO_PROC_RUID:
538 if (proc.p_ruid != (uid_t)arg)
539 continue;
540 break;
541 }
542 #endif
543 /*
544 * gather eproc
545 */
546 eproc.e_paddr = p;
547 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
548 sizeof (struct pgrp)) {
549 seterr("can't read pgrp at %x", proc.p_pgrp);
550 return (-1);
551 }
552 eproc.e_sess = pgrp.pg_session;
553 eproc.e_pgid = pgrp.pg_id;
554 eproc.e_jobc = pgrp.pg_jobc;
555 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
556 != sizeof (struct session)) {
557 seterr("can't read session at %x", pgrp.pg_session);
558 return (-1);
559 }
560 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
561 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
562 != sizeof (struct tty)) {
563 seterr("can't read tty at %x", sess.s_ttyp);
564 return (-1);
565 }
566 eproc.e_tdev = tty.t_dev;
567 eproc.e_tsess = tty.t_session;
568 if (tty.t_pgrp != NULL) {
569 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
570 pgrp)) != sizeof (struct pgrp)) {
571 seterr("can't read tpgrp at &x",
572 tty.t_pgrp);
573 return (-1);
574 }
575 eproc.e_tpgid = pgrp.pg_id;
576 } else
577 eproc.e_tpgid = -1;
578 } else
579 eproc.e_tdev = NODEV;
580 if (proc.p_wmesg)
581 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
582 #ifdef NEWVM
583 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
584 sizeof (struct vmspace));
585 eproc.e_xsize = eproc.e_xrssize =
586 eproc.e_xccount = eproc.e_xswrss = 0;
587 #else
588 if (proc.p_textp) {
589 kvm_read(proc.p_textp, &text, sizeof (text));
590 eproc.e_xsize = text.x_size;
591 eproc.e_xrssize = text.x_rssize;
592 eproc.e_xccount = text.x_ccount;
593 eproc.e_xswrss = text.x_swrss;
594 } else {
595 eproc.e_xsize = eproc.e_xrssize =
596 eproc.e_xccount = eproc.e_xswrss = 0;
597 }
598 #endif
599
600 switch(ki_op(what)) {
601
602 case KINFO_PROC_PGRP:
603 if (eproc.e_pgid != (pid_t)arg)
604 continue;
605 break;
606
607 case KINFO_PROC_TTY:
608 if ((proc.p_flag&SCTTY) == 0 ||
609 eproc.e_tdev != (dev_t)arg)
610 continue;
611 break;
612 }
613
614 i++;
615 bcopy(&proc, bp, sizeof (struct proc));
616 bp += sizeof (struct proc);
617 bcopy(&eproc, bp, sizeof (struct eproc));
618 bp+= sizeof (struct eproc);
619 }
620 if (!doingzomb) {
621 /* zombproc */
622 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
623 sizeof (struct proc *)) != sizeof (struct proc *)) {
624 seterr("can't read zombproc");
625 return (-1);
626 }
627 doingzomb = 1;
628 goto again;
629 }
630
631 return (i);
632 }
633
634 struct proc *
635 kvm_nextproc()
636 {
637
638 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
639 return (NULL);
640 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
641 seterr("end of proc list");
642 return (NULL);
643 }
644 return((struct proc *)(kvmprocptr++));
645 }
646
647 struct eproc *
648 kvm_geteproc(p)
649 const struct proc *p;
650 {
651 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
652 }
653
/*
 * Rewind the kvm_nextproc() cursor to the start of the snapshot.
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
658
659 kvm_freeprocs()
660 {
661
662 if (kvmprocbase) {
663 free(kvmprocbase);
664 kvmprocbase = NULL;
665 }
666 }
667
668 #ifdef NEWVM
669 struct user *
670 kvm_getu(p)
671 const struct proc *p;
672 {
673 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
674 register int i;
675 register char *up;
676 u_int vaddr;
677 struct swapblk swb;
678
679 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
680 return (NULL);
681 if (p->p_stat == SZOMB) {
682 seterr("zombie process");
683 return (NULL);
684 }
685
686 if ((p->p_flag & SLOAD) == 0) {
687 vm_offset_t maddr;
688
689 if (swap < 0) {
690 seterr("no swap");
691 return (NULL);
692 }
693 /*
694 * Costly operation, better set enable_swap to zero
695 * in vm/vm_glue.c, since paging of user pages isn't
696 * done yet anyway.
697 */
698 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
699 return NULL;
700
701 if (maddr == 0 && swb.size < UPAGES * NBPG)
702 return NULL;
703
704 for (i = 0; i < UPAGES; i++) {
705 if (maddr) {
706 (void) lseek(mem, maddr + i * NBPG, 0);
707 if (read(mem,
708 (char *)user.upages[i], NBPG) != NBPG) {
709 seterr(
710 "can't read u for pid %d from %s",
711 p->p_pid, swapf);
712 return NULL;
713 }
714 } else {
715 (void) lseek(swap, swb.offset + i * NBPG, 0);
716 if (read(swap,
717 (char *)user.upages[i], NBPG) != NBPG) {
718 seterr(
719 "can't read u for pid %d from %s",
720 p->p_pid, swapf);
721 return NULL;
722 }
723 }
724 }
725 return(&user.user);
726 }
727 /*
728 * Read u-area one page at a time for the benefit of post-mortems
729 */
730 up = (char *) p->p_addr;
731 for (i = 0; i < UPAGES; i++) {
732 klseek(kmem, (long)up, 0);
733 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
734 seterr("cant read page %x of u of pid %d from %s",
735 up, p->p_pid, kmemf);
736 return(NULL);
737 }
738 up += CLBYTES;
739 }
740 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
741
742 kp->kp_eproc.e_vm.vm_rssize =
743 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
744 return(&user.user);
745 }
746 #else
/*
 * Pre-NEWVM variant: read the u-area of process p into the static
 * `user' buffer via the old user page tables.  Returns NULL on failure.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	if ((p->p_flag & SLOAD) == 0) {
		/* swapped out: the whole u. sits at dtob(p_swaddr) on swap */
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		/* NOTE(review): argaddr0/argaddr1 are not declared anywhere
		 * in this file as shown -- presumably !NEWVM file-scope
		 * variables; verify before building this configuration. */
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* resident: fetch the indirect pte, then the page-table page
	 * covering the argument pages and the u-area */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	/* remember where the two argument clicks are, if resident */
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* copy the u. into the buffer one click at a time */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		    (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
814 #endif
815
816 int
817 kvm_procread(p, addr, buf, len)
818 const struct proc *p;
819 const unsigned addr, buf, len;
820 {
821 register struct kinfo_proc *kp = (struct kinfo_proc *) p;
822 struct swapblk swb;
823 vm_offset_t swaddr = 0, memaddr = 0;
824 unsigned real_len;
825
826 real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));
827
828 #if defined(hp300)
829 /*
830 * XXX DANGER WILL ROBINSON -- i have *no* idea to what extent this
831 * works... -- cgd
832 */
833 BREAK HERE!!!
834 #endif
835 #if defined(i386)
836 if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
837 struct pde pde;
838
839 klseek(kmem,
840 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);
841
842 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
843 && pde.pd_v) {
844
845 struct pte pte;
846
847 if (lseek(mem, (long)ctob(pde.pd_pfnum) +
848 (ptei(addr) * sizeof pte), 0) == -1)
849 seterr("kvm_procread: lseek");
850 if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
851 if (pte.pg_v) {
852 memaddr = (long)ctob(pte.pg_pfnum) +
853 (addr % (1 << PGSHIFT));
854 }
855 } else {
856 seterr("kvm_procread: read");
857 }
858 }
859 }
860 #endif /* i386 */
861
862 if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
863 if (memaddr != 0) {
864 memaddr += addr & CLOFSET;
865 } else {
866 swaddr += addr & CLOFSET;
867 swb.size -= addr & CLOFSET;
868 if (swb.size >= real_len)
869 swaddr = swb.offset;
870 }
871 }
872
873 if (memaddr) {
874 if (lseek(mem, memaddr, 0) == -1)
875 seterr("kvm_getu: lseek");
876 real_len = read(mem, (char *)buf, real_len);
877 if (real_len == -1) {
878 real_len = 0;
879 seterr("kvm_procread: read");
880 }
881 } else if (swaddr) {
882 if (lseek(swap, swaddr, 0) == -1)
883 seterr("kvm_getu: lseek");
884 real_len = read(swap, (char *)buf, real_len);
885 if (real_len == -1) {
886 real_len = 0;
887 seterr("kvm_procread: read");
888 }
889 } else
890 real_len = 0;
891
892 return real_len;
893 }
894
/*
 * Copy a NUL-terminated string from address addr in process p's
 * address space into buf, reading at most len bytes.  Returns the
 * number of bytes copied, not counting the terminating NUL.
 */
int
kvm_procreadstr(p, addr, buf, len)
	const struct proc *p;
	const unsigned addr, buf;
	unsigned len;
{
	char chunk[200];
	register char *src, *dst = (char *) buf;
	int nread, total = 0;

	while (len != 0) {
		/* fetch the next piece of the string */
		nread = kvm_procread(p, addr + total, chunk,
		    MIN(len, sizeof chunk));
		if (nread < 1)
			break;
		for (src = chunk; nread-- > 0; ) {
			len--;
			if ((*dst++ = *src++) == '\0')
				return (total);
			total++;
		}
	}
	return (total);
}
920
921 char *
922 kvm_getargs(p, up)
923 const struct proc *p;
924 const struct user *up;
925 {
926 static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
927 register char *cp, *acp;
928 int left, rv;
929 struct ps_strings arginfo;
930
931 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
932 goto retucomm;
933
934 if (kvm_procread(p, PS_STRINGS, &arginfo, sizeof(arginfo)) !=
935 sizeof(arginfo))
936 goto bad;
937
938 cp = cmdbuf;
939 acp = arginfo.ps_argvstr;
940 left = ARG_MAX + 1;
941 while (arginfo.ps_nargvstr--) {
942 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
943 acp += rv + 1;
944 left -= rv + 1;
945 cp += rv;
946 *cp++ = ' ';
947 *cp = '\0';
948 } else
949 goto bad;
950 }
951 cp-- ; *cp = '\0';
952
953 if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
954 (void) strcat(cmdbuf, " (");
955 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
956 (void) strcat(cmdbuf, ")");
957 }
958 return (cmdbuf);
959
960 bad:
961 seterr("error locating command name for pid %d", p->p_pid);
962 retucomm:
963 (void) strcpy(cmdbuf, "(");
964 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
965 (void) strcat(cmdbuf, ")");
966 return (cmdbuf);
967 }
968
969 char *
970 kvm_getenv(p, up)
971 const struct proc *p;
972 const struct user *up;
973 {
974 static char envbuf[ARG_MAX + 1];
975 register char *cp, *acp;
976 int left, rv;
977 struct ps_strings arginfo;
978
979 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
980 goto retemptyenv;
981
982 if (kvm_procread(p, PS_STRINGS, &arginfo, sizeof(arginfo)) !=
983 sizeof(arginfo))
984 goto bad;
985
986 cp = envbuf;
987 acp = arginfo.ps_envstr;
988 left = ARG_MAX + 1;
989 while (arginfo.ps_nenvstr--) {
990 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
991 acp += rv + 1;
992 left -= rv + 1;
993 cp += rv;
994 *cp++ = ' ';
995 *cp = '\0';
996 } else
997 goto bad;
998 }
999 cp-- ; *cp = '\0';
1000 return (envbuf);
1001
1002 bad:
1003 seterr("error locating environment for pid %d", p->p_pid);
1004 retemptyenv:
1005 envbuf[0] = '\0';
1006 return (envbuf);
1007 }
1008
1009 static
1010 getkvars()
1011 {
1012 if (kvm_nlist(nl) == -1)
1013 return (-1);
1014 if (deadkernel) {
1015 /* We must do the sys map first because klseek uses it */
1016 long addr;
1017
1018 #ifndef NEWVM
1019 Syssize = nl[X_SYSSIZE].n_value;
1020 Sysmap = (struct pte *)
1021 calloc((unsigned) Syssize, sizeof (struct pte));
1022 if (Sysmap == NULL) {
1023 seterr("out of space for Sysmap");
1024 return (-1);
1025 }
1026 addr = (long) nl[X_SYSMAP].n_value;
1027 addr &= ~KERNBASE;
1028 (void) lseek(kmem, addr, 0);
1029 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1030 != Syssize * sizeof (struct pte)) {
1031 seterr("can't read Sysmap");
1032 return (-1);
1033 }
1034 #endif
1035 #if defined(hp300)
1036 addr = (long) nl[X_LOWRAM].n_value;
1037 (void) lseek(kmem, addr, 0);
1038 if (read(kmem, (char *) &lowram, sizeof (lowram))
1039 != sizeof (lowram)) {
1040 seterr("can't read lowram");
1041 return (-1);
1042 }
1043 lowram = btop(lowram);
1044 Sysseg = (struct ste *) malloc(NBPG);
1045 if (Sysseg == NULL) {
1046 seterr("out of space for Sysseg");
1047 return (-1);
1048 }
1049 addr = (long) nl[X_SYSSEG].n_value;
1050 (void) lseek(kmem, addr, 0);
1051 read(kmem, (char *)&addr, sizeof(addr));
1052 (void) lseek(kmem, (long)addr, 0);
1053 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1054 seterr("can't read Sysseg");
1055 return (-1);
1056 }
1057 #endif
1058 #if defined(i386)
1059 PTD = (struct pde *) malloc(NBPG);
1060 if (PTD == NULL) {
1061 seterr("out of space for PTD");
1062 return (-1);
1063 }
1064 addr = (long) nl[X_IdlePTD].n_value;
1065 (void) lseek(kmem, addr, 0);
1066 read(kmem, (char *)&addr, sizeof(addr));
1067 (void) lseek(kmem, (long)addr, 0);
1068 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1069 seterr("can't read PTD");
1070 return (-1);
1071 }
1072 #endif
1073 }
1074 #ifndef NEWVM
1075 usrpt = (struct pte *)nl[X_USRPT].n_value;
1076 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1077 #endif
1078 if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1079 sizeof (long)) {
1080 seterr("can't read nswap");
1081 return (-1);
1082 }
1083 if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1084 sizeof (long)) {
1085 seterr("can't read dmmin");
1086 return (-1);
1087 }
1088 if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1089 sizeof (long)) {
1090 seterr("can't read dmmax");
1091 return (-1);
1092 }
1093 return (0);
1094 }
1095
1096 kvm_read(loc, buf, len)
1097 void *loc;
1098 void *buf;
1099 {
1100 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1101 return (-1);
1102 if (iskva(loc)) {
1103 klseek(kmem, (off_t) loc, 0);
1104 if (read(kmem, buf, len) != len) {
1105 seterr("error reading kmem at %x", loc);
1106 return (-1);
1107 }
1108 } else {
1109 lseek(mem, (off_t) loc, 0);
1110 if (read(mem, buf, len) != len) {
1111 seterr("error reading mem at %x", loc);
1112 return (-1);
1113 }
1114 }
1115 return (len);
1116 }
1117
/*
 * Seek fd to kernel virtual address loc; for a dead kernel the
 * address is first translated to a physical offset via Vtophys().
 *
 * NOTE(review): if Vtophys() fails this returns without seeking, so
 * the next read silently uses the previous file offset.
 */
static void
klseek(fd, loc, off)
	int fd;
	off_t loc;
	int off;
{

	if (deadkernel) {
		if ((loc = Vtophys(loc)) == -1)
			return;
	}
	(void) lseek(fd, (off_t)loc, off);
}
1131
1132 #ifndef NEWVM
1133 /*
1134 * Given a base/size pair in virtual swap area,
1135 * return a physical base/size pair which is the
1136 * (largest) initial, physically contiguous block.
1137 */
1138 static void
1139 vstodb(vsbase, vssize, dmp, dbp, rev)
1140 register int vsbase;
1141 int vssize;
1142 struct dmap *dmp;
1143 register struct dblock *dbp;
1144 {
1145 register int blk = dmmin;
1146 register swblk_t *ip = dmp->dm_map;
1147
1148 vsbase = ctod(vsbase);
1149 vssize = ctod(vssize);
1150 if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1151 /*panic("vstodb")*/;
1152 while (vsbase >= blk) {
1153 vsbase -= blk;
1154 if (blk < dmmax)
1155 blk *= 2;
1156 ip++;
1157 }
1158 if (*ip <= 0 || *ip + blk > nswap)
1159 /*panic("vstodb")*/;
1160 dbp->db_size = MIN(vssize, blk - vsbase);
1161 dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1162 }
1163 #endif
1164
1165 #ifdef NEWVM
/*
 * Translate kernel virtual address loc to a physical offset usable
 * for reading a dead kernel's memory file, by hand-walking the
 * machine-dependent translation tables read in by getkvars().
 * Returns (off_t)-1 on any translation failure.
 */
static off_t
Vtophys(loc)
	u_long loc;
{
	off_t newloc = (off_t) -1;
#ifdef hp300
	int p, ste, pte;

	/* segment table entry for loc */
	ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	/* hp300 physical memory starts at lowram, not 0 */
	(void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	/* page directory entry for loc */
	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	/* NOTE(review): pd_pfnum/pg_pfnum look like frame numbers yet are
	 * used below without a ctob() conversion -- confirm against the
	 * i386 pmap layout before relying on this path. */
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
1219 #else
1220 static off_t
1221 vtophys(loc)
1222 long loc;
1223 {
1224 int p;
1225 off_t newloc;
1226 register struct pte *pte;
1227
1228 newloc = loc & ~KERNBASE;
1229 p = btop(newloc);
1230 #if defined(vax) || defined(tahoe)
1231 if ((loc & KERNBASE) == 0) {
1232 seterr("vtophys: translating non-kernel address");
1233 return((off_t) -1);
1234 }
1235 #endif
1236 if (p >= Syssize) {
1237 seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
1238 return((off_t) -1);
1239 }
1240 pte = &Sysmap[p];
1241 if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
1242 seterr("vtophys: page not valid");
1243 return((off_t) -1);
1244 }
1245 #if defined(hp300)
1246 if (pte->pg_pfnum < lowram) {
1247 seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
1248 return((off_t) -1);
1249 }
1250 #endif
1251 loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
1252 return(loc);
1253 }
1254 #endif
1255
1256
1257 #ifdef NEWVM
1258 /*
1259 * locate address of unwired or swapped page
1260 */
1261
1262 #define DEBUG 0
1263
1264 #define KREAD(off, addr, len) \
1265 (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1266
1267
1268 static int
1269 vatosw(p, vaddr, maddr, swb)
1270 struct proc *p ;
1271 vm_offset_t vaddr;
1272 vm_offset_t *maddr;
1273 struct swapblk *swb;
1274 {
1275 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
1276 vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
1277 struct vm_object vm_object;
1278 struct vm_map_entry vm_entry;
1279 struct pager_struct pager;
1280 struct swpager swpager;
1281 struct swblock swblock;
1282 long addr, off;
1283 int i;
1284
1285 if (p->p_pid == 0 || p->p_pid == 2)
1286 return 0;
1287
1288 addr = (long)mp->header.next;
1289 for (i = 0; i < mp->nentries; i++) {
1290 /* Weed through map entries until vaddr in range */
1291 if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
1292 setsyserr("vatosw: read vm_map_entry");
1293 return 0;
1294 }
1295 if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
1296 (vm_entry.object.vm_object != 0))
1297 break;
1298
1299 addr = (long)vm_entry.next;
1300 }
1301 if (i == mp->nentries) {
1302 seterr("%u: map not found\n", p->p_pid);
1303 return 0;
1304 }
1305
1306 if (vm_entry.is_a_map || vm_entry.is_sub_map) {
1307 seterr("%u: Is a map\n", p->p_pid);
1308 return 0;
1309 }
1310
1311 /* Locate memory object */
1312 off = (vaddr - vm_entry.start) + vm_entry.offset;
1313 addr = (long)vm_entry.object.vm_object;
1314 while (1) {
1315 if (!KREAD(addr, &vm_object, sizeof vm_object)) {
1316 setsyserr("vatosw: read vm_object");
1317 return 0;
1318 }
1319
1320 #if DEBUG
1321 fprintf(stderr, "%u: find page: object %#x offset %x\n",
1322 p->p_pid, addr, off);
1323 #endif
1324
1325 /* Lookup in page queue */
1326 if (findpage(addr, off, maddr))
1327 return 1;
1328
1329 if (vm_object.shadow == 0)
1330 break;
1331
1332 #if DEBUG
1333 fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
1334 p->p_pid, addr, off, vm_object.shadow_offset);
1335 #endif
1336
1337 addr = (long)vm_object.shadow;
1338 off += vm_object.shadow_offset;
1339 }
1340
1341 if (!vm_object.pager) {
1342 seterr("%u: no pager\n", p->p_pid);
1343 return 0;
1344 }
1345
1346 /* Find address in swap space */
1347 if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
1348 setsyserr("vatosw: read pager");
1349 return 0;
1350 }
1351 if (pager.pg_type != PG_SWAP) {
1352 seterr("%u: weird pager\n", p->p_pid);
1353 return 0;
1354 }
1355
1356 /* Get swap pager data */
1357 if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
1358 setsyserr("vatosw: read swpager");
1359 return 0;
1360 }
1361
1362 off += vm_object.paging_offset;
1363
1364 /* Read swap block array */
1365 if (!KREAD((long)swpager.sw_blocks +
1366 (off/dbtob(swpager.sw_bsize)) * sizeof swblock,
1367 &swblock, sizeof swblock)) {
1368 setsyserr("vatosw: read swblock");
1369 return 0;
1370 }
1371 swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
1372 swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
1373 return 1;
1374 }
1375
1376
1377 #define atop(x) (((unsigned)(x)) >> page_shift)
1378 #define vm_page_hash(object, offset) \
1379 (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1380
1381 static int
1382 findpage(object, offset, maddr)
1383 long object;
1384 long offset;
1385 vm_offset_t *maddr;
1386 {
1387 static long vm_page_hash_mask;
1388 static long vm_page_buckets;
1389 static long page_shift;
1390 queue_head_t bucket;
1391 struct vm_page mem;
1392 long addr, baddr;
1393
1394 if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
1395 &vm_page_hash_mask, sizeof (long))) {
1396 seterr("can't read vm_page_hash_mask");
1397 return 0;
1398 }
1399 if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
1400 &page_shift, sizeof (long))) {
1401 seterr("can't read page_shift");
1402 return 0;
1403 }
1404 if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
1405 &vm_page_buckets, sizeof (long))) {
1406 seterr("can't read vm_page_buckets");
1407 return 0;
1408 }
1409
1410 baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
1411 if (!KREAD(baddr, &bucket, sizeof (bucket))) {
1412 seterr("can't read vm_page_bucket");
1413 return 0;
1414 }
1415
1416 addr = (long)bucket.next;
1417 while (addr != baddr) {
1418 if (!KREAD(addr, &mem, sizeof (mem))) {
1419 seterr("can't read vm_page");
1420 return 0;
1421 }
1422 if ((long)mem.object == object && mem.offset == offset) {
1423 *maddr = (long)mem.phys_addr;
1424 return 1;
1425 }
1426 addr = (long)mem.hashq.next;
1427 }
1428 return 0;
1429 }
1430 #endif /* NEWVM */
1431
1432 #include <varargs.h>
1433 static char errbuf[_POSIX2_LINE_MAX];
1434
1435 static void
1436 seterr(va_alist)
1437 va_dcl
1438 {
1439 char *fmt;
1440 va_list ap;
1441
1442 va_start(ap);
1443 fmt = va_arg(ap, char *);
1444 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1445 #if DEBUG
1446 (void) vfprintf(stderr, fmt, ap);
1447 #endif
1448 va_end(ap);
1449 }
1450
1451 static void
1452 setsyserr(va_alist)
1453 va_dcl
1454 {
1455 char *fmt, *cp;
1456 va_list ap;
1457 extern int errno;
1458
1459 va_start(ap);
1460 fmt = va_arg(ap, char *);
1461 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1462 for (cp=errbuf; *cp; cp++)
1463 ;
1464 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1465 va_end(ap);
1466 }
1467
1468 char *
1469 kvm_geterr()
1470 {
1471 return (errbuf);
1472 }
1473