kvm.c revision 1.14 1 /*-
2 * Copyright (c) 1993 Christopher G. Demetriou
3 * Copyright (c) 1989 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #if defined(LIBC_SCCS) && !defined(lint)
36 /*static char sccsid[] = "from: @(#)kvm.c 5.18 (Berkeley) 5/7/91";*/
37 static char rcsid[] = "$Id: kvm.c,v 1.14 1993/08/14 11:44:45 cgd Exp $";
38 #endif /* LIBC_SCCS and not lint */
39
40 #include <sys/param.h>
41 #include <sys/user.h>
42 #include <sys/proc.h>
43 #include <sys/ioctl.h>
44 #include <sys/kinfo.h>
45 #include <sys/tty.h>
46 #include <sys/exec.h>
47 #include <machine/vmparam.h>
48 #include <fcntl.h>
49 #include <nlist.h>
50 #include <kvm.h>
51 #include <ndbm.h>
52 #include <limits.h>
53 #include <paths.h>
54 #include <stdio.h>
55 #include <string.h>
56
57 #ifdef SPPWAIT
58 #define NEWVM
59 #endif
60
61 #ifdef NEWVM
62 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
63 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
64 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
65 #include <vm/vm_page.h>
66 #include <vm/swap_pager.h>
67 #include <sys/kinfo_proc.h>
68 #ifdef hp300
69 #include <machine/pte.h>
70 #endif
71 #else /* NEWVM */
72 #include <machine/pte.h>
73 #include <sys/vmmac.h>
74 #include <sys/text.h>
75 #endif /* NEWVM */
76
/*
 * files
 */
static const char *unixf, *memf, *kmemf, *swapf;	/* pathnames: kernel image, phys mem, kernel mem, swap */
static int unixx, mem, kmem, swap;	/* descriptors for the files above; -1 when closed */
static DBM *db;				/* kvm_mkdb symbol database; NULL until opened */
/*
 * flags
 */
static int deadkernel;			/* non-zero: examining a crash dump, not the live kernel */
static int kvminit = 0;			/* set once getkvars() has succeeded */
static int kvmfilesopen = 0;		/* set once kvm_openfiles() has succeeded */
/*
 * state
 */
static struct kinfo_proc *kvmprocbase, *kvmprocptr;	/* proc snapshot and iteration cursor */
static int kvmnprocs;			/* number of entries in kvmprocbase */
/*
 * u. buffer -- one copy of the current process's u-area, viewable
 * either as a struct user or as raw pages.
 */
static union {
	struct user user;
	char upages[UPAGES][NBPG];
} user;

#ifdef NEWVM
/* result of a vatosw() translation that landed on the swap device */
struct swapblk {
	long offset;		/* offset in swap device */
	long size;		/* remaining size of block in swap device */
};
#endif
/*
 * random other stuff
 */
#ifndef NEWVM
static struct pte *Usrptmap, *usrpt;
static struct pte *Sysmap;
static int Syssize;
#endif
static int dmmin, dmmax;	/* swap-map interleave parameters, read from the kernel */
static int pcbpf;		/* page frame of the pcb, set by kvm_getu() */
static int nswap;		/* size of the swap area, read from the kernel */
static char *tmp;		/* scratch pointer used by the basename() macro */
#if defined(hp300)
static int lowram;		/* first RAM page frame (dead kernels) */
static struct ste *Sysseg;	/* copy of the kernel segment table (dead kernels) */
#endif
#if defined(i386)
static struct pde *PTD;		/* copy of the kernel page directory (dead kernels) */
#endif

/* last pathname component of cp (uses the file-scope "tmp" scratch) */
#define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
#define MAXSYMSIZE	256	/* longest symbol name we will look up */

#if defined(hp300)
#define pftoc(f)	((f) - lowram)	/* page frame -> core-file frame */
#define iskva(v)	(1)
#endif

#ifndef pftoc
#define pftoc(f)	(f)
#endif
#ifndef iskva
/* kernel virtual addresses have the KERNBASE bit(s) set */
#define iskva(v)	((u_long)(v) & KERNBASE)
#endif
142
/*
 * Kernel symbols the library needs.  The X_* indices below must track
 * the order of the entries.  For a live kernel, kvm_openfiles() blanks
 * the name at X_DEADKERNEL so nlist() stops before the dead-kernel-only
 * symbols.
 */
static struct nlist nl[] = {
	{ "_Usrptmap" },
#define	X_USRPTMAP	0
	{ "_usrpt" },
#define	X_USRPT		1
	{ "_nswap" },
#define	X_NSWAP		2
	{ "_dmmin" },
#define	X_DMMIN		3
	{ "_dmmax" },
#define	X_DMMAX		4
	{ "_vm_page_buckets" },
#define X_VM_PAGE_BUCKETS	5
	{ "_vm_page_hash_mask" },
#define X_VM_PAGE_HASH_MASK	6
	{ "_page_shift" },
#define X_PAGE_SHIFT	7
	/*
	 * everything here and down, only if a dead kernel
	 */
	{ "_Sysmap" },
#define	X_SYSMAP	8
#define	X_DEADKERNEL	X_SYSMAP
	{ "_Syssize" },
#define	X_SYSSIZE	9
	{ "_allproc" },
#define X_ALLPROC	10
	{ "_zombproc" },
#define X_ZOMBPROC	11
	{ "_nproc" },
#define	X_NPROC		12
#define	X_LAST		12
#if defined(hp300)
	{ "_Sysseg" },
#define	X_SYSSEG	(X_LAST+1)
	{ "_lowram" },
#define	X_LOWRAM	(X_LAST+2)
#endif
#if defined(i386)
	{ "_IdlePTD" },
#define	X_IdlePTD	(X_LAST+1)
#endif
	{ "" },
};

/* forward declarations; K&R-style definitions appear below */
static off_t Vtophys();
static void klseek(), seterr(), setsyserr(), vstodb();
static int getkvars(), kvm_doprocs(), kvm_init();
#ifdef NEWVM
static int vatosw();
static int findpage();
#endif
195
/*
 * Open the kernel image, memory, and swap files named by the caller
 * (NULL selects the compiled-in default paths).  Passing a non-NULL
 * memory file switches the library into dead-kernel (crash dump) mode.
 *
 * returns 0 if files were opened now,
 * 1 if files were already opened,
 * -1 if files could not be opened.
 */
kvm_openfiles(uf, mf, sf)
	const char *uf, *mf, *sf;
{
	if (kvmfilesopen)
		return (1);
	unixx = mem = kmem = swap = -1;
	unixf = (uf == NULL) ? _PATH_UNIX : uf;
	memf = (mf == NULL) ? _PATH_MEM : mf;

	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", unixf);
		goto failed;
	}
	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", memf);
		goto failed;
	}
	if (sf != NULL)
		swapf = sf;
	if (mf != NULL) {
		/* dead kernel: "kernel memory" is just the dump file */
		deadkernel++;
		kmemf = mf;
		kmem = mem;
		swap = -1;
	} else {
		kmemf = _PATH_KMEM;
		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
			setsyserr("can't open %s", kmemf);
			goto failed;
		}
		swapf = (sf == NULL) ? _PATH_DRUM : sf;
		/*
		 * live kernel - avoid looking up nlist entries
		 * past X_DEADKERNEL.
		 */
		nl[X_DEADKERNEL].n_name = "";
	}
	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
		seterr("can't open %s", swapf);
		goto failed;
	}
	kvmfilesopen++;
	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)	/*XXX*/
		return (-1);
	return (0);
failed:
	/* close anything partially opened and reset state */
	kvm_close();
	return (-1);
}
250
251 static
252 kvm_init(uf, mf, sf)
253 char *uf, *mf, *sf;
254 {
255 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
256 return (-1);
257 if (getkvars() == -1)
258 return (-1);
259 kvminit = 1;
260
261 return (0);
262 }
263
264 kvm_close()
265 {
266 if (unixx != -1) {
267 close(unixx);
268 unixx = -1;
269 }
270 if (kmem != -1) {
271 if (kmem != mem)
272 close(kmem);
273 /* otherwise kmem is a copy of mem, and will be closed below */
274 kmem = -1;
275 }
276 if (mem != -1) {
277 close(mem);
278 mem = -1;
279 }
280 if (swap != -1) {
281 close(swap);
282 swap = -1;
283 }
284 if (db != NULL) {
285 dbm_close(db);
286 db = NULL;
287 }
288 kvminit = 0;
289 kvmfilesopen = 0;
290 deadkernel = 0;
291 #ifndef NEWVM
292 if (Sysmap) {
293 free(Sysmap);
294 Sysmap = NULL;
295 }
296 #endif
297 }
298
299 kvm_nlist(nl)
300 struct nlist *nl;
301 {
302 datum key, data;
303 char dbname[MAXPATHLEN];
304 char dbversion[_POSIX2_LINE_MAX];
305 char kversion[_POSIX2_LINE_MAX];
306 int dbversionlen;
307 char symbuf[MAXSYMSIZE];
308 struct nlist nbuf, *n;
309 int num, did;
310
311 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
312 return (-1);
313 if (deadkernel)
314 goto hard2;
315 /*
316 * initialize key datum
317 */
318 key.dptr = symbuf;
319
320 if (db != NULL)
321 goto win; /* off to the races */
322 /*
323 * open database
324 */
325 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
326 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
327 goto hard2;
328 /*
329 * read version out of database
330 */
331 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
332 key.dsize = (sizeof ("VERSION") - 1);
333 data = dbm_fetch(db, key);
334 if (data.dptr == NULL)
335 goto hard1;
336 bcopy(data.dptr, dbversion, data.dsize);
337 dbversionlen = data.dsize;
338 /*
339 * read version string from kernel memory
340 */
341 bcopy("_version", symbuf, sizeof ("_version")-1);
342 key.dsize = (sizeof ("_version")-1);
343 data = dbm_fetch(db, key);
344 if (data.dptr == NULL)
345 goto hard1;
346 if (data.dsize != sizeof (struct nlist))
347 goto hard1;
348 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
349 lseek(kmem, nbuf.n_value, 0);
350 if (read(kmem, kversion, dbversionlen) != dbversionlen)
351 goto hard1;
352 /*
353 * if they match, we win - otherwise do it the hard way
354 */
355 if (bcmp(dbversion, kversion, dbversionlen) != 0)
356 goto hard1;
357 /*
358 * getem from the database.
359 */
360 win:
361 num = did = 0;
362 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
363 int len;
364 /*
365 * clear out fields from users buffer
366 */
367 n->n_type = 0;
368 n->n_other = 0;
369 n->n_desc = 0;
370 n->n_value = 0;
371 /*
372 * query db
373 */
374 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
375 seterr("symbol too large");
376 return (-1);
377 }
378 (void)strcpy(symbuf, n->n_name);
379 key.dsize = len;
380 data = dbm_fetch(db, key);
381 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
382 continue;
383 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
384 n->n_value = nbuf.n_value;
385 n->n_type = nbuf.n_type;
386 n->n_desc = nbuf.n_desc;
387 n->n_other = nbuf.n_other;
388 did++;
389 }
390 return (num - did);
391 hard1:
392 dbm_close(db);
393 db = NULL;
394 hard2:
395 num = nlist(unixf, nl);
396 if (num == -1)
397 seterr("nlist (hard way) failed");
398 return (num);
399 }
400
401 kvm_getprocs(what, arg)
402 int what, arg;
403 {
404 static int ocopysize = -1;
405
406 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
407 return (NULL);
408 if (!deadkernel) {
409 int ret, copysize;
410
411 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
412 setsyserr("can't get estimate for kerninfo");
413 return (-1);
414 }
415 copysize = ret;
416 if (copysize > ocopysize || !kvmprocbase) {
417 if (ocopysize == -1 || !kvmprocbase)
418 kvmprocbase =
419 (struct kinfo_proc *)malloc(copysize);
420 else
421 kvmprocbase =
422 (struct kinfo_proc *)realloc(kvmprocbase,
423 copysize);
424 if (!kvmprocbase) {
425 seterr("out of memory");
426 return (-1);
427 }
428 }
429 ocopysize = copysize;
430 if ((ret = getkerninfo(what, kvmprocbase, ©size,
431 arg)) == -1) {
432 setsyserr("can't get proc list");
433 return (-1);
434 }
435 if (copysize % sizeof (struct kinfo_proc)) {
436 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
437 copysize, sizeof (struct kinfo_proc));
438 return (-1);
439 }
440 kvmnprocs = copysize / sizeof (struct kinfo_proc);
441 } else {
442 int nproc;
443
444 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
445 sizeof (int)) != sizeof (int)) {
446 seterr("can't read nproc");
447 return (-1);
448 }
449 if ((kvmprocbase = (struct kinfo_proc *)
450 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
451 seterr("out of memory (addr: %x nproc = %d)",
452 nl[X_NPROC].n_value, nproc);
453 return (-1);
454 }
455 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
456 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
457 }
458 kvmprocptr = kvmprocbase;
459
460 return (kvmnprocs);
461 }
462
463 /*
464 * XXX - should NOT give up so easily - especially since the kernel
465 * may be corrupt (it died). Should gather as much information as possible.
466 * Follows proc ptrs instead of reading table since table may go
467 * away soon.
468 */
469 static
470 kvm_doprocs(what, arg, buff)
471 int what, arg;
472 char *buff;
473 {
474 struct proc *p, proc;
475 register char *bp = buff;
476 int i = 0;
477 int doingzomb = 0;
478 struct eproc eproc;
479 struct pgrp pgrp;
480 struct session sess;
481 struct tty tty;
482 #ifndef NEWVM
483 struct text text;
484 #endif
485
486 /* allproc */
487 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
488 sizeof (struct proc *)) != sizeof (struct proc *)) {
489 seterr("can't read allproc");
490 return (-1);
491 }
492
493 again:
494 for (; p; p = proc.p_nxt) {
495 if (kvm_read(p, &proc, sizeof (struct proc)) !=
496 sizeof (struct proc)) {
497 seterr("can't read proc at %x", p);
498 return (-1);
499 }
500 #ifdef NEWVM
501 if (kvm_read(proc.p_cred, &eproc.e_pcred,
502 sizeof (struct pcred)) == sizeof (struct pcred))
503 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
504 sizeof (struct ucred));
505 switch(ki_op(what)) {
506
507 case KINFO_PROC_PID:
508 if (proc.p_pid != (pid_t)arg)
509 continue;
510 break;
511
512
513 case KINFO_PROC_UID:
514 if (eproc.e_ucred.cr_uid != (uid_t)arg)
515 continue;
516 break;
517
518 case KINFO_PROC_RUID:
519 if (eproc.e_pcred.p_ruid != (uid_t)arg)
520 continue;
521 break;
522 }
523 #else
524 switch(ki_op(what)) {
525
526 case KINFO_PROC_PID:
527 if (proc.p_pid != (pid_t)arg)
528 continue;
529 break;
530
531
532 case KINFO_PROC_UID:
533 if (proc.p_uid != (uid_t)arg)
534 continue;
535 break;
536
537 case KINFO_PROC_RUID:
538 if (proc.p_ruid != (uid_t)arg)
539 continue;
540 break;
541 }
542 #endif
543 /*
544 * gather eproc
545 */
546 eproc.e_paddr = p;
547 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
548 sizeof (struct pgrp)) {
549 seterr("can't read pgrp at %x", proc.p_pgrp);
550 return (-1);
551 }
552 eproc.e_sess = pgrp.pg_session;
553 eproc.e_pgid = pgrp.pg_id;
554 eproc.e_jobc = pgrp.pg_jobc;
555 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
556 != sizeof (struct session)) {
557 seterr("can't read session at %x", pgrp.pg_session);
558 return (-1);
559 }
560 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
561 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
562 != sizeof (struct tty)) {
563 seterr("can't read tty at %x", sess.s_ttyp);
564 return (-1);
565 }
566 eproc.e_tdev = tty.t_dev;
567 eproc.e_tsess = tty.t_session;
568 if (tty.t_pgrp != NULL) {
569 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
570 pgrp)) != sizeof (struct pgrp)) {
571 seterr("can't read tpgrp at &x",
572 tty.t_pgrp);
573 return (-1);
574 }
575 eproc.e_tpgid = pgrp.pg_id;
576 } else
577 eproc.e_tpgid = -1;
578 } else
579 eproc.e_tdev = NODEV;
580 if (proc.p_wmesg)
581 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
582 #ifdef NEWVM
583 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
584 sizeof (struct vmspace));
585 eproc.e_xsize = eproc.e_xrssize =
586 eproc.e_xccount = eproc.e_xswrss = 0;
587 #else
588 if (proc.p_textp) {
589 kvm_read(proc.p_textp, &text, sizeof (text));
590 eproc.e_xsize = text.x_size;
591 eproc.e_xrssize = text.x_rssize;
592 eproc.e_xccount = text.x_ccount;
593 eproc.e_xswrss = text.x_swrss;
594 } else {
595 eproc.e_xsize = eproc.e_xrssize =
596 eproc.e_xccount = eproc.e_xswrss = 0;
597 }
598 #endif
599
600 switch(ki_op(what)) {
601
602 case KINFO_PROC_PGRP:
603 if (eproc.e_pgid != (pid_t)arg)
604 continue;
605 break;
606
607 case KINFO_PROC_TTY:
608 if ((proc.p_flag&SCTTY) == 0 ||
609 eproc.e_tdev != (dev_t)arg)
610 continue;
611 break;
612 }
613
614 i++;
615 bcopy(&proc, bp, sizeof (struct proc));
616 bp += sizeof (struct proc);
617 bcopy(&eproc, bp, sizeof (struct eproc));
618 bp+= sizeof (struct eproc);
619 }
620 if (!doingzomb) {
621 /* zombproc */
622 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
623 sizeof (struct proc *)) != sizeof (struct proc *)) {
624 seterr("can't read zombproc");
625 return (-1);
626 }
627 doingzomb = 1;
628 goto again;
629 }
630
631 return (i);
632 }
633
634 struct proc *
635 kvm_nextproc()
636 {
637
638 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
639 return (NULL);
640 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
641 seterr("end of proc list");
642 return (NULL);
643 }
644 return((struct proc *)(kvmprocptr++));
645 }
646
647 struct eproc *
648 kvm_geteproc(p)
649 const struct proc *p;
650 {
651 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
652 }
653
/*
 * Rewind the iteration cursor so kvm_nextproc() starts again at the
 * first entry of the snapshot taken by kvm_getprocs().
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
658
659 kvm_freeprocs()
660 {
661
662 if (kvmprocbase) {
663 free(kvmprocbase);
664 kvmprocbase = NULL;
665 }
666 }
667
668 #ifdef NEWVM
669 struct user *
670 kvm_getu(p)
671 const struct proc *p;
672 {
673 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
674 register int i;
675 register char *up;
676 u_int vaddr;
677 struct swapblk swb;
678
679 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
680 return (NULL);
681 if (p->p_stat == SZOMB) {
682 seterr("zombie process");
683 return (NULL);
684 }
685
686 if ((p->p_flag & SLOAD) == 0) {
687 vm_offset_t maddr;
688
689 if (swap < 0) {
690 seterr("no swap");
691 return (NULL);
692 }
693 /*
694 * Costly operation, better set enable_swap to zero
695 * in vm/vm_glue.c, since paging of user pages isn't
696 * done yet anyway.
697 */
698 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
699 return NULL;
700
701 if (maddr == 0 && swb.size < UPAGES * NBPG)
702 return NULL;
703
704 for (i = 0; i < UPAGES; i++) {
705 if (maddr) {
706 (void) lseek(mem, maddr + i * NBPG, 0);
707 if (read(mem,
708 (char *)user.upages[i], NBPG) != NBPG) {
709 seterr(
710 "can't read u for pid %d from %s",
711 p->p_pid, swapf);
712 return NULL;
713 }
714 } else {
715 (void) lseek(swap, swb.offset + i * NBPG, 0);
716 if (read(swap,
717 (char *)user.upages[i], NBPG) != NBPG) {
718 seterr(
719 "can't read u for pid %d from %s",
720 p->p_pid, swapf);
721 return NULL;
722 }
723 }
724 }
725 return(&user.user);
726 }
727 /*
728 * Read u-area one page at a time for the benefit of post-mortems
729 */
730 up = (char *) p->p_addr;
731 for (i = 0; i < UPAGES; i++) {
732 klseek(kmem, (long)up, 0);
733 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
734 seterr("cant read page %x of u of pid %d from %s",
735 up, p->p_pid, kmemf);
736 return(NULL);
737 }
738 up += CLBYTES;
739 }
740 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
741
742 kp->kp_eproc.e_vm.vm_rssize =
743 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
744 return(&user.user);
745 }
746 #else
/*
 * (pre-NEWVM variant)  Fetch the u-area of process p into the static
 * "user" buffer.  A swapped-out process is read in one piece from the
 * swap device; a resident one is located through Usrptmap and copied
 * from physical memory one click at a time.  Returns NULL on failure.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	/* swapped out: the whole u-area lives contiguously on swap */
	if ((p->p_flag & SLOAD) == 0) {
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		/* NOTE(review): argaddr0/argaddr1 are not declared anywhere
		 * in this file as shown - presumably file-scope globals in
		 * the non-NEWVM configuration; verify before building. */
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* resident: indirect pte giving the u-area's page table page */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	/* read the tail of that page table: argument and u-area ptes */
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	/* remember where the argument pages are, if they are resident */
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* copy the u-area in, one click at a time, back to front */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		    (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
814 #endif
815
/*
 * Read up to len bytes from virtual address addr in the address space
 * of process p into buf.  At most one click (CLBYTES, aligned) is
 * transferred per call.  Returns the number of bytes actually read,
 * or 0 on failure.
 */
int
kvm_procread(p, addr, buf, len)
	const struct proc *p;
	const unsigned addr, len;
	char *buf;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *) p;
	struct swapblk swb;
	vm_offset_t swaddr = 0, memaddr = 0;
	unsigned real_len;

	/* clamp the transfer so it never crosses a click boundary */
	real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));

#if defined(hp300)
	/* try the process page table for a resident physical address */
	if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
		struct pte pte[CLSIZE*2];

		klseek(kmem,
		    (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
		    [btoc(USRSTACK-CLBYTES*2)], 0);
		if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
			memaddr = ctob(pftoc(pte[CLSIZE*1].pg_pfnum)) +
			    (addr % (1 << CLSHIFT));
		}
	}
#endif
#if defined(i386)
	/* walk page directory then page table to find the frame */
	if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
		struct pde pde;

		klseek(kmem,
		(long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);

		if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
		    && pde.pd_v) {

			struct pte pte;

			if (lseek(mem, (long)ctob(pde.pd_pfnum) +
			    (ptei(addr) * sizeof pte), 0) == -1)
				seterr("kvm_procread: lseek");
			if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
				if (pte.pg_v) {
					memaddr = (long)ctob(pte.pg_pfnum) +
					    (addr % (1 << PGSHIFT));
				}
			} else {
				seterr("kvm_procread: read");
			}
		}
	}
#endif	/* i386 */

	/* not resident (or no page table): consult the vm map / swap */
	if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
		if (memaddr != 0) {
			memaddr += addr & CLOFSET;
		} else {
			swaddr = swb.offset + (addr & CLOFSET);
			swb.size -= addr & CLOFSET;
		}
	}

	if (memaddr) {
		if (lseek(mem, memaddr, 0) == -1)
			seterr("kvm_getu: lseek");
		/* NOTE(review): real_len is unsigned, so read()'s -1 is only
		 * caught because unsigned compares equal to (unsigned)-1 */
		real_len = read(mem, buf, real_len);
		if (real_len == -1) {
			seterr("kvm_procread: read");
			return 0;
		}
	} else if (swaddr) {
		/* bounce through a click-sized buffer from the swap file */
		char bouncebuf[CLBYTES];
		unsigned len;	/* NOTE(review): shadows the len parameter */
		if (lseek(swap, swaddr & ~CLOFSET, 0) == -1) {
			seterr("kvm_procread: lseek");
			return 0;
		}
		len = read(swap, bouncebuf, CLBYTES);
		if (len == -1 || len <= (swaddr & CLOFSET)) {
			seterr("kvm_procread: read");
			return 0;
		}
		len = MIN(len - (swaddr & CLOFSET), real_len);
		memcpy(buf, &bouncebuf[swaddr & CLOFSET], len);
		return len;
	} else
		real_len = 0;

	return real_len;
}
906
/*
 * Copy a NUL-terminated string from address addr in the address space
 * of process p into buf, reading at most len bytes.  Returns the
 * number of bytes copied NOT counting the terminating NUL (strlen
 * semantics); if no NUL appears within len bytes, returns the number
 * of bytes copied.  Data is pulled through kvm_procread() in chunks
 * of up to 200 bytes.
 */
int
kvm_procreadstr(p, addr, buf, len)
	const struct proc *p;
	const unsigned addr;
	char *buf;
	unsigned len;
{
	int done, little;
	char copy[200], *pb;
	char a;		/* NOTE(review): unused */

	done = 0;
	while (len) {
		/* next chunk; a short/failed read ends the string early */
		little = kvm_procread(p, addr+done, copy, MIN(len, sizeof copy));
		if (little<1)
			break;
		pb = copy;
		while (little--) {
			len--;
			if( (*buf++ = *pb++) == '\0' )
				return done;
			done++;
		}
	}
	return done;
}
933
934 char *
935 kvm_getargs(p, up)
936 const struct proc *p;
937 const struct user *up;
938 {
939 static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
940 register char *cp, *acp;
941 int left, rv;
942 struct ps_strings arginfo;
943
944 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
945 goto retucomm;
946
947 if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
948 sizeof(arginfo))
949 goto bad;
950
951 cp = cmdbuf;
952 acp = arginfo.ps_argvstr;
953 left = ARG_MAX + 1;
954 while (arginfo.ps_nargvstr--) {
955 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
956 acp += rv + 1;
957 left -= rv + 1;
958 cp += rv;
959 *cp++ = ' ';
960 *cp = '\0';
961 } else
962 goto bad;
963 }
964 cp-- ; *cp = '\0';
965
966 if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
967 (void) strcat(cmdbuf, " (");
968 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
969 (void) strcat(cmdbuf, ")");
970 }
971 return (cmdbuf);
972
973 bad:
974 seterr("error locating command name for pid %d", p->p_pid);
975 retucomm:
976 (void) strcpy(cmdbuf, "(");
977 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
978 (void) strcat(cmdbuf, ")");
979 return (cmdbuf);
980 }
981
982 char *
983 kvm_getenv(p, up)
984 const struct proc *p;
985 const struct user *up;
986 {
987 static char envbuf[ARG_MAX + 1];
988 register char *cp, *acp;
989 int left, rv;
990 struct ps_strings arginfo;
991
992 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
993 goto retemptyenv;
994
995 if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
996 sizeof(arginfo))
997 goto bad;
998
999 cp = envbuf;
1000 acp = arginfo.ps_envstr;
1001 left = ARG_MAX + 1;
1002 while (arginfo.ps_nenvstr--) {
1003 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
1004 acp += rv + 1;
1005 left -= rv + 1;
1006 cp += rv;
1007 *cp++ = ' ';
1008 *cp = '\0';
1009 } else
1010 goto bad;
1011 }
1012 cp-- ; *cp = '\0';
1013 return (envbuf);
1014
1015 bad:
1016 seterr("error locating environment for pid %d", p->p_pid);
1017 retemptyenv:
1018 envbuf[0] = '\0';
1019 return (envbuf);
1020 }
1021
1022 static
1023 getkvars()
1024 {
1025 if (kvm_nlist(nl) == -1)
1026 return (-1);
1027 if (deadkernel) {
1028 /* We must do the sys map first because klseek uses it */
1029 long addr;
1030
1031 #ifndef NEWVM
1032 Syssize = nl[X_SYSSIZE].n_value;
1033 Sysmap = (struct pte *)
1034 calloc((unsigned) Syssize, sizeof (struct pte));
1035 if (Sysmap == NULL) {
1036 seterr("out of space for Sysmap");
1037 return (-1);
1038 }
1039 addr = (long) nl[X_SYSMAP].n_value;
1040 addr &= ~KERNBASE;
1041 (void) lseek(kmem, addr, 0);
1042 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1043 != Syssize * sizeof (struct pte)) {
1044 seterr("can't read Sysmap");
1045 return (-1);
1046 }
1047 #endif
1048 #if defined(hp300)
1049 addr = (long) nl[X_LOWRAM].n_value;
1050 (void) lseek(kmem, addr, 0);
1051 if (read(kmem, (char *) &lowram, sizeof (lowram))
1052 != sizeof (lowram)) {
1053 seterr("can't read lowram");
1054 return (-1);
1055 }
1056 lowram = btop(lowram);
1057 Sysseg = (struct ste *) malloc(NBPG);
1058 if (Sysseg == NULL) {
1059 seterr("out of space for Sysseg");
1060 return (-1);
1061 }
1062 addr = (long) nl[X_SYSSEG].n_value;
1063 (void) lseek(kmem, addr, 0);
1064 read(kmem, (char *)&addr, sizeof(addr));
1065 (void) lseek(kmem, (long)addr, 0);
1066 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1067 seterr("can't read Sysseg");
1068 return (-1);
1069 }
1070 #endif
1071 #if defined(i386)
1072 PTD = (struct pde *) malloc(NBPG);
1073 if (PTD == NULL) {
1074 seterr("out of space for PTD");
1075 return (-1);
1076 }
1077 addr = (long) nl[X_IdlePTD].n_value;
1078 (void) lseek(kmem, addr, 0);
1079 read(kmem, (char *)&addr, sizeof(addr));
1080 (void) lseek(kmem, (long)addr, 0);
1081 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1082 seterr("can't read PTD");
1083 return (-1);
1084 }
1085 #endif
1086 }
1087 #ifndef NEWVM
1088 usrpt = (struct pte *)nl[X_USRPT].n_value;
1089 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1090 #endif
1091 if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1092 sizeof (long)) {
1093 seterr("can't read nswap");
1094 return (-1);
1095 }
1096 if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1097 sizeof (long)) {
1098 seterr("can't read dmmin");
1099 return (-1);
1100 }
1101 if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1102 sizeof (long)) {
1103 seterr("can't read dmmax");
1104 return (-1);
1105 }
1106 return (0);
1107 }
1108
1109 kvm_read(loc, buf, len)
1110 void *loc;
1111 void *buf;
1112 {
1113 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1114 return (-1);
1115 if (iskva(loc)) {
1116 klseek(kmem, (off_t) loc, 0);
1117 if (read(kmem, buf, len) != len) {
1118 seterr("error reading kmem at %x", loc);
1119 return (-1);
1120 }
1121 } else {
1122 lseek(mem, (off_t) loc, 0);
1123 if (read(mem, buf, len) != len) {
1124 seterr("error reading mem at %x", loc);
1125 return (-1);
1126 }
1127 }
1128 return (len);
1129 }
1130
1131 static void
1132 klseek(fd, loc, off)
1133 int fd;
1134 off_t loc;
1135 int off;
1136 {
1137
1138 if (deadkernel) {
1139 if ((loc = Vtophys(loc)) == -1)
1140 return;
1141 }
1142 (void) lseek(fd, (off_t)loc, off);
1143 }
1144
1145 #ifndef NEWVM
/*
 * Given a base/size pair in virtual swap area,
 * return a physical base/size pair which is the
 * (largest) initial, physically contiguous block.
 */
static void
vstodb(vsbase, vssize, dmp, dbp, rev)
	register int vsbase;
	int vssize;
	struct dmap *dmp;
	register struct dblock *dbp;
{
	/* NOTE(review): "rev" is not declared above, so it defaults to
	 * int under K&R rules; non-zero maps from the high end of the
	 * block.  Verify against the callers. */
	register int blk = dmmin;
	register swblk_t *ip = dmp->dm_map;

	vsbase = ctod(vsbase);
	vssize = ctod(vssize);
	/* sanity checks are disabled: a user library cannot panic */
	if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
		/*panic("vstodb")*/;
	while (vsbase >= blk) {
		/* skip whole swap blocks; block size doubles up to dmmax */
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	if (*ip <= 0 || *ip + blk > nswap)
		/*panic("vstodb")*/;
	dbp->db_size = MIN(vssize, blk - vsbase);
	dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
}
1176 #endif
1177
1178 #ifdef NEWVM
/*
 * Translate kernel virtual address loc into an offset in the memory
 * image of a dead kernel, using the page tables loaded by getkvars().
 * Returns (off_t)-1 on failure.  Only hp300 and i386 translations are
 * implemented here.
 */
static off_t
Vtophys(loc)
	u_long loc;
{
	off_t newloc = (off_t) -1;
#ifdef hp300
	int p, ste, pte;

	/* segment table entry covering the address */
	ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	/* the dump starts at lowram, so offsets are relative to it */
	(void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	/* page directory entry, from the PTD copy read by getkvars() */
	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	/* NOTE(review): pd_pfnum is a frame number used here as a byte
	 * offset; kvm_procread's i386 path applies ctob() first - this
	 * looks inconsistent, verify against the dump layout. */
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
1232 #else
/*
 * vtophys --
 *	Translate kernel virtual address `loc' into a physical address
 *	(an offset usable for seeking in the core file) by walking the
 *	system page table (Sysmap) by hand.  Returns the physical offset,
 *	or (off_t)-1 after recording an error with seterr().
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* Offset of the address within the kernel's virtual space. */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);		/* page index within the system map */
#if defined(vax) || defined(tahoe)
	/* On vax/tahoe only kernel-space addresses can be translated. */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/*
	 * Reject an invalid pte unless it looks like a page that is
	 * merely paged out: not fill-on-demand and carrying a nonzero
	 * frame number.
	 */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* NOTE(review): presumably hp300 RAM starts at frame `lowram';
	 * frames below it are not RAM-backed -- confirm. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* Physical address: page frame base plus offset within the page. */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1267 #endif
1268
1269
1270 #ifdef NEWVM
1271 /*
1272 * locate address of unwired or swapped page
1273 */
1274
1275 #define DEBUG 0
1276
1277 #define KREAD(off, addr, len) \
1278 (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1279
1280
/*
 * vatosw --
 *	Translate user virtual address `vaddr' of process `p' to either a
 *	physical (in-core) machine address, stored through `maddr', or to
 *	a swap-device location, stored through `swb'.  Returns 1 on
 *	success, 0 on failure with an error recorded via seterr()/
 *	setsyserr().
 *
 *	NOTE(review): `p' is actually a pointer to a kinfo_proc (see the
 *	cast below), not a live kernel proc structure -- confirm callers.
 */
static int
vatosw(p, vaddr, maddr, swb)
	struct proc *p ;
	vm_offset_t vaddr;
	vm_offset_t *maddr;
	struct swapblk *swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object vm_object;
	struct vm_map_entry vm_entry;
	struct pager_struct pager;
	struct swpager swpager;
	struct swblock swblock;
	long addr, off;
	int i;

	/* Pids 0 and 2 (presumably swapper and pagedaemon) have no
	 * pageable user address space -- nothing to translate. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	/*
	 * Walk the process' vm_map entry list (a linked list of
	 * mp->nentries entries starting at the map header) looking for
	 * an entry that covers vaddr and is backed by an object.
	 */
	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
				(vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* Share maps and submaps would need another level of lookup. */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	/* Byte offset of vaddr within the top-level backing object. */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/*
	 * Follow the shadow-object chain, probing the resident-page
	 * hash table at each level.
	 */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
				p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;	/* resident: *maddr has been set */

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
				p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* Not resident at any level: the page must live on swap. */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	/* Each swblock covers dbtob(sw_bsize) bytes of the object. */
	if (!KREAD((long)swpager.sw_blocks +
			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
			&swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Swap offset of the byte, plus bytes remaining in its block. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1388
1389
1390 #define atop(x) (((unsigned)(x)) >> page_shift)
1391 #define vm_page_hash(object, offset) \
1392 (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1393
/*
 * findpage --
 *	Search the kernel's resident-page hash table for the page of
 *	`object' at `offset'.  On a hit, store the page's physical
 *	address through `maddr' and return 1.  Return 0 when the page is
 *	not resident (or, with an error recorded, when a kernel read
 *	fails).
 */
static int
findpage(object, offset, maddr)
	long object;
	long offset;
	vm_offset_t *maddr;
{
	/* Kernel parameters, fetched once on first call and cached. */
	static long vm_page_hash_mask;
	static long vm_page_buckets;
	static long page_shift;
	queue_head_t bucket;
	struct vm_page mem;
	long addr, baddr;

	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
			&vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
			&page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
			&vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	/* Kernel address of the hash bucket for (object, offset). */
	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/* The bucket is a circular queue; walk it until back at the head. */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
1443 #endif /* NEWVM */
1444
1445 #include <varargs.h>
1446 static char errbuf[_POSIX2_LINE_MAX];
1447
1448 static void
1449 seterr(va_alist)
1450 va_dcl
1451 {
1452 char *fmt;
1453 va_list ap;
1454
1455 va_start(ap);
1456 fmt = va_arg(ap, char *);
1457 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1458 #if DEBUG
1459 (void) vfprintf(stderr, fmt, ap);
1460 #endif
1461 va_end(ap);
1462 }
1463
1464 static void
1465 setsyserr(va_alist)
1466 va_dcl
1467 {
1468 char *fmt, *cp;
1469 va_list ap;
1470 extern int errno;
1471
1472 va_start(ap);
1473 fmt = va_arg(ap, char *);
1474 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1475 for (cp=errbuf; *cp; cp++)
1476 ;
1477 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1478 va_end(ap);
1479 }
1480
1481 char *
1482 kvm_geterr()
1483 {
1484 return (errbuf);
1485 }
1486