/*-
2 * Copyright (c) 1993 Christopher G. Demetriou
3 * Copyright (c) 1989 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #if defined(LIBC_SCCS) && !defined(lint)
36 /*static char sccsid[] = "from: @(#)kvm.c 5.18 (Berkeley) 5/7/91";*/
37 static char rcsid[] = "$Id: kvm.c,v 1.15 1993/08/14 11:47:51 cgd Exp $";
38 #endif /* LIBC_SCCS and not lint */
39
40 #include <sys/param.h>
41 #include <sys/user.h>
42 #include <sys/proc.h>
43 #include <sys/ioctl.h>
44 #include <sys/kinfo.h>
45 #include <sys/tty.h>
46 #include <sys/exec.h>
47 #include <machine/vmparam.h>
48 #include <fcntl.h>
49 #include <nlist.h>
50 #include <kvm.h>
51 #include <ndbm.h>
52 #include <limits.h>
53 #include <paths.h>
54 #include <stdio.h>
55 #include <string.h>
56
57 #ifdef SPPWAIT
58 #define NEWVM
59 #endif
60
61 #ifdef NEWVM
62 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
63 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
64 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
65 #include <vm/vm_page.h>
66 #include <vm/swap_pager.h>
67 #include <sys/kinfo_proc.h>
68 #ifdef hp300
69 #include <machine/pte.h>
70 #endif
71 #else /* NEWVM */
72 #include <machine/pte.h>
73 #include <sys/vmmac.h>
74 #include <sys/text.h>
75 #endif /* NEWVM */
76
77 /*
78 * files
79 */
80 static const char *unixf, *memf, *kmemf, *swapf;
81 static int unixx, mem, kmem, swap;
82 static DBM *db;
83 /*
84 * flags
85 */
86 static int deadkernel;
87 static int kvminit = 0;
88 static int kvmfilesopen = 0;
89 /*
90 * state
91 */
92 static struct kinfo_proc *kvmprocbase, *kvmprocptr;
93 static int kvmnprocs;
94 /*
95 * u. buffer
96 */
97 static union {
98 struct user user;
99 char upages[UPAGES][NBPG];
100 } user;
101
102 #ifdef NEWVM
103 struct swapblk {
104 long offset; /* offset in swap device */
105 long size; /* remaining size of block in swap device */
106 };
107 #endif
108 /*
109 * random other stuff
110 */
111 #ifndef NEWVM
112 static struct pte *Usrptmap, *usrpt;
113 static struct pte *Sysmap;
114 static int Syssize;
115 #endif
116 static int dmmin, dmmax;
117 static int pcbpf;
118 static int nswap;
119 static char *tmp;
120 #if defined(hp300)
121 static int lowram;
122 static struct ste *Sysseg;
123 #endif
124 #if defined(i386)
125 static struct pde *PTD;
126 #endif
127
128 #define basename(cp) ((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
129 #define MAXSYMSIZE 256
130
131 #if defined(hp300)
132 #define pftoc(f) ((f) - lowram)
133 #define iskva(v) (1)
134 #endif
135
136 #ifndef pftoc
137 #define pftoc(f) (f)
138 #endif
139 #ifndef iskva
140 #define iskva(v) ((u_long)(v) & KERNBASE)
141 #endif
142
143 static struct nlist nl[] = {
144 { "_Usrptmap" },
145 #define X_USRPTMAP 0
146 { "_usrpt" },
147 #define X_USRPT 1
148 { "_nswap" },
149 #define X_NSWAP 2
150 { "_dmmin" },
151 #define X_DMMIN 3
152 { "_dmmax" },
153 #define X_DMMAX 4
154 { "_vm_page_buckets" },
155 #define X_VM_PAGE_BUCKETS 5
156 { "_vm_page_hash_mask" },
157 #define X_VM_PAGE_HASH_MASK 6
158 { "_page_shift" },
159 #define X_PAGE_SHIFT 7
160 /*
161 * everything here and down, only if a dead kernel
162 */
163 { "_Sysmap" },
164 #define X_SYSMAP 8
165 #define X_DEADKERNEL X_SYSMAP
166 { "_Syssize" },
167 #define X_SYSSIZE 9
168 { "_allproc" },
169 #define X_ALLPROC 10
170 { "_zombproc" },
171 #define X_ZOMBPROC 11
172 { "_nproc" },
173 #define X_NPROC 12
174 #define X_LAST 12
175 #if defined(hp300)
176 { "_Sysseg" },
177 #define X_SYSSEG (X_LAST+1)
178 { "_lowram" },
179 #define X_LOWRAM (X_LAST+2)
180 #endif
181 #if defined(i386)
182 { "_IdlePTD" },
183 #define X_IdlePTD (X_LAST+1)
184 #endif
185 { "" },
186 };
187
188 static off_t Vtophys();
189 static void klseek(), seterr(), setsyserr(), vstodb();
190 static int getkvars(), kvm_doprocs(), kvm_init();
191 #ifdef NEWVM
192 static int vatosw();
193 static int findpage();
194 #endif
195
196 /*
197 * returns 0 if files were opened now,
198 * 1 if files were already opened,
199 * -1 if files could not be opened.
200 */
201 kvm_openfiles(uf, mf, sf)
202 const char *uf, *mf, *sf;
203 {
204 if (kvmfilesopen)
205 return (1);
206 unixx = mem = kmem = swap = -1;
207 unixf = (uf == NULL) ? _PATH_UNIX : uf;
208 memf = (mf == NULL) ? _PATH_MEM : mf;
209
210 if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
211 setsyserr("can't open %s", unixf);
212 goto failed;
213 }
214 if ((mem = open(memf, O_RDONLY, 0)) == -1) {
215 setsyserr("can't open %s", memf);
216 goto failed;
217 }
218 if (sf != NULL)
219 swapf = sf;
220 if (mf != NULL) {
221 deadkernel++;
222 kmemf = mf;
223 kmem = mem;
224 swap = -1;
225 } else {
226 kmemf = _PATH_KMEM;
227 if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
228 setsyserr("can't open %s", kmemf);
229 goto failed;
230 }
231 swapf = (sf == NULL) ? _PATH_DRUM : sf;
232 /*
233 * live kernel - avoid looking up nlist entries
234 * past X_DEADKERNEL.
235 */
236 nl[X_DEADKERNEL].n_name = "";
237 }
238 if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
239 seterr("can't open %s", swapf);
240 goto failed;
241 }
242 kvmfilesopen++;
243 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
244 return (-1);
245 return (0);
246 failed:
247 kvm_close();
248 return (-1);
249 }
250
251 static
252 kvm_init(uf, mf, sf)
253 char *uf, *mf, *sf;
254 {
255 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
256 return (-1);
257 if (getkvars() == -1)
258 return (-1);
259 kvminit = 1;
260
261 return (0);
262 }
263
264 kvm_close()
265 {
266 if (unixx != -1) {
267 close(unixx);
268 unixx = -1;
269 }
270 if (kmem != -1) {
271 if (kmem != mem)
272 close(kmem);
273 /* otherwise kmem is a copy of mem, and will be closed below */
274 kmem = -1;
275 }
276 if (mem != -1) {
277 close(mem);
278 mem = -1;
279 }
280 if (swap != -1) {
281 close(swap);
282 swap = -1;
283 }
284 if (db != NULL) {
285 dbm_close(db);
286 db = NULL;
287 }
288 kvminit = 0;
289 kvmfilesopen = 0;
290 deadkernel = 0;
291 #ifndef NEWVM
292 if (Sysmap) {
293 free(Sysmap);
294 Sysmap = NULL;
295 }
296 #endif
297 }
298
299 kvm_nlist(nl)
300 struct nlist *nl;
301 {
302 datum key, data;
303 char dbname[MAXPATHLEN];
304 char dbversion[_POSIX2_LINE_MAX];
305 char kversion[_POSIX2_LINE_MAX];
306 int dbversionlen;
307 char symbuf[MAXSYMSIZE];
308 struct nlist nbuf, *n;
309 int num, did;
310
311 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
312 return (-1);
313 if (deadkernel)
314 goto hard2;
315 /*
316 * initialize key datum
317 */
318 key.dptr = symbuf;
319
320 if (db != NULL)
321 goto win; /* off to the races */
322 /*
323 * open database
324 */
325 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
326 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
327 goto hard2;
328 /*
329 * read version out of database
330 */
331 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
332 key.dsize = (sizeof ("VERSION") - 1);
333 data = dbm_fetch(db, key);
334 if (data.dptr == NULL)
335 goto hard1;
336 bcopy(data.dptr, dbversion, data.dsize);
337 dbversionlen = data.dsize;
338 /*
339 * read version string from kernel memory
340 */
341 bcopy("_version", symbuf, sizeof ("_version")-1);
342 key.dsize = (sizeof ("_version")-1);
343 data = dbm_fetch(db, key);
344 if (data.dptr == NULL)
345 goto hard1;
346 if (data.dsize != sizeof (struct nlist))
347 goto hard1;
348 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
349 lseek(kmem, nbuf.n_value, 0);
350 if (read(kmem, kversion, dbversionlen) != dbversionlen)
351 goto hard1;
352 /*
353 * if they match, we win - otherwise do it the hard way
354 */
355 if (bcmp(dbversion, kversion, dbversionlen) != 0)
356 goto hard1;
357 /*
358 * getem from the database.
359 */
360 win:
361 num = did = 0;
362 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
363 int len;
364 /*
365 * clear out fields from users buffer
366 */
367 n->n_type = 0;
368 n->n_other = 0;
369 n->n_desc = 0;
370 n->n_value = 0;
371 /*
372 * query db
373 */
374 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
375 seterr("symbol too large");
376 return (-1);
377 }
378 (void)strcpy(symbuf, n->n_name);
379 key.dsize = len;
380 data = dbm_fetch(db, key);
381 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
382 continue;
383 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
384 n->n_value = nbuf.n_value;
385 n->n_type = nbuf.n_type;
386 n->n_desc = nbuf.n_desc;
387 n->n_other = nbuf.n_other;
388 did++;
389 }
390 return (num - did);
391 hard1:
392 dbm_close(db);
393 db = NULL;
394 hard2:
395 num = nlist(unixf, nl);
396 if (num == -1)
397 seterr("nlist (hard way) failed");
398 return (num);
399 }
400
401 kvm_getprocs(what, arg)
402 int what, arg;
403 {
404 static int ocopysize = -1;
405
406 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
407 return (NULL);
408 if (!deadkernel) {
409 int ret, copysize;
410
411 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
412 setsyserr("can't get estimate for kerninfo");
413 return (-1);
414 }
415 copysize = ret;
416 if (copysize > ocopysize || !kvmprocbase) {
417 if (ocopysize == -1 || !kvmprocbase)
418 kvmprocbase =
419 (struct kinfo_proc *)malloc(copysize);
420 else
421 kvmprocbase =
422 (struct kinfo_proc *)realloc(kvmprocbase,
423 copysize);
424 if (!kvmprocbase) {
425 seterr("out of memory");
426 return (-1);
427 }
428 }
429 ocopysize = copysize;
430 if ((ret = getkerninfo(what, kvmprocbase, ©size,
431 arg)) == -1) {
432 setsyserr("can't get proc list");
433 return (-1);
434 }
435 if (copysize % sizeof (struct kinfo_proc)) {
436 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
437 copysize, sizeof (struct kinfo_proc));
438 return (-1);
439 }
440 kvmnprocs = copysize / sizeof (struct kinfo_proc);
441 } else {
442 int nproc;
443
444 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
445 sizeof (int)) != sizeof (int)) {
446 seterr("can't read nproc");
447 return (-1);
448 }
449 if ((kvmprocbase = (struct kinfo_proc *)
450 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
451 seterr("out of memory (addr: %x nproc = %d)",
452 nl[X_NPROC].n_value, nproc);
453 return (-1);
454 }
455 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
456 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
457 }
458 kvmprocptr = kvmprocbase;
459
460 return (kvmnprocs);
461 }
462
463 /*
464 * XXX - should NOT give up so easily - especially since the kernel
465 * may be corrupt (it died). Should gather as much information as possible.
466 * Follows proc ptrs instead of reading table since table may go
467 * away soon.
468 */
469 static
470 kvm_doprocs(what, arg, buff)
471 int what, arg;
472 char *buff;
473 {
474 struct proc *p, proc;
475 register char *bp = buff;
476 int i = 0;
477 int doingzomb = 0;
478 struct eproc eproc;
479 struct pgrp pgrp;
480 struct session sess;
481 struct tty tty;
482 #ifndef NEWVM
483 struct text text;
484 #endif
485
486 /* allproc */
487 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
488 sizeof (struct proc *)) != sizeof (struct proc *)) {
489 seterr("can't read allproc");
490 return (-1);
491 }
492
493 again:
494 for (; p; p = proc.p_nxt) {
495 if (kvm_read(p, &proc, sizeof (struct proc)) !=
496 sizeof (struct proc)) {
497 seterr("can't read proc at %x", p);
498 return (-1);
499 }
500 #ifdef NEWVM
501 if (kvm_read(proc.p_cred, &eproc.e_pcred,
502 sizeof (struct pcred)) == sizeof (struct pcred))
503 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
504 sizeof (struct ucred));
505 switch(ki_op(what)) {
506
507 case KINFO_PROC_PID:
508 if (proc.p_pid != (pid_t)arg)
509 continue;
510 break;
511
512
513 case KINFO_PROC_UID:
514 if (eproc.e_ucred.cr_uid != (uid_t)arg)
515 continue;
516 break;
517
518 case KINFO_PROC_RUID:
519 if (eproc.e_pcred.p_ruid != (uid_t)arg)
520 continue;
521 break;
522 }
523 #else
524 switch(ki_op(what)) {
525
526 case KINFO_PROC_PID:
527 if (proc.p_pid != (pid_t)arg)
528 continue;
529 break;
530
531
532 case KINFO_PROC_UID:
533 if (proc.p_uid != (uid_t)arg)
534 continue;
535 break;
536
537 case KINFO_PROC_RUID:
538 if (proc.p_ruid != (uid_t)arg)
539 continue;
540 break;
541 }
542 #endif
543 /*
544 * gather eproc
545 */
546 eproc.e_paddr = p;
547 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
548 sizeof (struct pgrp)) {
549 seterr("can't read pgrp at %x", proc.p_pgrp);
550 return (-1);
551 }
552 eproc.e_sess = pgrp.pg_session;
553 eproc.e_pgid = pgrp.pg_id;
554 eproc.e_jobc = pgrp.pg_jobc;
555 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
556 != sizeof (struct session)) {
557 seterr("can't read session at %x", pgrp.pg_session);
558 return (-1);
559 }
560 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
561 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
562 != sizeof (struct tty)) {
563 seterr("can't read tty at %x", sess.s_ttyp);
564 return (-1);
565 }
566 eproc.e_tdev = tty.t_dev;
567 eproc.e_tsess = tty.t_session;
568 if (tty.t_pgrp != NULL) {
569 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
570 pgrp)) != sizeof (struct pgrp)) {
571 seterr("can't read tpgrp at &x",
572 tty.t_pgrp);
573 return (-1);
574 }
575 eproc.e_tpgid = pgrp.pg_id;
576 } else
577 eproc.e_tpgid = -1;
578 } else
579 eproc.e_tdev = NODEV;
580 if (proc.p_wmesg)
581 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
582 #ifdef NEWVM
583 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
584 sizeof (struct vmspace));
585 eproc.e_xsize = eproc.e_xrssize =
586 eproc.e_xccount = eproc.e_xswrss = 0;
587 #else
588 if (proc.p_textp) {
589 kvm_read(proc.p_textp, &text, sizeof (text));
590 eproc.e_xsize = text.x_size;
591 eproc.e_xrssize = text.x_rssize;
592 eproc.e_xccount = text.x_ccount;
593 eproc.e_xswrss = text.x_swrss;
594 } else {
595 eproc.e_xsize = eproc.e_xrssize =
596 eproc.e_xccount = eproc.e_xswrss = 0;
597 }
598 #endif
599
600 switch(ki_op(what)) {
601
602 case KINFO_PROC_PGRP:
603 if (eproc.e_pgid != (pid_t)arg)
604 continue;
605 break;
606
607 case KINFO_PROC_TTY:
608 if ((proc.p_flag&SCTTY) == 0 ||
609 eproc.e_tdev != (dev_t)arg)
610 continue;
611 break;
612 }
613
614 i++;
615 bcopy(&proc, bp, sizeof (struct proc));
616 bp += sizeof (struct proc);
617 bcopy(&eproc, bp, sizeof (struct eproc));
618 bp+= sizeof (struct eproc);
619 }
620 if (!doingzomb) {
621 /* zombproc */
622 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
623 sizeof (struct proc *)) != sizeof (struct proc *)) {
624 seterr("can't read zombproc");
625 return (-1);
626 }
627 doingzomb = 1;
628 goto again;
629 }
630
631 return (i);
632 }
633
634 struct proc *
635 kvm_nextproc()
636 {
637
638 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
639 return (NULL);
640 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
641 seterr("end of proc list");
642 return (NULL);
643 }
644 return((struct proc *)(kvmprocptr++));
645 }
646
647 struct eproc *
648 kvm_geteproc(p)
649 const struct proc *p;
650 {
651 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
652 }
653
654 kvm_setproc()
655 {
656 kvmprocptr = kvmprocbase;
657 }
658
659 kvm_freeprocs()
660 {
661
662 if (kvmprocbase) {
663 free(kvmprocbase);
664 kvmprocbase = NULL;
665 }
666 }
667
668 #ifdef NEWVM
669 struct user *
670 kvm_getu(p)
671 const struct proc *p;
672 {
673 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
674 register int i;
675 register char *up;
676 u_int vaddr;
677 struct swapblk swb;
678
679 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
680 return (NULL);
681 if (p->p_stat == SZOMB) {
682 seterr("zombie process");
683 return (NULL);
684 }
685
686 if ((p->p_flag & SLOAD) == 0) {
687 vm_offset_t maddr;
688
689 if (swap < 0) {
690 seterr("no swap");
691 return (NULL);
692 }
693 /*
694 * Costly operation, better set enable_swap to zero
695 * in vm/vm_glue.c, since paging of user pages isn't
696 * done yet anyway.
697 */
698 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
699 return NULL;
700
701 if (maddr == 0 && swb.size < UPAGES * NBPG)
702 return NULL;
703
704 for (i = 0; i < UPAGES; i++) {
705 if (maddr) {
706 (void) lseek(mem, maddr + i * NBPG, 0);
707 if (read(mem,
708 (char *)user.upages[i], NBPG) != NBPG) {
709 seterr(
710 "can't read u for pid %d from %s",
711 p->p_pid, swapf);
712 return NULL;
713 }
714 } else {
715 (void) lseek(swap, swb.offset + i * NBPG, 0);
716 if (read(swap,
717 (char *)user.upages[i], NBPG) != NBPG) {
718 seterr(
719 "can't read u for pid %d from %s",
720 p->p_pid, swapf);
721 return NULL;
722 }
723 }
724 }
725 return(&user.user);
726 }
727 /*
728 * Read u-area one page at a time for the benefit of post-mortems
729 */
730 up = (char *) p->p_addr;
731 for (i = 0; i < UPAGES; i++) {
732 klseek(kmem, (long)up, 0);
733 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
734 seterr("cant read page %x of u of pid %d from %s",
735 up, p->p_pid, kmemf);
736 return(NULL);
737 }
738 up += CLBYTES;
739 }
740 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
741
742 kp->kp_eproc.e_vm.vm_rssize =
743 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
744 return(&user.user);
745 }
746 #else
747 struct user *
748 kvm_getu(p)
749 const struct proc *p;
750 {
751 struct pte *pteaddr, apte;
752 struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
753 register int i;
754 int ncl;
755
756 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
757 return (NULL);
758 if (p->p_stat == SZOMB) {
759 seterr("zombie process");
760 return (NULL);
761 }
762 if ((p->p_flag & SLOAD) == 0) {
763 if (swap < 0) {
764 seterr("no swap");
765 return (NULL);
766 }
767 (void) lseek(swap, (long)dtob(p->p_swaddr), 0);
768 if (read(swap, (char *)&user.user, sizeof (struct user)) !=
769 sizeof (struct user)) {
770 seterr("can't read u for pid %d from %s",
771 p->p_pid, swapf);
772 return (NULL);
773 }
774 pcbpf = 0;
775 argaddr0 = 0;
776 argaddr1 = 0;
777 return (&user.user);
778 }
779 pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
780 klseek(kmem, (long)pteaddr, 0);
781 if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
782 seterr("can't read indir pte to get u for pid %d from %s",
783 p->p_pid, kmemf);
784 return (NULL);
785 }
786 lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
787 if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
788 seterr("can't read page table for u of pid %d from %s",
789 p->p_pid, memf);
790 return (NULL);
791 }
792 if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
793 argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
794 else
795 argaddr0 = 0;
796 if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
797 argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
798 else
799 argaddr1 = 0;
800 pcbpf = arguutl[CLSIZE*2].pg_pfnum;
801 ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
802 while (--ncl >= 0) {
803 i = ncl * CLSIZE;
804 lseek(mem,
805 (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
806 if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
807 seterr("can't read page %d of u of pid %d from %s",
808 arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
809 return(NULL);
810 }
811 }
812 return (&user.user);
813 }
814 #endif
815
816 int
817 kvm_procread(p, addr, buf, len)
818 const struct proc *p;
819 const unsigned addr, len;
820 char *buf;
821 {
822 register struct kinfo_proc *kp = (struct kinfo_proc *) p;
823 struct swapblk swb;
824 vm_offset_t swaddr = 0, memaddr = 0;
825 unsigned real_len;
826
827 real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));
828
829 #if defined(hp300)
830 if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
831 struct pte pte[CLSIZE*2];
832
833 klseek(kmem,
834 (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
835 [btoc(USRSTACK-CLBYTES*2)], 0);
836 if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
837 memaddr = ctob(pftoc(pte[CLSIZE*1].pg_pfnum)) +
838 (addr % (1 << CLSHIFT));
839 }
840 }
841 #endif
842 #if defined(i386)
843 if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
844 struct pde pde;
845
846 klseek(kmem,
847 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);
848
849 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
850 && pde.pd_v) {
851
852 struct pte pte;
853
854 if (lseek(mem, (long)ctob(pde.pd_pfnum) +
855 (ptei(addr) * sizeof pte), 0) == -1)
856 seterr("kvm_procread: lseek");
857 if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
858 if (pte.pg_v) {
859 memaddr = (long)ctob(pte.pg_pfnum) +
860 (addr % (1 << CLSHIFT));
861 }
862 } else {
863 seterr("kvm_procread: read");
864 }
865 }
866 }
867 #endif /* i386 */
868
869 if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
870 if (memaddr != 0) {
871 memaddr += addr & CLOFSET;
872 } else {
873 swaddr = swb.offset + (addr & CLOFSET);
874 swb.size -= addr & CLOFSET;
875 }
876 }
877
878 if (memaddr) {
879 if (lseek(mem, memaddr, 0) == -1)
880 seterr("kvm_getu: lseek");
881 real_len = read(mem, buf, real_len);
882 if (real_len == -1) {
883 seterr("kvm_procread: read");
884 return 0;
885 }
886 } else if (swaddr) {
887 char bouncebuf[CLBYTES];
888 unsigned len;
889 if (lseek(swap, swaddr & ~CLOFSET, 0) == -1) {
890 seterr("kvm_procread: lseek");
891 return 0;
892 }
893 len = read(swap, bouncebuf, CLBYTES);
894 if (len == -1 || len <= (swaddr & CLOFSET)) {
895 seterr("kvm_procread: read");
896 return 0;
897 }
898 len = MIN(len - (swaddr & CLOFSET), real_len);
899 memcpy(buf, &bouncebuf[swaddr & CLOFSET], len);
900 return len;
901 } else
902 real_len = 0;
903
904 return real_len;
905 }
906
907 int
908 kvm_procreadstr(p, addr, buf, len)
909 const struct proc *p;
910 const unsigned addr;
911 char *buf;
912 unsigned len;
913 {
914 int done, little;
915 char copy[200], *pb;
916 char a;
917
918 done = 0;
919 copy[0] = '\0';
920 while (len) {
921 little = kvm_procread(p, addr+done, copy, MIN(len, sizeof copy));
922 if (little<1)
923 break;
924 pb = copy;
925 while (little--) {
926 len--;
927 if( (*buf++ = *pb++) == '\0' )
928 return done;
929 done++;
930 }
931 }
932 return done;
933 }
934
935 char *
936 kvm_getargs(p, up)
937 const struct proc *p;
938 const struct user *up;
939 {
940 static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
941 register char *cp, *acp;
942 int left, rv;
943 struct ps_strings arginfo;
944
945 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
946 goto retucomm;
947
948 if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
949 sizeof(arginfo))
950 goto bad;
951
952 cmdbuf[0] = '\0';
953 cp = cmdbuf;
954 acp = arginfo.ps_argvstr;
955 left = ARG_MAX + 1;
956 while (arginfo.ps_nargvstr--) {
957 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
958 acp += rv + 1;
959 left -= rv + 1;
960 cp += rv;
961 *cp++ = ' ';
962 *cp = '\0';
963 } else
964 goto bad;
965 }
966 cp-- ; *cp = '\0';
967
968 if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
969 (void) strcat(cmdbuf, " (");
970 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
971 (void) strcat(cmdbuf, ")");
972 }
973 return (cmdbuf);
974
975 bad:
976 seterr("error locating command name for pid %d", p->p_pid);
977 retucomm:
978 (void) strcpy(cmdbuf, "(");
979 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
980 (void) strcat(cmdbuf, ")");
981 return (cmdbuf);
982 }
983
984 char *
985 kvm_getenv(p, up)
986 const struct proc *p;
987 const struct user *up;
988 {
989 static char envbuf[ARG_MAX + 1];
990 register char *cp, *acp;
991 int left, rv;
992 struct ps_strings arginfo;
993
994 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
995 goto retemptyenv;
996
997 if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
998 sizeof(arginfo))
999 goto bad;
1000
1001 cp = envbuf;
1002 acp = arginfo.ps_envstr;
1003 left = ARG_MAX + 1;
1004 while (arginfo.ps_nenvstr--) {
1005 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
1006 acp += rv + 1;
1007 left -= rv + 1;
1008 cp += rv;
1009 *cp++ = ' ';
1010 *cp = '\0';
1011 } else
1012 goto bad;
1013 }
1014 cp-- ; *cp = '\0';
1015 return (envbuf);
1016
1017 bad:
1018 seterr("error locating environment for pid %d", p->p_pid);
1019 retemptyenv:
1020 envbuf[0] = '\0';
1021 return (envbuf);
1022 }
1023
1024 static
1025 getkvars()
1026 {
1027 if (kvm_nlist(nl) == -1)
1028 return (-1);
1029 if (deadkernel) {
1030 /* We must do the sys map first because klseek uses it */
1031 long addr;
1032
1033 #ifndef NEWVM
1034 Syssize = nl[X_SYSSIZE].n_value;
1035 Sysmap = (struct pte *)
1036 calloc((unsigned) Syssize, sizeof (struct pte));
1037 if (Sysmap == NULL) {
1038 seterr("out of space for Sysmap");
1039 return (-1);
1040 }
1041 addr = (long) nl[X_SYSMAP].n_value;
1042 addr &= ~KERNBASE;
1043 (void) lseek(kmem, addr, 0);
1044 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1045 != Syssize * sizeof (struct pte)) {
1046 seterr("can't read Sysmap");
1047 return (-1);
1048 }
1049 #endif
1050 #if defined(hp300)
1051 addr = (long) nl[X_LOWRAM].n_value;
1052 (void) lseek(kmem, addr, 0);
1053 if (read(kmem, (char *) &lowram, sizeof (lowram))
1054 != sizeof (lowram)) {
1055 seterr("can't read lowram");
1056 return (-1);
1057 }
1058 lowram = btop(lowram);
1059 Sysseg = (struct ste *) malloc(NBPG);
1060 if (Sysseg == NULL) {
1061 seterr("out of space for Sysseg");
1062 return (-1);
1063 }
1064 addr = (long) nl[X_SYSSEG].n_value;
1065 (void) lseek(kmem, addr, 0);
1066 read(kmem, (char *)&addr, sizeof(addr));
1067 (void) lseek(kmem, (long)addr, 0);
1068 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1069 seterr("can't read Sysseg");
1070 return (-1);
1071 }
1072 #endif
1073 #if defined(i386)
1074 PTD = (struct pde *) malloc(NBPG);
1075 if (PTD == NULL) {
1076 seterr("out of space for PTD");
1077 return (-1);
1078 }
1079 addr = (long) nl[X_IdlePTD].n_value;
1080 (void) lseek(kmem, addr, 0);
1081 read(kmem, (char *)&addr, sizeof(addr));
1082 (void) lseek(kmem, (long)addr, 0);
1083 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1084 seterr("can't read PTD");
1085 return (-1);
1086 }
1087 #endif
1088 }
1089 #ifndef NEWVM
1090 usrpt = (struct pte *)nl[X_USRPT].n_value;
1091 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1092 #endif
1093 if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1094 sizeof (long)) {
1095 seterr("can't read nswap");
1096 return (-1);
1097 }
1098 if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1099 sizeof (long)) {
1100 seterr("can't read dmmin");
1101 return (-1);
1102 }
1103 if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1104 sizeof (long)) {
1105 seterr("can't read dmmax");
1106 return (-1);
1107 }
1108 return (0);
1109 }
1110
1111 kvm_read(loc, buf, len)
1112 void *loc;
1113 void *buf;
1114 {
1115 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1116 return (-1);
1117 if (iskva(loc)) {
1118 klseek(kmem, (off_t) loc, 0);
1119 if (read(kmem, buf, len) != len) {
1120 seterr("error reading kmem at %x", loc);
1121 return (-1);
1122 }
1123 } else {
1124 lseek(mem, (off_t) loc, 0);
1125 if (read(mem, buf, len) != len) {
1126 seterr("error reading mem at %x", loc);
1127 return (-1);
1128 }
1129 }
1130 return (len);
1131 }
1132
1133 static void
1134 klseek(fd, loc, off)
1135 int fd;
1136 off_t loc;
1137 int off;
1138 {
1139
1140 if (deadkernel) {
1141 if ((loc = Vtophys(loc)) == -1)
1142 return;
1143 }
1144 (void) lseek(fd, (off_t)loc, off);
1145 }
1146
1147 #ifndef NEWVM
1148 /*
1149 * Given a base/size pair in virtual swap area,
1150 * return a physical base/size pair which is the
1151 * (largest) initial, physically contiguous block.
1152 */
1153 static void
1154 vstodb(vsbase, vssize, dmp, dbp, rev)
1155 register int vsbase;
1156 int vssize;
1157 struct dmap *dmp;
1158 register struct dblock *dbp;
1159 {
1160 register int blk = dmmin;
1161 register swblk_t *ip = dmp->dm_map;
1162
1163 vsbase = ctod(vsbase);
1164 vssize = ctod(vssize);
1165 if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1166 /*panic("vstodb")*/;
1167 while (vsbase >= blk) {
1168 vsbase -= blk;
1169 if (blk < dmmax)
1170 blk *= 2;
1171 ip++;
1172 }
1173 if (*ip <= 0 || *ip + blk > nswap)
1174 /*panic("vstodb")*/;
1175 dbp->db_size = MIN(vssize, blk - vsbase);
1176 dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1177 }
1178 #endif
1179
1180 #ifdef NEWVM
1181 static off_t
1182 Vtophys(loc)
1183 u_long loc;
1184 {
1185 off_t newloc = (off_t) -1;
1186 #ifdef hp300
1187 int p, ste, pte;
1188
1189 ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
1190 if ((ste & SG_V) == 0) {
1191 seterr("vtophys: segment not valid");
1192 return((off_t) -1);
1193 }
1194 p = btop(loc & SG_PMASK);
1195 newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
1196 (void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
1197 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1198 seterr("vtophys: cannot locate pte");
1199 return((off_t) -1);
1200 }
1201 newloc = pte & PG_FRAME;
1202 if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
1203 seterr("vtophys: page not valid");
1204 return((off_t) -1);
1205 }
1206 newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
1207 #endif
1208 #ifdef i386
1209 struct pde pde;
1210 struct pte pte;
1211 int p;
1212
1213 pde = PTD[loc >> PD_SHIFT];
1214 if (pde.pd_v == 0) {
1215 seterr("vtophys: page directory entry not valid");
1216 return((off_t) -1);
1217 }
1218 p = btop(loc & PT_MASK);
1219 newloc = pde.pd_pfnum + (p * sizeof(struct pte));
1220 (void) lseek(kmem, (long)newloc, 0);
1221 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1222 seterr("vtophys: cannot obtain desired pte");
1223 return((off_t) -1);
1224 }
1225 newloc = pte.pg_pfnum;
1226 if (pte.pg_v == 0) {
1227 seterr("vtophys: page table entry not valid");
1228 return((off_t) -1);
1229 }
1230 newloc += (loc & PGOFSET);
1231 #endif
1232 return((off_t) newloc);
1233 }
1234 #else
1235 static off_t
1236 vtophys(loc)
1237 long loc;
1238 {
1239 int p;
1240 off_t newloc;
1241 register struct pte *pte;
1242
1243 newloc = loc & ~KERNBASE;
1244 p = btop(newloc);
1245 #if defined(vax) || defined(tahoe)
1246 if ((loc & KERNBASE) == 0) {
1247 seterr("vtophys: translating non-kernel address");
1248 return((off_t) -1);
1249 }
1250 #endif
1251 if (p >= Syssize) {
1252 seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
1253 return((off_t) -1);
1254 }
1255 pte = &Sysmap[p];
1256 if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
1257 seterr("vtophys: page not valid");
1258 return((off_t) -1);
1259 }
1260 #if defined(hp300)
1261 if (pte->pg_pfnum < lowram) {
1262 seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
1263 return((off_t) -1);
1264 }
1265 #endif
1266 loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
1267 return(loc);
1268 }
1269 #endif
1270
1271
1272 #ifdef NEWVM
1273 /*
1274 * locate address of unwired or swapped page
1275 */
1276
/* Compile-time switch for verbose debugging output to stderr. */
#define DEBUG 0

/*
 * Read `len' bytes of kernel memory at address `off' into `addr';
 * evaluates to non-zero only when the full length was transferred.
 */
#define KREAD(off, addr, len) \
	(kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1281
1282
/*
 * Locate the backing of virtual address `vaddr' in process `p': if the
 * page is resident, return its physical address through *maddr; if it
 * has been paged out, return its swap location through *swb.  Returns
 * 1 on success, 0 on failure (error string set).
 *
 * NOTE(review): `p' is immediately recast to struct kinfo_proc *, so
 * callers apparently pass kinfo_proc data rather than a kernel
 * struct proc -- confirm against the callers.
 */
static int
vatosw(p, vaddr, maddr, swb)
struct proc	*p ;
vm_offset_t	vaddr;
vm_offset_t	*maddr;
struct swapblk	*swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object vm_object;
	struct vm_map_entry vm_entry;
	struct pager_struct pager;
	struct swpager swpager;
	struct swblock swblock;
	long addr, off;
	int i;

	/* Pids 0 and 2 are skipped -- presumably kernel processes with
	 * no pageable map; TODO confirm. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	/* Scan the map-entry list for an entry covering vaddr that has
	 * a backing object. */
	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
		    (vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* Share maps and submaps are not handled. */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/* Walk the shadow-object chain, probing the resident page hash
	 * at each level; fall out of the loop when no shadow remains. */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
			p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
			p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* Not resident at any level: the page must live in swap. */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	if (!KREAD((long)swpager.sw_blocks +
	    (off/dbtob(swpager.sw_bsize)) * sizeof swblock,
	    &swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Byte offset of the page within its swap block, and the bytes
	 * remaining in that block from the offset onward. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1390
1391
/*
 * atop/vm_page_hash mirror the kernel's page-hash computation; both
 * expand references to page_shift and vm_page_hash_mask, which
 * findpage() reads from the kernel on first use.  All macro arguments
 * are fully parenthesized so the macros expand safely even when an
 * argument is a compound expression (the previous `(unsigned)object'
 * would have cast only the first operand of such an expression).
 */
#define atop(x) (((unsigned)(x)) >> page_shift)
#define vm_page_hash(object, offset) \
	(((unsigned)(object)+(unsigned)atop(offset))&vm_page_hash_mask)
1395
1396 static int
1397 findpage(object, offset, maddr)
1398 long object;
1399 long offset;
1400 vm_offset_t *maddr;
1401 {
1402 static long vm_page_hash_mask;
1403 static long vm_page_buckets;
1404 static long page_shift;
1405 queue_head_t bucket;
1406 struct vm_page mem;
1407 long addr, baddr;
1408
1409 if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
1410 &vm_page_hash_mask, sizeof (long))) {
1411 seterr("can't read vm_page_hash_mask");
1412 return 0;
1413 }
1414 if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
1415 &page_shift, sizeof (long))) {
1416 seterr("can't read page_shift");
1417 return 0;
1418 }
1419 if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
1420 &vm_page_buckets, sizeof (long))) {
1421 seterr("can't read vm_page_buckets");
1422 return 0;
1423 }
1424
1425 baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
1426 if (!KREAD(baddr, &bucket, sizeof (bucket))) {
1427 seterr("can't read vm_page_bucket");
1428 return 0;
1429 }
1430
1431 addr = (long)bucket.next;
1432 while (addr != baddr) {
1433 if (!KREAD(addr, &mem, sizeof (mem))) {
1434 seterr("can't read vm_page");
1435 return 0;
1436 }
1437 if ((long)mem.object == object && mem.offset == offset) {
1438 *maddr = (long)mem.phys_addr;
1439 return 1;
1440 }
1441 addr = (long)mem.hashq.next;
1442 }
1443 return 0;
1444 }
1445 #endif /* NEWVM */
1446
#include <string.h>
#include <varargs.h>
1448 static char errbuf[_POSIX2_LINE_MAX];
1449
1450 static void
1451 seterr(va_alist)
1452 va_dcl
1453 {
1454 char *fmt;
1455 va_list ap;
1456
1457 va_start(ap);
1458 fmt = va_arg(ap, char *);
1459 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1460 #if DEBUG
1461 (void) vfprintf(stderr, fmt, ap);
1462 #endif
1463 va_end(ap);
1464 }
1465
1466 static void
1467 setsyserr(va_alist)
1468 va_dcl
1469 {
1470 char *fmt, *cp;
1471 va_list ap;
1472 extern int errno;
1473
1474 va_start(ap);
1475 fmt = va_arg(ap, char *);
1476 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1477 for (cp=errbuf; *cp; cp++)
1478 ;
1479 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1480 va_end(ap);
1481 }
1482
1483 char *
1484 kvm_geterr()
1485 {
1486 return (errbuf);
1487 }
1488