kvm.c revision 1.16 1 /*-
2 * Copyright (c) 1993 Christopher G. Demetriou
3 * Copyright (c) 1989 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by the University of
17 * California, Berkeley and its contributors.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #if defined(LIBC_SCCS) && !defined(lint)
36 /*static char sccsid[] = "from: @(#)kvm.c 5.18 (Berkeley) 5/7/91";*/
37 static char rcsid[] = "$Id: kvm.c,v 1.16 1993/08/15 01:54:29 mycroft Exp $";
38 #endif /* LIBC_SCCS and not lint */
39
40 #include <sys/param.h>
41 #include <sys/user.h>
42 #include <sys/proc.h>
43 #include <sys/ioctl.h>
44 #include <sys/kinfo.h>
45 #include <sys/tty.h>
46 #include <sys/exec.h>
47 #include <machine/vmparam.h>
48 #include <fcntl.h>
49 #include <nlist.h>
50 #include <kvm.h>
51 #include <ndbm.h>
52 #include <limits.h>
53 #include <paths.h>
54 #include <stdio.h>
55 #include <string.h>
56
57 #ifdef SPPWAIT
58 #define NEWVM
59 #endif
60
61 #ifdef NEWVM
62 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
63 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
64 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
65 #include <vm/vm_page.h>
66 #include <vm/swap_pager.h>
67 #include <sys/kinfo_proc.h>
68 #if defined(hp300) || defined(amiga)
69 #include <machine/pte.h>
70 #define btos(x) (((unsigned)(x)) >> SEGSHIFT) /* XXX */
71 #endif
72 #else /* NEWVM */
73 #include <machine/pte.h>
74 #include <sys/vmmac.h>
75 #include <sys/text.h>
76 #endif /* NEWVM */
77
/*
 * files: pathnames and descriptors for the kernel image, physical
 * memory, kernel virtual memory and the swap device, plus the ndbm
 * database used to cache nlist lookups.
 */
static const char *unixf, *memf, *kmemf, *swapf;
static int unixx, mem, kmem, swap;
static DBM *db;
/*
 * flags: deadkernel is set when examining a crash dump rather than
 * the running system; kvminit/kvmfilesopen track one-time setup.
 */
static int deadkernel;
static int kvminit = 0;
static int kvmfilesopen = 0;
/*
 * state for the kvm_nextproc() iterator: base, cursor and count of
 * the kinfo_proc array fetched by kvm_getprocs().
 */
static struct kinfo_proc *kvmprocbase, *kvmprocptr;
static int kvmnprocs;
/*
 * u. buffer: overlays the u-area both as a struct user and as the
 * raw pages it is read in as.
 */
static union {
	struct user user;
	char upages[UPAGES][NBPG];
} user;

#ifdef NEWVM
/* describes where a virtual range is backed on the swap device */
struct swapblk {
	long offset;		/* offset in swap device */
	long size;		/* remaining size of block in swap device */
};
#endif
/*
 * random other stuff
 */
#ifndef NEWVM
static struct pte *Usrptmap, *usrpt;
static struct pte *Sysmap;
static int Syssize;
#endif
static int dmmin, dmmax;	/* swap interleave parameters (from kernel) */
static int pcbpf;		/* page frame of the pcb, set by kvm_getu() */
static int nswap;		/* size of the swap area (from kernel) */
static char *tmp;		/* scratch for the basename() macro below */
#if defined(hp300) || defined(amiga)
static int lowram;		/* first RAM page frame number */
static struct ste *Sysseg;	/* snapshot of the kernel segment table */
#endif
#if defined(i386)
static struct pde *PTD;		/* snapshot of the kernel page directory */
#endif

#define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
#define	MAXSYMSIZE	256	/* longest symbol name we will look up */

#if defined(hp300) || defined(amiga)
#define pftoc(f)	((f) - lowram)	/* page frame -> core cluster */
#define iskva(v)	(1)		/* every address is kernel virtual */
#endif

#ifndef pftoc
#define pftoc(f)	(f)
#endif
#ifndef iskva
#define iskva(v)	((u_long)(v) & KERNBASE)
#endif
143
/*
 * Kernel symbols resolved at initialization time.  Table order must
 * match the X_* indices below.  For a live kernel the entries from
 * X_DEADKERNEL onward are not needed; kvm_openfiles() truncates the
 * list there by blanking that entry's name.
 */
static struct nlist nl[] = {
	{ "_Usrptmap" },
#define	X_USRPTMAP	0
	{ "_usrpt" },
#define	X_USRPT		1
	{ "_nswap" },
#define	X_NSWAP		2
	{ "_dmmin" },
#define	X_DMMIN		3
	{ "_dmmax" },
#define	X_DMMAX		4
	{ "_vm_page_buckets" },
#define X_VM_PAGE_BUCKETS	5
	{ "_vm_page_hash_mask" },
#define X_VM_PAGE_HASH_MASK	6
	{ "_page_shift" },
#define X_PAGE_SHIFT	7
	/*
	 * everything here and down, only if a dead kernel
	 */
	{ "_Sysmap" },
#define	X_SYSMAP	8
#define	X_DEADKERNEL	X_SYSMAP
	{ "_Syssize" },
#define	X_SYSSIZE	9
	{ "_allproc" },
#define X_ALLPROC	10
	{ "_zombproc" },
#define X_ZOMBPROC	11
	{ "_nproc" },
#define	X_NPROC		12
#define	X_LAST		12
#if defined(hp300) || defined(amiga)
	{ "_Sysseg" },
#define	X_SYSSEG	(X_LAST+1)
	{ "_lowram" },
#define	X_LOWRAM	(X_LAST+2)
#endif
#if defined(i386)
	{ "_IdlePTD" },
#define	X_IdlePTD	(X_LAST+1)
#endif
	{ "" },
};

/* forward declarations of local helpers (pre-ANSI: no prototypes) */
static off_t Vtophys();
static void klseek(), seterr(), setsyserr(), vstodb();
static int getkvars(), kvm_doprocs(), kvm_init();
#ifdef NEWVM
static int vatosw();
static int findpage();
#endif
196
/*
 * Open the kernel image, memory, kernel-memory and swap files.
 * NULL arguments select the defaults (_PATH_UNIX, _PATH_MEM, and for
 * a live kernel _PATH_KMEM/_PATH_DRUM).  A non-NULL mf marks this a
 * dead-kernel (post-mortem) session: the memory file doubles as the
 * kmem file and no swap is opened unless sf was given explicitly.
 *
 * returns 0 if files were opened now,
 * 1 if files were already opened,
 * -1 if files could not be opened.
 */
kvm_openfiles(uf, mf, sf)
	const char *uf, *mf, *sf;
{
	if (kvmfilesopen)
		return (1);
	unixx = mem = kmem = swap = -1;
	unixf = (uf == NULL) ? _PATH_UNIX : uf;
	memf = (mf == NULL) ? _PATH_MEM : mf;

	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", unixf);
		goto failed;
	}
	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
		setsyserr("can't open %s", memf);
		goto failed;
	}
	if (sf != NULL)
		swapf = sf;
	if (mf != NULL) {
		/* dead kernel: kmem aliases the memory-image descriptor */
		deadkernel++;
		kmemf = mf;
		kmem = mem;
		swap = -1;
	} else {
		kmemf = _PATH_KMEM;
		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
			setsyserr("can't open %s", kmemf);
			goto failed;
		}
		swapf = (sf == NULL) ? _PATH_DRUM : sf;
		/*
		 * live kernel - avoid looking up nlist entries
		 * past X_DEADKERNEL.
		 */
		nl[X_DEADKERNEL].n_name = "";
	}
	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
		seterr("can't open %s", swapf);
		goto failed;
	}
	kvmfilesopen++;
	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
		return (-1);
	return (0);
failed:
	kvm_close();
	return (-1);
}
251
252 static
253 kvm_init(uf, mf, sf)
254 char *uf, *mf, *sf;
255 {
256 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
257 return (-1);
258 if (getkvars() == -1)
259 return (-1);
260 kvminit = 1;
261
262 return (0);
263 }
264
265 kvm_close()
266 {
267 if (unixx != -1) {
268 close(unixx);
269 unixx = -1;
270 }
271 if (kmem != -1) {
272 if (kmem != mem)
273 close(kmem);
274 /* otherwise kmem is a copy of mem, and will be closed below */
275 kmem = -1;
276 }
277 if (mem != -1) {
278 close(mem);
279 mem = -1;
280 }
281 if (swap != -1) {
282 close(swap);
283 swap = -1;
284 }
285 if (db != NULL) {
286 dbm_close(db);
287 db = NULL;
288 }
289 kvminit = 0;
290 kvmfilesopen = 0;
291 deadkernel = 0;
292 #ifndef NEWVM
293 if (Sysmap) {
294 free(Sysmap);
295 Sysmap = NULL;
296 }
297 #endif
298 }
299
300 kvm_nlist(nl)
301 struct nlist *nl;
302 {
303 datum key, data;
304 char dbname[MAXPATHLEN];
305 char dbversion[_POSIX2_LINE_MAX];
306 char kversion[_POSIX2_LINE_MAX];
307 int dbversionlen;
308 char symbuf[MAXSYMSIZE];
309 struct nlist nbuf, *n;
310 int num, did;
311
312 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
313 return (-1);
314 if (deadkernel)
315 goto hard2;
316 /*
317 * initialize key datum
318 */
319 key.dptr = symbuf;
320
321 if (db != NULL)
322 goto win; /* off to the races */
323 /*
324 * open database
325 */
326 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
327 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
328 goto hard2;
329 /*
330 * read version out of database
331 */
332 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
333 key.dsize = (sizeof ("VERSION") - 1);
334 data = dbm_fetch(db, key);
335 if (data.dptr == NULL)
336 goto hard1;
337 bcopy(data.dptr, dbversion, data.dsize);
338 dbversionlen = data.dsize;
339 /*
340 * read version string from kernel memory
341 */
342 bcopy("_version", symbuf, sizeof ("_version")-1);
343 key.dsize = (sizeof ("_version")-1);
344 data = dbm_fetch(db, key);
345 if (data.dptr == NULL)
346 goto hard1;
347 if (data.dsize != sizeof (struct nlist))
348 goto hard1;
349 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
350 lseek(kmem, nbuf.n_value, 0);
351 if (read(kmem, kversion, dbversionlen) != dbversionlen)
352 goto hard1;
353 /*
354 * if they match, we win - otherwise do it the hard way
355 */
356 if (bcmp(dbversion, kversion, dbversionlen) != 0)
357 goto hard1;
358 /*
359 * getem from the database.
360 */
361 win:
362 num = did = 0;
363 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
364 int len;
365 /*
366 * clear out fields from users buffer
367 */
368 n->n_type = 0;
369 n->n_other = 0;
370 n->n_desc = 0;
371 n->n_value = 0;
372 /*
373 * query db
374 */
375 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
376 seterr("symbol too large");
377 return (-1);
378 }
379 (void)strcpy(symbuf, n->n_name);
380 key.dsize = len;
381 data = dbm_fetch(db, key);
382 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
383 continue;
384 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
385 n->n_value = nbuf.n_value;
386 n->n_type = nbuf.n_type;
387 n->n_desc = nbuf.n_desc;
388 n->n_other = nbuf.n_other;
389 did++;
390 }
391 return (num - did);
392 hard1:
393 dbm_close(db);
394 db = NULL;
395 hard2:
396 num = nlist(unixf, nl);
397 if (num == -1)
398 seterr("nlist (hard way) failed");
399 return (num);
400 }
401
402 kvm_getprocs(what, arg)
403 int what, arg;
404 {
405 static int ocopysize = -1;
406
407 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
408 return (NULL);
409 if (!deadkernel) {
410 int ret, copysize;
411
412 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
413 setsyserr("can't get estimate for kerninfo");
414 return (-1);
415 }
416 copysize = ret;
417 if (copysize > ocopysize || !kvmprocbase) {
418 if (ocopysize == -1 || !kvmprocbase)
419 kvmprocbase =
420 (struct kinfo_proc *)malloc(copysize);
421 else
422 kvmprocbase =
423 (struct kinfo_proc *)realloc(kvmprocbase,
424 copysize);
425 if (!kvmprocbase) {
426 seterr("out of memory");
427 return (-1);
428 }
429 }
430 ocopysize = copysize;
431 if ((ret = getkerninfo(what, kvmprocbase, ©size,
432 arg)) == -1) {
433 setsyserr("can't get proc list");
434 return (-1);
435 }
436 if (copysize % sizeof (struct kinfo_proc)) {
437 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
438 copysize, sizeof (struct kinfo_proc));
439 return (-1);
440 }
441 kvmnprocs = copysize / sizeof (struct kinfo_proc);
442 } else {
443 int nproc;
444
445 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
446 sizeof (int)) != sizeof (int)) {
447 seterr("can't read nproc");
448 return (-1);
449 }
450 if ((kvmprocbase = (struct kinfo_proc *)
451 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
452 seterr("out of memory (addr: %x nproc = %d)",
453 nl[X_NPROC].n_value, nproc);
454 return (-1);
455 }
456 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
457 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
458 }
459 kvmprocptr = kvmprocbase;
460
461 return (kvmnprocs);
462 }
463
464 /*
465 * XXX - should NOT give up so easily - especially since the kernel
466 * may be corrupt (it died). Should gather as much information as possible.
467 * Follows proc ptrs instead of reading table since table may go
468 * away soon.
469 */
470 static
471 kvm_doprocs(what, arg, buff)
472 int what, arg;
473 char *buff;
474 {
475 struct proc *p, proc;
476 register char *bp = buff;
477 int i = 0;
478 int doingzomb = 0;
479 struct eproc eproc;
480 struct pgrp pgrp;
481 struct session sess;
482 struct tty tty;
483 #ifndef NEWVM
484 struct text text;
485 #endif
486
487 /* allproc */
488 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
489 sizeof (struct proc *)) != sizeof (struct proc *)) {
490 seterr("can't read allproc");
491 return (-1);
492 }
493
494 again:
495 for (; p; p = proc.p_nxt) {
496 if (kvm_read(p, &proc, sizeof (struct proc)) !=
497 sizeof (struct proc)) {
498 seterr("can't read proc at %x", p);
499 return (-1);
500 }
501 #ifdef NEWVM
502 if (kvm_read(proc.p_cred, &eproc.e_pcred,
503 sizeof (struct pcred)) == sizeof (struct pcred))
504 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
505 sizeof (struct ucred));
506 switch(ki_op(what)) {
507
508 case KINFO_PROC_PID:
509 if (proc.p_pid != (pid_t)arg)
510 continue;
511 break;
512
513
514 case KINFO_PROC_UID:
515 if (eproc.e_ucred.cr_uid != (uid_t)arg)
516 continue;
517 break;
518
519 case KINFO_PROC_RUID:
520 if (eproc.e_pcred.p_ruid != (uid_t)arg)
521 continue;
522 break;
523 }
524 #else
525 switch(ki_op(what)) {
526
527 case KINFO_PROC_PID:
528 if (proc.p_pid != (pid_t)arg)
529 continue;
530 break;
531
532
533 case KINFO_PROC_UID:
534 if (proc.p_uid != (uid_t)arg)
535 continue;
536 break;
537
538 case KINFO_PROC_RUID:
539 if (proc.p_ruid != (uid_t)arg)
540 continue;
541 break;
542 }
543 #endif
544 /*
545 * gather eproc
546 */
547 eproc.e_paddr = p;
548 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
549 sizeof (struct pgrp)) {
550 seterr("can't read pgrp at %x", proc.p_pgrp);
551 return (-1);
552 }
553 eproc.e_sess = pgrp.pg_session;
554 eproc.e_pgid = pgrp.pg_id;
555 eproc.e_jobc = pgrp.pg_jobc;
556 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
557 != sizeof (struct session)) {
558 seterr("can't read session at %x", pgrp.pg_session);
559 return (-1);
560 }
561 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
562 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
563 != sizeof (struct tty)) {
564 seterr("can't read tty at %x", sess.s_ttyp);
565 return (-1);
566 }
567 eproc.e_tdev = tty.t_dev;
568 eproc.e_tsess = tty.t_session;
569 if (tty.t_pgrp != NULL) {
570 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
571 pgrp)) != sizeof (struct pgrp)) {
572 seterr("can't read tpgrp at &x",
573 tty.t_pgrp);
574 return (-1);
575 }
576 eproc.e_tpgid = pgrp.pg_id;
577 } else
578 eproc.e_tpgid = -1;
579 } else
580 eproc.e_tdev = NODEV;
581 if (proc.p_wmesg)
582 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
583 #ifdef NEWVM
584 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
585 sizeof (struct vmspace));
586 eproc.e_xsize = eproc.e_xrssize =
587 eproc.e_xccount = eproc.e_xswrss = 0;
588 #else
589 if (proc.p_textp) {
590 kvm_read(proc.p_textp, &text, sizeof (text));
591 eproc.e_xsize = text.x_size;
592 eproc.e_xrssize = text.x_rssize;
593 eproc.e_xccount = text.x_ccount;
594 eproc.e_xswrss = text.x_swrss;
595 } else {
596 eproc.e_xsize = eproc.e_xrssize =
597 eproc.e_xccount = eproc.e_xswrss = 0;
598 }
599 #endif
600
601 switch(ki_op(what)) {
602
603 case KINFO_PROC_PGRP:
604 if (eproc.e_pgid != (pid_t)arg)
605 continue;
606 break;
607
608 case KINFO_PROC_TTY:
609 if ((proc.p_flag&SCTTY) == 0 ||
610 eproc.e_tdev != (dev_t)arg)
611 continue;
612 break;
613 }
614
615 i++;
616 bcopy(&proc, bp, sizeof (struct proc));
617 bp += sizeof (struct proc);
618 bcopy(&eproc, bp, sizeof (struct eproc));
619 bp+= sizeof (struct eproc);
620 }
621 if (!doingzomb) {
622 /* zombproc */
623 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
624 sizeof (struct proc *)) != sizeof (struct proc *)) {
625 seterr("can't read zombproc");
626 return (-1);
627 }
628 doingzomb = 1;
629 goto again;
630 }
631
632 return (i);
633 }
634
635 struct proc *
636 kvm_nextproc()
637 {
638
639 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
640 return (NULL);
641 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
642 seterr("end of proc list");
643 return (NULL);
644 }
645 return((struct proc *)(kvmprocptr++));
646 }
647
648 struct eproc *
649 kvm_geteproc(p)
650 const struct proc *p;
651 {
652 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
653 }
654
/*
 * Rewind the kvm_nextproc() iterator to the start of the process
 * table most recently fetched by kvm_getprocs().
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
659
/*
 * Release the cached process table, leaving the library ready for a
 * fresh kvm_getprocs().  (The NULL guard is kept: free(NULL) was not
 * reliably safe in this era's libc.)
 */
kvm_freeprocs()
{

	if (kvmprocbase) {
		free(kvmprocbase);
		kvmprocbase = NULL;
	}
}
668
669 #ifdef NEWVM
670 struct user *
671 kvm_getu(p)
672 const struct proc *p;
673 {
674 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
675 register int i;
676 register char *up;
677 u_int vaddr;
678 struct swapblk swb;
679
680 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
681 return (NULL);
682 if (p->p_stat == SZOMB) {
683 seterr("zombie process");
684 return (NULL);
685 }
686
687 if ((p->p_flag & SLOAD) == 0) {
688 vm_offset_t maddr;
689
690 if (swap < 0) {
691 seterr("no swap");
692 return (NULL);
693 }
694 /*
695 * Costly operation, better set enable_swap to zero
696 * in vm/vm_glue.c, since paging of user pages isn't
697 * done yet anyway.
698 */
699 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
700 return NULL;
701
702 if (maddr == 0 && swb.size < UPAGES * NBPG)
703 return NULL;
704
705 for (i = 0; i < UPAGES; i++) {
706 if (maddr) {
707 (void) lseek(mem, maddr + i * NBPG, 0);
708 if (read(mem,
709 (char *)user.upages[i], NBPG) != NBPG) {
710 seterr(
711 "can't read u for pid %d from %s",
712 p->p_pid, swapf);
713 return NULL;
714 }
715 } else {
716 (void) lseek(swap, swb.offset + i * NBPG, 0);
717 if (read(swap,
718 (char *)user.upages[i], NBPG) != NBPG) {
719 seterr(
720 "can't read u for pid %d from %s",
721 p->p_pid, swapf);
722 return NULL;
723 }
724 }
725 }
726 return(&user.user);
727 }
728 /*
729 * Read u-area one page at a time for the benefit of post-mortems
730 */
731 up = (char *) p->p_addr;
732 for (i = 0; i < UPAGES; i++) {
733 klseek(kmem, (long)up, 0);
734 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
735 seterr("cant read page %x of u of pid %d from %s",
736 up, p->p_pid, kmemf);
737 return(NULL);
738 }
739 up += CLBYTES;
740 }
741 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
742
743 kp->kp_eproc.e_vm.vm_rssize =
744 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
745 return(&user.user);
746 }
747 #else
/*
 * Pre-NEWVM variant: fetch the u-area through the process page
 * tables (or whole from swap when the process is not loaded).  Also
 * records pcbpf and the argument-page addresses as side effects.
 * Returns NULL on error.
 *
 * NOTE(review): argaddr0/argaddr1 are not declared anywhere visible
 * in this file -- presumably file-scope globals in the !NEWVM
 * configuration; confirm before building this path.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	if ((p->p_flag & SLOAD) == 0) {
		/* swapped out: whole u-area lives on the swap device */
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* resident: find the indirect pte mapping the u-area page table */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	/* read the ptes covering the argument pages and the u-area */
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	/* note where the two argument clusters are, if resident */
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* copy the u-area in, one cluster at a time, back to front */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		    (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
815 #endif
816
/*
 * Read up to len bytes at virtual address addr in process p's
 * address space into buf.  The transfer is clamped so it never
 * crosses a cluster (CLBYTES) boundary.  Returns the number of
 * bytes actually read, or 0 on failure.
 */
int
kvm_procread(p, addr, buf, len)
	const struct proc *p;
	const unsigned addr, len;
	char *buf;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *) p;
	struct swapblk swb;
	vm_offset_t swaddr = 0, memaddr = 0;
	unsigned real_len;

	/* clamp so the transfer stays inside one cluster */
	real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));

#if defined(hp300) || defined(amiga)
	/* translate addr by hand through the segment and page tables */
	if (kp->kp_eproc.e_vm.vm_pmap.pm_stab) {
		unsigned long ste;

		/* position at process segment table */
		klseek (kmem,
			(int) kp->kp_eproc.e_vm.vm_pmap.pm_stab
			+ btos(addr) * sizeof (struct ste), 0);

		if (read (kmem, (char *) &ste, sizeof (ste))
		    == sizeof (ste) && (ste & SG_V)) {
			int p, pte;	/* NB: `p' shadows the proc argument */

			p = btop(addr & SG_PMASK);
			memaddr = (ste & SG_FRAME) + (p * sizeof(struct pte));
			(void) lseek(mem, memaddr, 0);
			if (read(mem, (char *)&pte, sizeof pte) != sizeof pte) {
				seterr("kvmprocread: cannot locate pte");
				memaddr = 0;
			}
			else {
				memaddr = pte & PG_FRAME;
				if (pte == PG_NV
				    || memaddr < (off_t)ptob(lowram)) {
					seterr("kvmprocread: page not valid");
					memaddr = 0;
				}
				else
					memaddr = (memaddr
						- (off_t)ptob(lowram))
						+ (addr & PGOFSET);
			}
		}
	}
#endif
#if defined(i386)
	/* translate addr through the i386 page directory / page table */
	if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
		struct pde pde;

		klseek(kmem,
		    (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);

		if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
		    && pde.pd_v) {

			struct pte pte;

			if (lseek(mem, (long)ctob(pde.pd_pfnum) +
			    (ptei(addr) * sizeof pte), 0) == -1)
				seterr("kvm_procread: lseek");
			if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
				if (pte.pg_v) {
					memaddr = (long)ctob(pte.pg_pfnum) +
					    (addr % (1 << CLSHIFT));
				}
			} else {
				seterr("kvm_procread: read");
			}
		}
	}
#endif /* i386 */

	/* not resident (or no MD translation): consult the vm map / swap */
	if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
		if (memaddr != 0) {
			memaddr += addr & CLOFSET;
		} else {
			swaddr = swb.offset + (addr & CLOFSET);
			swb.size -= addr & CLOFSET;
		}
	}

	if (memaddr) {
		if (lseek(mem, memaddr, 0) == -1)
			seterr("kvm_getu: lseek");
		/* real_len is unsigned; read's -1 still compares equal here */
		real_len = read(mem, buf, real_len);
		if (real_len == -1) {
			seterr("kvm_procread: read");
			return 0;
		}
	} else if (swaddr) {
		/* bounce through a cluster-sized buffer from the swap dev */
		char bouncebuf[CLBYTES];
		unsigned len;	/* NB: shadows the `len' parameter */
		if (lseek(swap, swaddr & ~CLOFSET, 0) == -1) {
			seterr("kvm_procread: lseek");
			return 0;
		}
		len = read(swap, bouncebuf, CLBYTES);
		if (len == -1 || len <= (swaddr & CLOFSET)) {
			seterr("kvm_procread: read");
			return 0;
		}
		len = MIN(len - (swaddr & CLOFSET), real_len);
		memcpy(buf, &bouncebuf[swaddr & CLOFSET], len);
		return len;
	} else
		real_len = 0;

	return real_len;
}
929
/*
 * Copy a NUL-terminated string from address addr in process p into
 * buf, transferring at most len bytes.  Returns the number of bytes
 * copied NOT counting the terminating NUL (like strlen), or the
 * count copied so far when the read fails or len is exhausted.
 * (Removed an unused local `a' and a dead copy[0] = '\0' init.)
 */
int
kvm_procreadstr(p, addr, buf, len)
	const struct proc *p;
	const unsigned addr;
	char *buf;
	unsigned len;
{
	int done, little;
	char copy[200], *pb;

	done = 0;
	while (len) {
		/* fetch the next chunk through the per-cluster reader */
		little = kvm_procread(p, addr+done, copy, MIN(len, sizeof copy));
		if (little < 1)
			break;
		pb = copy;
		while (little--) {
			len--;
			if ((*buf++ = *pb++) == '\0')
				return done;
			done++;
		}
	}
	return done;
}
957
958 char *
959 kvm_getargs(p, up)
960 const struct proc *p;
961 const struct user *up;
962 {
963 static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
964 register char *cp, *acp;
965 int left, rv;
966 struct ps_strings arginfo;
967
968 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
969 goto retucomm;
970
971 if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
972 sizeof(arginfo))
973 goto bad;
974
975 cmdbuf[0] = '\0';
976 cp = cmdbuf;
977 acp = arginfo.ps_argvstr;
978 left = ARG_MAX + 1;
979 while (arginfo.ps_nargvstr--) {
980 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
981 acp += rv + 1;
982 left -= rv + 1;
983 cp += rv;
984 *cp++ = ' ';
985 *cp = '\0';
986 } else
987 goto bad;
988 }
989 cp-- ; *cp = '\0';
990
991 if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
992 (void) strcat(cmdbuf, " (");
993 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
994 (void) strcat(cmdbuf, ")");
995 }
996 return (cmdbuf);
997
998 bad:
999 seterr("error locating command name for pid %d", p->p_pid);
1000 retucomm:
1001 (void) strcpy(cmdbuf, "(");
1002 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
1003 (void) strcat(cmdbuf, ")");
1004 return (cmdbuf);
1005 }
1006
1007 char *
1008 kvm_getenv(p, up)
1009 const struct proc *p;
1010 const struct user *up;
1011 {
1012 static char envbuf[ARG_MAX + 1];
1013 register char *cp, *acp;
1014 int left, rv;
1015 struct ps_strings arginfo;
1016
1017 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
1018 goto retemptyenv;
1019
1020 if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
1021 sizeof(arginfo))
1022 goto bad;
1023
1024 cp = envbuf;
1025 acp = arginfo.ps_envstr;
1026 left = ARG_MAX + 1;
1027 while (arginfo.ps_nenvstr--) {
1028 if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
1029 acp += rv + 1;
1030 left -= rv + 1;
1031 cp += rv;
1032 *cp++ = ' ';
1033 *cp = '\0';
1034 } else
1035 goto bad;
1036 }
1037 cp-- ; *cp = '\0';
1038 return (envbuf);
1039
1040 bad:
1041 seterr("error locating environment for pid %d", p->p_pid);
1042 retemptyenv:
1043 envbuf[0] = '\0';
1044 return (envbuf);
1045 }
1046
/*
 * Resolve the kernel symbols in nl[] and read the variables the
 * library depends on (swap geometry; for dead kernels also the
 * address-translation tables used by klseek()/Vtophys()).
 * Returns 0 on success, -1 on failure.
 */
static
getkvars()
{
	if (kvm_nlist(nl) == -1)
		return (-1);
	if (deadkernel) {
		/* We must do the sys map first because klseek uses it */
		long addr;

#ifndef NEWVM
		Syssize = nl[X_SYSSIZE].n_value;
		Sysmap = (struct pte *)
			calloc((unsigned) Syssize, sizeof (struct pte));
		if (Sysmap == NULL) {
			seterr("out of space for Sysmap");
			return (-1);
		}
		addr = (long) nl[X_SYSMAP].n_value;
		addr &= ~KERNBASE;
		(void) lseek(kmem, addr, 0);
		if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
		    != Syssize * sizeof (struct pte)) {
			seterr("can't read Sysmap");
			return (-1);
		}
#endif
#if defined(hp300) || defined(amiga)
		/* first usable RAM page frame, needed by pftoc() */
		addr = (long) nl[X_LOWRAM].n_value;
		(void) lseek(kmem, addr, 0);
		if (read(kmem, (char *) &lowram, sizeof (lowram))
		    != sizeof (lowram)) {
			seterr("can't read lowram");
			return (-1);
		}
		lowram = btop(lowram);
		/* snapshot the kernel segment table (double indirection:
		 * read the pointer, then the page it points at) */
		Sysseg = (struct ste *) malloc(NBPG);
		if (Sysseg == NULL) {
			seterr("out of space for Sysseg");
			return (-1);
		}
		addr = (long) nl[X_SYSSEG].n_value;
		(void) lseek(kmem, addr, 0);
		read(kmem, (char *)&addr, sizeof(addr));
		(void) lseek(kmem, (long)addr, 0);
		if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
			seterr("can't read Sysseg");
			return (-1);
		}
#endif
#if defined(i386)
		/* snapshot the kernel page directory via IdlePTD */
		PTD = (struct pde *) malloc(NBPG);
		if (PTD == NULL) {
			seterr("out of space for PTD");
			return (-1);
		}
		addr = (long) nl[X_IdlePTD].n_value;
		(void) lseek(kmem, addr, 0);
		read(kmem, (char *)&addr, sizeof(addr));
		(void) lseek(kmem, (long)addr, 0);
		if (read(kmem, (char *) PTD, NBPG) != NBPG) {
			seterr("can't read PTD");
			return (-1);
		}
#endif
	}
#ifndef NEWVM
	usrpt = (struct pte *)nl[X_USRPT].n_value;
	Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
#endif
	/*
	 * NOTE(review): nswap/dmmin/dmmax are declared int but read
	 * with sizeof (long); this only works where int and long are
	 * the same size -- confirm before porting to an LP64 target.
	 */
	if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
	    sizeof (long)) {
		seterr("can't read nswap");
		return (-1);
	}
	if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
	    sizeof (long)) {
		seterr("can't read dmmin");
		return (-1);
	}
	if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
	    sizeof (long)) {
		seterr("can't read dmmax");
		return (-1);
	}
	return (0);
}
1133
1134 kvm_read(loc, buf, len)
1135 void *loc;
1136 void *buf;
1137 {
1138 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1139 return (-1);
1140 if (iskva(loc)) {
1141 klseek(kmem, (off_t) loc, 0);
1142 if (read(kmem, buf, len) != len) {
1143 seterr("error reading kmem at %x", loc);
1144 return (-1);
1145 }
1146 } else {
1147 lseek(mem, (off_t) loc, 0);
1148 if (read(mem, buf, len) != len) {
1149 seterr("error reading mem at %x", loc);
1150 return (-1);
1151 }
1152 }
1153 return (len);
1154 }
1155
1156 static void
1157 klseek(fd, loc, off)
1158 int fd;
1159 off_t loc;
1160 int off;
1161 {
1162
1163 if (deadkernel) {
1164 if ((loc = Vtophys(loc)) == -1)
1165 return;
1166 }
1167 (void) lseek(fd, (off_t)loc, off);
1168 }
1169
1170 #ifndef NEWVM
/*
 * Given a base/size pair in virtual swap area,
 * return a physical base/size pair which is the
 * (largest) initial, physically contiguous block.
 *
 * `rev', when set, places the block at the far end of the swap
 * chunk instead of the near end.
 * NOTE(review): `rev' appears in the parameter list but has no
 * declaration, so it defaults to int under pre-ANSI rules; the two
 * range checks below were once panics and are now disabled no-ops.
 */
static void
vstodb(vsbase, vssize, dmp, dbp, rev)
	register int vsbase;
	int vssize;
	struct dmap *dmp;
	register struct dblock *dbp;
{
	register int blk = dmmin;
	register swblk_t *ip = dmp->dm_map;

	vsbase = ctod(vsbase);
	vssize = ctod(vssize);
	if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
		/*panic("vstodb")*/;
	/* walk the interleave map; chunk size doubles up to dmmax */
	while (vsbase >= blk) {
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	if (*ip <= 0 || *ip + blk > nswap)
		/*panic("vstodb")*/;
	dbp->db_size = MIN(vssize, blk - vsbase);
	dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
}
1201 #endif
1202
1203 #ifdef NEWVM
/*
 * Translate kernel virtual address loc to a physical offset in the
 * memory file, walking the machine-dependent translation tables
 * snapshotted by getkvars().  Returns (off_t)-1 on failure.
 */
static off_t
Vtophys(loc)
	u_long loc;
{
	off_t newloc = (off_t) -1;
#if defined(hp300) || defined(amiga)
	int p, ste, pte;

	/* segment table entry covering loc */
	ste = *(int *)&Sysseg[btos(loc)];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	(void) lseek(mem, newloc, 0);
	if (read(mem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	/* bias out lowram and add the in-page offset */
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	/* page directory entry covering loc */
	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	/*
	 * NOTE(review): pd_pfnum is used unscaled here (no ctob), and
	 * pg_pfnum below likewise -- verify against the other i386
	 * walk in kvm_procread(), which does apply ctob().
	 */
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
1257 #else
/*
 * Old-VM kernel virtual-to-physical translation: look the address up
 * in the in-core copy of Sysmap.  Returns the physical offset, or
 * (off_t)-1 with an error recorded via seterr() on failure.
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* Strip the kernel-base bit to get a system-map-relative address. */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	/* On these machines only addresses above KERNBASE are translatable. */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/* Invalid pte is acceptable only if it is a fill-on-demand page
	 * with a real frame number behind it. */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300) || defined(amiga)
	/* Frames below lowram are not RAM on these machines. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* Frame number -> byte offset, plus the in-page offset. */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1292 #endif
1293
1294
1295 #ifdef NEWVM
1296 /*
1297 * locate address of unwired or swapped page
1298 */
1299
1300 #define DEBUG 0
1301
1302 #define KREAD(off, addr, len) \
1303 (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1304
1305
/*
 * Translate a user virtual address of process `p' to either a physical
 * memory address (*maddr, when the page is resident) or a swap location
 * (*swb, when it is paged out).  Returns 1 on success, 0 on failure with
 * an error recorded via seterr()/setsyserr().
 *
 * NOTE(review): despite the declared type, `p' is actually treated as a
 * (struct kinfo_proc *) -- see the cast below; callers apparently pass
 * kinfo entries.  Confirm against callers.
 */
static int
vatosw(p, vaddr, maddr, swb)
	struct proc *p ;
	vm_offset_t vaddr;
	vm_offset_t *maddr;
	struct swapblk *swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object vm_object;
	struct vm_map_entry vm_entry;
	struct pager_struct pager;
	struct swpager swpager;
	struct swblock swblock;
	long addr, off;
	int i;

	/* Process 0 (swapper) and 2 (pagedaemon) have no pageable map. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		/*
		 * NOTE(review): `vaddr <= vm_entry.end' -- if `end' is the
		 * usual exclusive upper bound this should likely be `<';
		 * confirm against the kernel's vm_map_entry semantics.
		 */
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
				(vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* Share/sub maps are not handled here. */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/* Follow the shadow-object chain until the page is found in core
	 * or the chain ends. */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
				p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
				p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		/* Not resident here; translate the offset into the shadow. */
		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	/* Only the swap pager is understood (not vnode/device pagers). */
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	if (!KREAD((long)swpager.sw_blocks +
			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
			&swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Swap offset = block base plus remainder within the block;
	 * size = what is left of the block from that offset on. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1413
1414
1415 #define atop(x) (((unsigned)(x)) >> page_shift)
1416 #define vm_page_hash(object, offset) \
1417 (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1418
/*
 * Search the kernel's vm_page hash table for the page belonging to
 * (object, offset).  On a hit, store its physical address in *maddr
 * and return 1; return 0 if the page is not resident or on read error.
 */
static int
findpage(object, offset, maddr)
	long object;
	long offset;
	vm_offset_t *maddr;
{
	/* Kernel hash parameters, read once and cached across calls. */
	static long vm_page_hash_mask;
	static long vm_page_buckets;
	static long page_shift;
	queue_head_t bucket;
	struct vm_page mem;
	long addr, baddr;

	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
			&vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
			&page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
			&vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	/* Kernel address of the hash bucket for this (object, offset). */
	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/* Walk the circular hash chain; the bucket head address itself is
	 * the end-of-list sentinel. */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
1468 #endif /* NEWVM */
1469
1470 #include <varargs.h>
1471 static char errbuf[_POSIX2_LINE_MAX];
1472
1473 static void
1474 seterr(va_alist)
1475 va_dcl
1476 {
1477 char *fmt;
1478 va_list ap;
1479
1480 va_start(ap);
1481 fmt = va_arg(ap, char *);
1482 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1483 #if DEBUG
1484 (void) vfprintf(stderr, fmt, ap);
1485 #endif
1486 va_end(ap);
1487 }
1488
1489 static void
1490 setsyserr(va_alist)
1491 va_dcl
1492 {
1493 char *fmt, *cp;
1494 va_list ap;
1495 extern int errno;
1496
1497 va_start(ap);
1498 fmt = va_arg(ap, char *);
1499 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1500 for (cp=errbuf; *cp; cp++)
1501 ;
1502 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1503 va_end(ap);
1504 }
1505
1506 char *
1507 kvm_geterr()
1508 {
1509 return (errbuf);
1510 }
1511