/*-
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
#if defined(LIBC_SCCS) && !defined(lint)
/* from: static char sccsid[] = "@(#)kvm.c	5.18 (Berkeley) 5/7/91"; */
static char rcsid[] = "$Id: kvm.c,v 1.7 1993/05/20 11:53:04 cgd Exp $";
#endif /* LIBC_SCCS and not lint */
38
39 /*
40 * Updated for 386BSD 0.1 by David Greenman (davidg%implode (at) percy.rain.com)
41 * and Paul Kranenburg (pk (at) cs.few.eur.nl)
42 * 20-Aug-1992
43 */
44
45
46 #include <sys/param.h>
47 #include <sys/user.h>
48 #include <sys/proc.h>
49 #include <sys/ioctl.h>
50 #include <sys/kinfo.h>
51 #include <sys/tty.h>
52 #include <machine/vmparam.h>
53 #include <fcntl.h>
54 #include <nlist.h>
55 #include <kvm.h>
56 #include <ndbm.h>
57 #include <limits.h>
58 #include <paths.h>
59 #include <stdio.h>
60 #include <string.h>
61
62 #ifdef SPPWAIT
63 #define NEWVM
64 #endif
65
66 #ifdef NEWVM
67 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
68 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
69 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
70 #include <vm/vm_page.h>
71 #include <vm/swap_pager.h>
72 #include <sys/kinfo_proc.h>
73 #ifdef hp300
74 #include <hp300/hp300/pte.h>
75 #endif
76 #else /* NEWVM */
77 #include <machine/pte.h>
78 #include <sys/vmmac.h>
79 #include <sys/text.h>
80 #endif /* NEWVM */
81
82 /*
83 * files
84 */
85 static const char *unixf, *memf, *kmemf, *swapf;
86 static int unixx, mem, kmem, swap;
87 static DBM *db;
88 /*
89 * flags
90 */
91 static int deadkernel;
92 static int kvminit = 0;
93 static int kvmfilesopen = 0;
94 /*
95 * state
96 */
97 static struct kinfo_proc *kvmprocbase, *kvmprocptr;
98 static int kvmnprocs;
99 /*
100 * u. buffer
101 */
102 static union {
103 struct user user;
104 char upages[UPAGES][NBPG];
105 } user;
106
107 #ifdef NEWVM
108 struct swapblk {
109 long offset; /* offset in swap device */
110 long size; /* remaining size of block in swap device */
111 };
112 #endif
113 /*
114 * random other stuff
115 */
116 #ifndef NEWVM
117 static struct pte *Usrptmap, *usrpt;
118 static struct pte *Sysmap;
119 static int Syssize;
120 #endif
121 static int dmmin, dmmax;
122 static int pcbpf;
123 static int argaddr0; /* XXX */
124 static int argaddr1;
125 static int swaddr;
126 static int nswap;
127 static char *tmp;
128 #if defined(hp300)
129 static int lowram;
130 static struct ste *Sysseg;
131 #endif
132 #if defined(i386)
133 static struct pde *PTD;
134 #endif
135
136 #define basename(cp) ((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
137 #define MAXSYMSIZE 256
138
139 #if defined(hp300)
140 #define pftoc(f) ((f) - lowram)
141 #define iskva(v) (1)
142 #endif
143
144 #ifndef pftoc
145 #define pftoc(f) (f)
146 #endif
147 #ifndef iskva
148 #define iskva(v) ((u_long)(v) & KERNBASE)
149 #endif
150
151 static struct nlist nl[] = {
152 { "_Usrptmap" },
153 #define X_USRPTMAP 0
154 { "_usrpt" },
155 #define X_USRPT 1
156 { "_nswap" },
157 #define X_NSWAP 2
158 { "_dmmin" },
159 #define X_DMMIN 3
160 { "_dmmax" },
161 #define X_DMMAX 4
162 { "_vm_page_buckets" },
163 #define X_VM_PAGE_BUCKETS 5
164 { "_vm_page_hash_mask" },
165 #define X_VM_PAGE_HASH_MASK 6
166 { "_page_shift" },
167 #define X_PAGE_SHIFT 7
168 /*
169 * everything here and down, only if a dead kernel
170 */
171 { "_Sysmap" },
172 #define X_SYSMAP 8
173 #define X_DEADKERNEL X_SYSMAP
174 { "_Syssize" },
175 #define X_SYSSIZE 9
176 { "_allproc" },
177 #define X_ALLPROC 10
178 { "_zombproc" },
179 #define X_ZOMBPROC 11
180 { "_nproc" },
181 #define X_NPROC 12
182 #define X_LAST 12
183 #if defined(hp300)
184 { "_Sysseg" },
185 #define X_SYSSEG (X_LAST+1)
186 { "_lowram" },
187 #define X_LOWRAM (X_LAST+2)
188 #endif
189 #if defined(i386)
190 { "_IdlePTD" },
191 #define X_IdlePTD (X_LAST+1)
192 #endif
193 { "" },
194 };
195
196 static off_t Vtophys();
197 static void klseek(), seterr(), setsyserr(), vstodb();
198 static int getkvars(), kvm_doprocs(), kvm_init();
199 #ifdef NEWVM
200 static int vatosw();
201 static int findpage();
202 #endif
203
204 /*
205 * returns 0 if files were opened now,
206 * 1 if files were already opened,
207 * -1 if files could not be opened.
208 */
209 kvm_openfiles(uf, mf, sf)
210 const char *uf, *mf, *sf;
211 {
212 if (kvmfilesopen)
213 return (1);
214 unixx = mem = kmem = swap = -1;
215 unixf = (uf == NULL) ? _PATH_UNIX : uf;
216 memf = (mf == NULL) ? _PATH_MEM : mf;
217
218 if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
219 setsyserr("can't open %s", unixf);
220 goto failed;
221 }
222 if ((mem = open(memf, O_RDONLY, 0)) == -1) {
223 setsyserr("can't open %s", memf);
224 goto failed;
225 }
226 if (sf != NULL)
227 swapf = sf;
228 if (mf != NULL) {
229 deadkernel++;
230 kmemf = mf;
231 kmem = mem;
232 swap = -1;
233 } else {
234 kmemf = _PATH_KMEM;
235 if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
236 setsyserr("can't open %s", kmemf);
237 goto failed;
238 }
239 swapf = (sf == NULL) ? _PATH_DRUM : sf;
240 /*
241 * live kernel - avoid looking up nlist entries
242 * past X_DEADKERNEL.
243 */
244 nl[X_DEADKERNEL].n_name = "";
245 }
246 if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
247 seterr("can't open %s", swapf);
248 goto failed;
249 }
250 kvmfilesopen++;
251 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
252 return (-1);
253 return (0);
254 failed:
255 kvm_close();
256 return (-1);
257 }
258
259 static
260 kvm_init(uf, mf, sf)
261 char *uf, *mf, *sf;
262 {
263 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
264 return (-1);
265 if (getkvars() == -1)
266 return (-1);
267 kvminit = 1;
268
269 return (0);
270 }
271
272 kvm_close()
273 {
274 if (unixx != -1) {
275 close(unixx);
276 unixx = -1;
277 }
278 if (kmem != -1) {
279 if (kmem != mem)
280 close(kmem);
281 /* otherwise kmem is a copy of mem, and will be closed below */
282 kmem = -1;
283 }
284 if (mem != -1) {
285 close(mem);
286 mem = -1;
287 }
288 if (swap != -1) {
289 close(swap);
290 swap = -1;
291 }
292 if (db != NULL) {
293 dbm_close(db);
294 db = NULL;
295 }
296 kvminit = 0;
297 kvmfilesopen = 0;
298 deadkernel = 0;
299 #ifndef NEWVM
300 if (Sysmap) {
301 free(Sysmap);
302 Sysmap = NULL;
303 }
304 #endif
305 }
306
307 kvm_nlist(nl)
308 struct nlist *nl;
309 {
310 datum key, data;
311 char dbname[MAXPATHLEN];
312 char dbversion[_POSIX2_LINE_MAX];
313 char kversion[_POSIX2_LINE_MAX];
314 int dbversionlen;
315 char symbuf[MAXSYMSIZE];
316 struct nlist nbuf, *n;
317 int num, did;
318
319 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
320 return (-1);
321 if (deadkernel)
322 goto hard2;
323 /*
324 * initialize key datum
325 */
326 key.dptr = symbuf;
327
328 if (db != NULL)
329 goto win; /* off to the races */
330 /*
331 * open database
332 */
333 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
334 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
335 goto hard2;
336 /*
337 * read version out of database
338 */
339 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
340 key.dsize = (sizeof ("VERSION") - 1);
341 data = dbm_fetch(db, key);
342 if (data.dptr == NULL)
343 goto hard1;
344 bcopy(data.dptr, dbversion, data.dsize);
345 dbversionlen = data.dsize;
346 /*
347 * read version string from kernel memory
348 */
349 bcopy("_version", symbuf, sizeof ("_version")-1);
350 key.dsize = (sizeof ("_version")-1);
351 data = dbm_fetch(db, key);
352 if (data.dptr == NULL)
353 goto hard1;
354 if (data.dsize != sizeof (struct nlist))
355 goto hard1;
356 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
357 lseek(kmem, nbuf.n_value, 0);
358 if (read(kmem, kversion, dbversionlen) != dbversionlen)
359 goto hard1;
360 /*
361 * if they match, we win - otherwise do it the hard way
362 */
363 if (bcmp(dbversion, kversion, dbversionlen) != 0)
364 goto hard1;
365 /*
366 * getem from the database.
367 */
368 win:
369 num = did = 0;
370 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
371 int len;
372 /*
373 * clear out fields from users buffer
374 */
375 n->n_type = 0;
376 n->n_other = 0;
377 n->n_desc = 0;
378 n->n_value = 0;
379 /*
380 * query db
381 */
382 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
383 seterr("symbol too large");
384 return (-1);
385 }
386 (void)strcpy(symbuf, n->n_name);
387 key.dsize = len;
388 data = dbm_fetch(db, key);
389 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
390 continue;
391 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
392 n->n_value = nbuf.n_value;
393 n->n_type = nbuf.n_type;
394 n->n_desc = nbuf.n_desc;
395 n->n_other = nbuf.n_other;
396 did++;
397 }
398 return (num - did);
399 hard1:
400 dbm_close(db);
401 db = NULL;
402 hard2:
403 num = nlist(unixf, nl);
404 if (num == -1)
405 seterr("nlist (hard way) failed");
406 return (num);
407 }
408
409 kvm_getprocs(what, arg)
410 int what, arg;
411 {
412 static int ocopysize = -1;
413
414 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
415 return (NULL);
416 if (!deadkernel) {
417 int ret, copysize;
418
419 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
420 setsyserr("can't get estimate for kerninfo");
421 return (-1);
422 }
423 copysize = ret;
424 if (copysize > ocopysize || !kvmprocbase) {
425 if (ocopysize == -1 || !kvmprocbase)
426 kvmprocbase =
427 (struct kinfo_proc *)malloc(copysize);
428 else
429 kvmprocbase =
430 (struct kinfo_proc *)realloc(kvmprocbase,
431 copysize);
432 if (!kvmprocbase) {
433 seterr("out of memory");
434 return (-1);
435 }
436 }
437 ocopysize = copysize;
438 if ((ret = getkerninfo(what, kvmprocbase, ©size,
439 arg)) == -1) {
440 setsyserr("can't get proc list");
441 return (-1);
442 }
443 if (copysize % sizeof (struct kinfo_proc)) {
444 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
445 copysize, sizeof (struct kinfo_proc));
446 return (-1);
447 }
448 kvmnprocs = copysize / sizeof (struct kinfo_proc);
449 } else {
450 int nproc;
451
452 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
453 sizeof (int)) != sizeof (int)) {
454 seterr("can't read nproc");
455 return (-1);
456 }
457 if ((kvmprocbase = (struct kinfo_proc *)
458 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
459 seterr("out of memory (addr: %x nproc = %d)",
460 nl[X_NPROC].n_value, nproc);
461 return (-1);
462 }
463 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
464 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
465 }
466 kvmprocptr = kvmprocbase;
467
468 return (kvmnprocs);
469 }
470
471 /*
472 * XXX - should NOT give up so easily - especially since the kernel
473 * may be corrupt (it died). Should gather as much information as possible.
474 * Follows proc ptrs instead of reading table since table may go
475 * away soon.
476 */
477 static
478 kvm_doprocs(what, arg, buff)
479 int what, arg;
480 char *buff;
481 {
482 struct proc *p, proc;
483 register char *bp = buff;
484 int i = 0;
485 int doingzomb = 0;
486 struct eproc eproc;
487 struct pgrp pgrp;
488 struct session sess;
489 struct tty tty;
490 #ifndef NEWVM
491 struct text text;
492 #endif
493
494 /* allproc */
495 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
496 sizeof (struct proc *)) != sizeof (struct proc *)) {
497 seterr("can't read allproc");
498 return (-1);
499 }
500
501 again:
502 for (; p; p = proc.p_nxt) {
503 if (kvm_read(p, &proc, sizeof (struct proc)) !=
504 sizeof (struct proc)) {
505 seterr("can't read proc at %x", p);
506 return (-1);
507 }
508 #ifdef NEWVM
509 if (kvm_read(proc.p_cred, &eproc.e_pcred,
510 sizeof (struct pcred)) == sizeof (struct pcred))
511 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
512 sizeof (struct ucred));
513 switch(ki_op(what)) {
514
515 case KINFO_PROC_PID:
516 if (proc.p_pid != (pid_t)arg)
517 continue;
518 break;
519
520
521 case KINFO_PROC_UID:
522 if (eproc.e_ucred.cr_uid != (uid_t)arg)
523 continue;
524 break;
525
526 case KINFO_PROC_RUID:
527 if (eproc.e_pcred.p_ruid != (uid_t)arg)
528 continue;
529 break;
530 }
531 #else
532 switch(ki_op(what)) {
533
534 case KINFO_PROC_PID:
535 if (proc.p_pid != (pid_t)arg)
536 continue;
537 break;
538
539
540 case KINFO_PROC_UID:
541 if (proc.p_uid != (uid_t)arg)
542 continue;
543 break;
544
545 case KINFO_PROC_RUID:
546 if (proc.p_ruid != (uid_t)arg)
547 continue;
548 break;
549 }
550 #endif
551 /*
552 * gather eproc
553 */
554 eproc.e_paddr = p;
555 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
556 sizeof (struct pgrp)) {
557 seterr("can't read pgrp at %x", proc.p_pgrp);
558 return (-1);
559 }
560 eproc.e_sess = pgrp.pg_session;
561 eproc.e_pgid = pgrp.pg_id;
562 eproc.e_jobc = pgrp.pg_jobc;
563 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
564 != sizeof (struct session)) {
565 seterr("can't read session at %x", pgrp.pg_session);
566 return (-1);
567 }
568 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
569 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
570 != sizeof (struct tty)) {
571 seterr("can't read tty at %x", sess.s_ttyp);
572 return (-1);
573 }
574 eproc.e_tdev = tty.t_dev;
575 eproc.e_tsess = tty.t_session;
576 if (tty.t_pgrp != NULL) {
577 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
578 pgrp)) != sizeof (struct pgrp)) {
579 seterr("can't read tpgrp at &x",
580 tty.t_pgrp);
581 return (-1);
582 }
583 eproc.e_tpgid = pgrp.pg_id;
584 } else
585 eproc.e_tpgid = -1;
586 } else
587 eproc.e_tdev = NODEV;
588 if (proc.p_wmesg)
589 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
590 #ifdef NEWVM
591 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
592 sizeof (struct vmspace));
593 eproc.e_xsize = eproc.e_xrssize =
594 eproc.e_xccount = eproc.e_xswrss = 0;
595 #else
596 if (proc.p_textp) {
597 kvm_read(proc.p_textp, &text, sizeof (text));
598 eproc.e_xsize = text.x_size;
599 eproc.e_xrssize = text.x_rssize;
600 eproc.e_xccount = text.x_ccount;
601 eproc.e_xswrss = text.x_swrss;
602 } else {
603 eproc.e_xsize = eproc.e_xrssize =
604 eproc.e_xccount = eproc.e_xswrss = 0;
605 }
606 #endif
607
608 switch(ki_op(what)) {
609
610 case KINFO_PROC_PGRP:
611 if (eproc.e_pgid != (pid_t)arg)
612 continue;
613 break;
614
615 case KINFO_PROC_TTY:
616 if ((proc.p_flag&SCTTY) == 0 ||
617 eproc.e_tdev != (dev_t)arg)
618 continue;
619 break;
620 }
621
622 i++;
623 bcopy(&proc, bp, sizeof (struct proc));
624 bp += sizeof (struct proc);
625 bcopy(&eproc, bp, sizeof (struct eproc));
626 bp+= sizeof (struct eproc);
627 }
628 if (!doingzomb) {
629 /* zombproc */
630 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
631 sizeof (struct proc *)) != sizeof (struct proc *)) {
632 seterr("can't read zombproc");
633 return (-1);
634 }
635 doingzomb = 1;
636 goto again;
637 }
638
639 return (i);
640 }
641
642 struct proc *
643 kvm_nextproc()
644 {
645
646 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
647 return (NULL);
648 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
649 seterr("end of proc list");
650 return (NULL);
651 }
652 return((struct proc *)(kvmprocptr++));
653 }
654
655 struct eproc *
656 kvm_geteproc(p)
657 const struct proc *p;
658 {
659 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
660 }
661
662 kvm_setproc()
663 {
664 kvmprocptr = kvmprocbase;
665 }
666
667 kvm_freeprocs()
668 {
669
670 if (kvmprocbase) {
671 free(kvmprocbase);
672 kvmprocbase = NULL;
673 }
674 }
675
676 #ifdef i386
677 /* See also ./sys/kern/kern_execve.c */
678 #define ARGSIZE (roundup(ARG_MAX, NBPG))
679 #endif
680
681 #ifdef NEWVM
682 struct user *
683 kvm_getu(p)
684 const struct proc *p;
685 {
686 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
687 register int i;
688 register char *up;
689 u_int vaddr;
690 struct swapblk swb;
691
692 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
693 return (NULL);
694 if (p->p_stat == SZOMB) {
695 seterr("zombie process");
696 return (NULL);
697 }
698
699 argaddr0 = argaddr1 = swaddr = 0;
700 if ((p->p_flag & SLOAD) == 0) {
701 vm_offset_t maddr;
702
703 if (swap < 0) {
704 seterr("no swap");
705 return (NULL);
706 }
707 /*
708 * Costly operation, better set enable_swap to zero
709 * in vm/vm_glue.c, since paging of user pages isn't
710 * done yet anyway.
711 */
712 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
713 return NULL;
714
715 if (maddr == 0 && swb.size < UPAGES * NBPG)
716 return NULL;
717
718 for (i = 0; i < UPAGES; i++) {
719 if (maddr) {
720 (void) lseek(mem, maddr + i * NBPG, 0);
721 if (read(mem,
722 (char *)user.upages[i], NBPG) != NBPG) {
723 seterr(
724 "can't read u for pid %d from %s",
725 p->p_pid, swapf);
726 return NULL;
727 }
728 } else {
729 (void) lseek(swap, swb.offset + i * NBPG, 0);
730 if (read(swap,
731 (char *)user.upages[i], NBPG) != NBPG) {
732 seterr(
733 "can't read u for pid %d from %s",
734 p->p_pid, swapf);
735 return NULL;
736 }
737 }
738 }
739 return(&user.user);
740 }
741 /*
742 * Read u-area one page at a time for the benefit of post-mortems
743 */
744 up = (char *) p->p_addr;
745 for (i = 0; i < UPAGES; i++) {
746 klseek(kmem, (long)up, 0);
747 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
748 seterr("cant read page %x of u of pid %d from %s",
749 up, p->p_pid, kmemf);
750 return(NULL);
751 }
752 up += CLBYTES;
753 }
754 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
755 /*
756 * Conjure up a physical address for the arguments.
757 */
758 #ifdef hp300
759 if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
760 struct pte pte[CLSIZE*2];
761
762 klseek(kmem,
763 (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
764 [btoc(USRSTACK-CLBYTES*2)], 0);
765 if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
766 #if CLBYTES < 2048
767 argaddr0 = ctob(pftoc(pte[CLSIZE*0].pg_pfnum));
768 #endif
769 argaddr1 = ctob(pftoc(pte[CLSIZE*1].pg_pfnum));
770 }
771 }
772 #endif
773 kp->kp_eproc.e_vm.vm_rssize =
774 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
775
776 vaddr = (u_int)kp->kp_eproc.e_vm.vm_maxsaddr + MAXSSIZ - ARGSIZE;
777
778 #ifdef i386
779 if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
780 struct pde pde;
781
782 klseek(kmem,
783 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(vaddr)]), 0);
784
785 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
786 && pde.pd_v) {
787
788 struct pte pte;
789
790 if (lseek(mem, (long)ctob(pde.pd_pfnum) +
791 (ptei(vaddr) * sizeof pte), 0) == -1)
792 seterr("kvm_getu: lseek");
793 if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
794 if (pte.pg_v) {
795 argaddr1 = (long)ctob(pte.pg_pfnum);
796 } else {
797 goto hard;
798 }
799 } else {
800 seterr("kvm_getu: read");
801 }
802 } else {
803 goto hard;
804 }
805 }
806 #endif /* i386 */
807
808 hard:
809 if (vatosw(p, vaddr, &argaddr1, &swb)) {
810 if (argaddr1 == 0 && swb.size >= ARGSIZE)
811 swaddr = swb.offset;
812 }
813
814 return(&user.user);
815 }
816 #else
817 struct user *
818 kvm_getu(p)
819 const struct proc *p;
820 {
821 struct pte *pteaddr, apte;
822 struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
823 register int i;
824 int ncl;
825
826 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
827 return (NULL);
828 if (p->p_stat == SZOMB) {
829 seterr("zombie process");
830 return (NULL);
831 }
832 if ((p->p_flag & SLOAD) == 0) {
833 if (swap < 0) {
834 seterr("no swap");
835 return (NULL);
836 }
837 (void) lseek(swap, (long)dtob(p->p_swaddr), 0);
838 if (read(swap, (char *)&user.user, sizeof (struct user)) !=
839 sizeof (struct user)) {
840 seterr("can't read u for pid %d from %s",
841 p->p_pid, swapf);
842 return (NULL);
843 }
844 pcbpf = 0;
845 argaddr0 = 0;
846 argaddr1 = 0;
847 return (&user.user);
848 }
849 pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
850 klseek(kmem, (long)pteaddr, 0);
851 if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
852 seterr("can't read indir pte to get u for pid %d from %s",
853 p->p_pid, kmemf);
854 return (NULL);
855 }
856 lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
857 if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
858 seterr("can't read page table for u of pid %d from %s",
859 p->p_pid, memf);
860 return (NULL);
861 }
862 if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
863 argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
864 else
865 argaddr0 = 0;
866 if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
867 argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
868 else
869 argaddr1 = 0;
870 pcbpf = arguutl[CLSIZE*2].pg_pfnum;
871 ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
872 while (--ncl >= 0) {
873 i = ncl * CLSIZE;
874 lseek(mem,
875 (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
876 if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
877 seterr("can't read page %d of u of pid %d from %s",
878 arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
879 return(NULL);
880 }
881 }
882 return (&user.user);
883 }
884 #endif
885
886 char *
887 kvm_getargs(p, up)
888 const struct proc *p;
889 const struct user *up;
890 {
891 #ifdef i386
892 /* See also ./sys/kern/kern_execve.c */
893 static char cmdbuf[ARGSIZE];
894 static union {
895 char argc[ARGSIZE];
896 int argi[ARGSIZE/sizeof (int)];
897 } argspac;
898 #else
899 static char cmdbuf[CLBYTES*2];
900 static union {
901 char argc[CLBYTES*2];
902 int argi[CLBYTES*2/sizeof (int)];
903 } argspac;
904 #endif
905 register char *cp;
906 register int *ip;
907 char c;
908 int nbad;
909 #ifndef NEWVM
910 struct dblock db;
911 #endif
912 const char *file;
913 int stkoff = 0;
914
915 #if defined(NEWVM) && defined(hp300)
916 stkoff = 20; /* XXX for sigcode */
917 #endif
918 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
919 goto retucomm;
920 if ((p->p_flag & SLOAD) == 0 || argaddr1 == 0) {
921 #ifdef NEWVM
922 if (swaddr == 0)
923 goto retucomm; /* XXX for now */
924 #ifdef i386
925 (void) lseek(swap, swaddr, 0);
926 if (read(swap, &argspac.argc[0], ARGSIZE) != ARGSIZE)
927 goto bad;
928 #else
929 if (argaddr0) {
930 lseek(swap, (long)argaddr0, 0);
931 if (read(swap, (char *)&argspac, CLBYTES) != CLBYTES)
932 goto bad;
933 } else
934 bzero(&argspac, CLBYTES);
935 lseek(swap, (long)argaddr1, 0);
936 if (read(swap, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
937 goto bad;
938 #endif
939 #else
940 if (swap < 0 || p->p_ssize == 0)
941 goto retucomm;
942 vstodb(0, CLSIZE, &up->u_smap, &db, 1);
943 (void) lseek(swap, (long)dtob(db.db_base), 0);
944 if (read(swap, (char *)&argspac.argc[CLBYTES], CLBYTES)
945 != CLBYTES)
946 goto bad;
947 vstodb(1, CLSIZE, &up->u_smap, &db, 1);
948 (void) lseek(swap, (long)dtob(db.db_base), 0);
949 if (read(swap, (char *)&argspac.argc[0], CLBYTES) != CLBYTES)
950 goto bad;
951 file = swapf;
952 #endif
953 } else {
954 #ifdef i386
955 lseek(mem, (long)argaddr1, 0);
956 if (read(mem, &argspac.argc[0], ARGSIZE) != ARGSIZE)
957 goto bad;
958 #else
959 if (argaddr0) {
960 lseek(mem, (long)argaddr0, 0);
961 if (read(mem, (char *)&argspac, CLBYTES) != CLBYTES)
962 goto bad;
963 } else
964 bzero(&argspac, CLBYTES);
965 lseek(mem, (long)argaddr1, 0);
966 if (read(mem, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
967 goto bad;
968 #endif
969 file = (char *) memf;
970 }
971
972 nbad = 0;
973 #ifdef i386
974 ip = &argspac.argi[(ARGSIZE-ARG_MAX)/sizeof (int)];
975
976 for (cp = (char *)ip; cp < &argspac.argc[ARGSIZE-stkoff]; cp++) {
977 #else
978 ip = &argspac.argi[CLBYTES*2/sizeof (int)];
979 ip -= 2; /* last arg word and .long 0 */
980 ip -= stkoff / sizeof (int);
981 while (*--ip) {
982 if (ip == argspac.argi)
983 goto retucomm;
984 }
985 *(char *)ip = ' ';
986 ip++;
987
988 for (cp = (char *)ip; cp < &argspac.argc[CLBYTES*2-stkoff]; cp++) {
989 #endif
990 c = *cp;
991 if (c == 0) { /* convert null between arguments to space */
992 *cp = ' ';
993 if (*(cp+1) == 0) break; /* if null argument follows then no more args */
994 }
995 else if (c < ' ' || c > 0176) {
996 if (++nbad >= 5*(0+1)) { /* eflg -> 0 XXX */ /* limit number of bad chars to 5 */
997 *cp++ = '?';
998 break;
999 }
1000 *cp = '?';
1001 }
1002 else if (0 == 0 && c == '=') { /* eflg -> 0 XXX */
1003 while (*--cp != ' ')
1004 if (cp <= (char *)ip)
1005 break;
1006 break;
1007 }
1008 }
1009 *cp = 0;
1010 while (*--cp == ' ')
1011 *cp = 0;
1012 cp = (char *)ip;
1013 (void) strcpy(cmdbuf, cp);
1014 if (cp[0] == '-' || cp[0] == '?' || cp[0] <= ' ') {
1015 (void) strcat(cmdbuf, " (");
1016 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
1017 (void) strcat(cmdbuf, ")");
1018 }
1019 return (cmdbuf);
1020
1021 bad:
1022 seterr("error locating command name for pid %d from %s",
1023 p->p_pid, file);
1024 retucomm:
1025 (void) strcpy(cmdbuf, " (");
1026 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
1027 (void) strcat(cmdbuf, ")");
1028 return (cmdbuf);
1029 }
1030
1031
1032 static
1033 getkvars()
1034 {
1035 if (kvm_nlist(nl) == -1)
1036 return (-1);
1037 if (deadkernel) {
1038 /* We must do the sys map first because klseek uses it */
1039 long addr;
1040
1041 #ifndef NEWVM
1042 Syssize = nl[X_SYSSIZE].n_value;
1043 Sysmap = (struct pte *)
1044 calloc((unsigned) Syssize, sizeof (struct pte));
1045 if (Sysmap == NULL) {
1046 seterr("out of space for Sysmap");
1047 return (-1);
1048 }
1049 addr = (long) nl[X_SYSMAP].n_value;
1050 addr &= ~KERNBASE;
1051 (void) lseek(kmem, addr, 0);
1052 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1053 != Syssize * sizeof (struct pte)) {
1054 seterr("can't read Sysmap");
1055 return (-1);
1056 }
1057 #endif
1058 #if defined(hp300)
1059 addr = (long) nl[X_LOWRAM].n_value;
1060 (void) lseek(kmem, addr, 0);
1061 if (read(kmem, (char *) &lowram, sizeof (lowram))
1062 != sizeof (lowram)) {
1063 seterr("can't read lowram");
1064 return (-1);
1065 }
1066 lowram = btop(lowram);
1067 Sysseg = (struct ste *) malloc(NBPG);
1068 if (Sysseg == NULL) {
1069 seterr("out of space for Sysseg");
1070 return (-1);
1071 }
1072 addr = (long) nl[X_SYSSEG].n_value;
1073 (void) lseek(kmem, addr, 0);
1074 read(kmem, (char *)&addr, sizeof(addr));
1075 (void) lseek(kmem, (long)addr, 0);
1076 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1077 seterr("can't read Sysseg");
1078 return (-1);
1079 }
1080 #endif
1081 #if defined(i386)
1082 PTD = (struct pde *) malloc(NBPG);
1083 if (PTD == NULL) {
1084 seterr("out of space for PTD");
1085 return (-1);
1086 }
1087 addr = (long) nl[X_IdlePTD].n_value;
1088 (void) lseek(kmem, addr, 0);
1089 read(kmem, (char *)&addr, sizeof(addr));
1090 (void) lseek(kmem, (long)addr, 0);
1091 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1092 seterr("can't read PTD");
1093 return (-1);
1094 }
1095 #endif
1096 }
1097 #ifndef NEWVM
1098 usrpt = (struct pte *)nl[X_USRPT].n_value;
1099 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1100 #endif
1101 if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1102 sizeof (long)) {
1103 seterr("can't read nswap");
1104 return (-1);
1105 }
1106 if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1107 sizeof (long)) {
1108 seterr("can't read dmmin");
1109 return (-1);
1110 }
1111 if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1112 sizeof (long)) {
1113 seterr("can't read dmmax");
1114 return (-1);
1115 }
1116 return (0);
1117 }
1118
1119 kvm_read(loc, buf, len)
1120 void *loc;
1121 void *buf;
1122 {
1123 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1124 return (-1);
1125 if (iskva(loc)) {
1126 klseek(kmem, (off_t) loc, 0);
1127 if (read(kmem, buf, len) != len) {
1128 seterr("error reading kmem at %x", loc);
1129 return (-1);
1130 }
1131 } else {
1132 lseek(mem, (off_t) loc, 0);
1133 if (read(mem, buf, len) != len) {
1134 seterr("error reading mem at %x", loc);
1135 return (-1);
1136 }
1137 }
1138 return (len);
1139 }
1140
1141 static void
1142 klseek(fd, loc, off)
1143 int fd;
1144 off_t loc;
1145 int off;
1146 {
1147
1148 if (deadkernel) {
1149 if ((loc = Vtophys(loc)) == -1)
1150 return;
1151 }
1152 (void) lseek(fd, (off_t)loc, off);
1153 }
1154
1155 #ifndef NEWVM
1156 /*
1157 * Given a base/size pair in virtual swap area,
1158 * return a physical base/size pair which is the
1159 * (largest) initial, physically contiguous block.
1160 */
1161 static void
1162 vstodb(vsbase, vssize, dmp, dbp, rev)
1163 register int vsbase;
1164 int vssize;
1165 struct dmap *dmp;
1166 register struct dblock *dbp;
1167 {
1168 register int blk = dmmin;
1169 register swblk_t *ip = dmp->dm_map;
1170
1171 vsbase = ctod(vsbase);
1172 vssize = ctod(vssize);
1173 if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1174 /*panic("vstodb")*/;
1175 while (vsbase >= blk) {
1176 vsbase -= blk;
1177 if (blk < dmmax)
1178 blk *= 2;
1179 ip++;
1180 }
1181 if (*ip <= 0 || *ip + blk > nswap)
1182 /*panic("vstodb")*/;
1183 dbp->db_size = MIN(vssize, blk - vsbase);
1184 dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1185 }
1186 #endif
1187
1188 #ifdef NEWVM
1189 static off_t
1190 Vtophys(loc)
1191 u_long loc;
1192 {
1193 off_t newloc = (off_t) -1;
1194 #ifdef hp300
1195 int p, ste, pte;
1196
1197 ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
1198 if ((ste & SG_V) == 0) {
1199 seterr("vtophys: segment not valid");
1200 return((off_t) -1);
1201 }
1202 p = btop(loc & SG_PMASK);
1203 newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
1204 (void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
1205 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1206 seterr("vtophys: cannot locate pte");
1207 return((off_t) -1);
1208 }
1209 newloc = pte & PG_FRAME;
1210 if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
1211 seterr("vtophys: page not valid");
1212 return((off_t) -1);
1213 }
1214 newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
1215 #endif
1216 #ifdef i386
1217 struct pde pde;
1218 struct pte pte;
1219 int p;
1220
1221 pde = PTD[loc >> PD_SHIFT];
1222 if (pde.pd_v == 0) {
1223 seterr("vtophys: page directory entry not valid");
1224 return((off_t) -1);
1225 }
1226 p = btop(loc & PT_MASK);
1227 newloc = pde.pd_pfnum + (p * sizeof(struct pte));
1228 (void) lseek(kmem, (long)newloc, 0);
1229 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1230 seterr("vtophys: cannot obtain desired pte");
1231 return((off_t) -1);
1232 }
1233 newloc = pte.pg_pfnum;
1234 if (pte.pg_v == 0) {
1235 seterr("vtophys: page table entry not valid");
1236 return((off_t) -1);
1237 }
1238 newloc += (loc & PGOFSET);
1239 #endif
1240 return((off_t) newloc);
1241 }
1242 #else
/*
 * vtophys - translate the kernel virtual address `loc' into a
 * physical address using the system page table (Sysmap), which was
 * read from the kernel at initialization time.
 *
 * Returns the physical address on success, (off_t)-1 on failure
 * (an error string is recorded via seterr() for kvm_geterr()).
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* Strip the kernel base to get an offset into system space. */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	/* On these machines a kernel address must have KERNBASE set. */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/*
	 * An invalid pte is rejected only when it is fill-on-demand or
	 * names frame 0; otherwise the frame number is still usable.
	 */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* Frames below lowram are not RAM on the hp300. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* Convert the frame number to bytes and add the page offset. */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1277 #endif
1278
1279
1280 #ifdef NEWVM
1281 /*
1282 * locate address of unwired or swapped page
1283 */
1284
1285 #define DEBUG 0
1286
1287 #define KREAD(off, addr, len) \
1288 (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1289
1290
/*
 * vatosw - locate the backing store for a user virtual address:
 * walk the process's vm_map entries and the owning object's shadow
 * chain, all read out of kernel memory with KREAD.  If the page is
 * resident its physical address is stored in *maddr; otherwise the
 * swap location and remaining block size are stored in *swb.
 *
 * Returns 1 on success, 0 on failure (error recorded via seterr()
 * or setsyserr()).
 *
 * NOTE(review): `p' is immediately cast to struct kinfo_proc *, so
 * callers must actually pass a kinfo_proc -- confirm at call sites.
 */
static int
vatosw(p, vaddr, maddr, swb)
	struct proc *p ;
	vm_offset_t vaddr;
	vm_offset_t *maddr;
	struct swapblk *swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object vm_object;
	struct vm_map_entry vm_entry;
	struct pager_struct pager;
	struct swpager swpager;
	struct swblock swblock;
	long addr, off;
	int i;

	/* Pids 0 and 2 (swapper, pagedaemon) have no pageable map. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
				(vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/*
	 * Walk down the shadow chain: at each object first look for a
	 * resident page, then translate the offset into the shadow.
	 */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
				p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
				p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* Not resident anywhere: the bottom object's pager must have it. */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	if (!KREAD((long)swpager.sw_blocks +
			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
			&swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Swap offset of the page, and the bytes left in its block. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1398
1399
1400 #define atop(x) (((unsigned)(x)) >> page_shift)
1401 #define vm_page_hash(object, offset) \
1402 (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1403
/*
 * findpage - search the kernel's vm_page hash table for a resident
 * page belonging to (object, offset).  On first use the hashing
 * parameters (vm_page_hash_mask, page_shift, vm_page_buckets) are
 * read from the kernel via the nl[] symbol table and cached in
 * function statics for subsequent calls.
 *
 * If the page is found, its physical address is stored in *maddr
 * and 1 is returned; otherwise returns 0 (also on read errors,
 * with a message recorded via seterr()).
 */
static int
findpage(object, offset, maddr)
	long object;
	long offset;
	vm_offset_t *maddr;
{
	static long vm_page_hash_mask;	/* cached kernel hash mask */
	static long vm_page_buckets;	/* cached bucket array address */
	static long page_shift;		/* cached page shift (used by atop) */
	queue_head_t bucket;
	struct vm_page mem;
	long addr, baddr;

	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
			&vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
			&page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
			&vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	/* Kernel address of the hash bucket for (object, offset). */
	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/*
	 * Walk the circular hash chain; it ends when a link points
	 * back at the bucket head itself.
	 */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
1453 #endif /* NEWVM */
1454
1455 #include <varargs.h>
1456 static char errbuf[_POSIX2_LINE_MAX];
1457
1458 static void
1459 seterr(va_alist)
1460 va_dcl
1461 {
1462 char *fmt;
1463 va_list ap;
1464
1465 va_start(ap);
1466 fmt = va_arg(ap, char *);
1467 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1468 #if DEBUG
1469 (void) vfprintf(stderr, fmt, ap);
1470 #endif
1471 va_end(ap);
1472 }
1473
1474 static void
1475 setsyserr(va_alist)
1476 va_dcl
1477 {
1478 char *fmt, *cp;
1479 va_list ap;
1480 extern int errno;
1481
1482 va_start(ap);
1483 fmt = va_arg(ap, char *);
1484 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1485 for (cp=errbuf; *cp; cp++)
1486 ;
1487 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1488 va_end(ap);
1489 }
1490
1491 char *
1492 kvm_geterr()
1493 {
1494 return (errbuf);
1495 }
1496