kvm.c revision 1.6 1 /*-
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
34 #if defined(LIBC_SCCS) && !defined(lint)
35 /* from: static char sccsid[] = "@(#)kvm.c 5.18 (Berkeley) 5/7/91"; */
36 static char rcsid[] = "$Id: kvm.c,v 1.6 1993/05/20 10:30:31 cgd Exp $";
37 #endif /* LIBC_SCCS and not lint */
38
39 /*
40 * Updated for 386BSD 0.1 by David Greenman (davidg%implode (at) percy.rain.com)
41 * and Paul Kranenburg (pk (at) cs.few.eur.nl)
42 * 20-Aug-1992
43 */
44
45
46 #include <sys/param.h>
47 #include <sys/user.h>
48 #include <sys/proc.h>
49 #include <sys/ioctl.h>
50 #include <sys/kinfo.h>
51 #include <sys/select.h>
52 #include <sys/tty.h>
53 #include <machine/vmparam.h>
54 #include <fcntl.h>
55 #include <nlist.h>
56 #include <kvm.h>
57 #include <ndbm.h>
58 #include <limits.h>
59 #include <paths.h>
60 #include <stdio.h>
61 #include <string.h>
62
63 #ifdef SPPWAIT
64 #define NEWVM
65 #endif
66
67 #ifdef NEWVM
68 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
69 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
70 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
71 #include <vm/vm_page.h>
72 #include <vm/swap_pager.h>
73 #include <sys/kinfo_proc.h>
74 #ifdef hp300
75 #include <hp300/hp300/pte.h>
76 #endif
77 #else /* NEWVM */
78 #include <machine/pte.h>
79 #include <sys/vmmac.h>
80 #include <sys/text.h>
81 #endif /* NEWVM */
82
83 /*
84 * files
85 */
86 static const char *unixf, *memf, *kmemf, *swapf;
87 static int unixx, mem, kmem, swap;
88 static DBM *db;
89 /*
90 * flags
91 */
92 static int deadkernel;
93 static int kvminit = 0;
94 static int kvmfilesopen = 0;
95 /*
96 * state
97 */
98 static struct kinfo_proc *kvmprocbase, *kvmprocptr;
99 static int kvmnprocs;
100 /*
101 * u. buffer
102 */
103 static union {
104 struct user user;
105 char upages[UPAGES][NBPG];
106 } user;
107
108 #ifdef NEWVM
109 struct swapblk {
110 long offset; /* offset in swap device */
111 long size; /* remaining size of block in swap device */
112 };
113 #endif
114 /*
115 * random other stuff
116 */
117 #ifndef NEWVM
118 static struct pte *Usrptmap, *usrpt;
119 static struct pte *Sysmap;
120 static int Syssize;
121 #endif
122 static int dmmin, dmmax;
123 static int pcbpf;
124 static int argaddr0; /* XXX */
125 static int argaddr1;
126 static int swaddr;
127 static int nswap;
128 static char *tmp;
129 #if defined(hp300)
130 static int lowram;
131 static struct ste *Sysseg;
132 #endif
133 #if defined(i386)
134 static struct pde *PTD;
135 #endif
136
137 #define basename(cp) ((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
138 #define MAXSYMSIZE 256
139
140 #if defined(hp300)
141 #define pftoc(f) ((f) - lowram)
142 #define iskva(v) (1)
143 #endif
144
145 #ifndef pftoc
146 #define pftoc(f) (f)
147 #endif
148 #ifndef iskva
149 #define iskva(v) ((u_long)(v) & KERNBASE)
150 #endif
151
152 static struct nlist nl[] = {
153 { "_Usrptmap" },
154 #define X_USRPTMAP 0
155 { "_usrpt" },
156 #define X_USRPT 1
157 { "_nswap" },
158 #define X_NSWAP 2
159 { "_dmmin" },
160 #define X_DMMIN 3
161 { "_dmmax" },
162 #define X_DMMAX 4
163 { "_vm_page_buckets" },
164 #define X_VM_PAGE_BUCKETS 5
165 { "_vm_page_hash_mask" },
166 #define X_VM_PAGE_HASH_MASK 6
167 { "_page_shift" },
168 #define X_PAGE_SHIFT 7
169 /*
170 * everything here and down, only if a dead kernel
171 */
172 { "_Sysmap" },
173 #define X_SYSMAP 8
174 #define X_DEADKERNEL X_SYSMAP
175 { "_Syssize" },
176 #define X_SYSSIZE 9
177 { "_allproc" },
178 #define X_ALLPROC 10
179 { "_zombproc" },
180 #define X_ZOMBPROC 11
181 { "_nproc" },
182 #define X_NPROC 12
183 #define X_LAST 12
184 #if defined(hp300)
185 { "_Sysseg" },
186 #define X_SYSSEG (X_LAST+1)
187 { "_lowram" },
188 #define X_LOWRAM (X_LAST+2)
189 #endif
190 #if defined(i386)
191 { "_IdlePTD" },
192 #define X_IdlePTD (X_LAST+1)
193 #endif
194 { "" },
195 };
196
197 static off_t Vtophys();
198 static void klseek(), seterr(), setsyserr(), vstodb();
199 static int getkvars(), kvm_doprocs(), kvm_init();
200 #ifdef NEWVM
201 static int vatosw();
202 static int findpage();
203 #endif
204
205 /*
206 * returns 0 if files were opened now,
207 * 1 if files were already opened,
208 * -1 if files could not be opened.
209 */
210 kvm_openfiles(uf, mf, sf)
211 const char *uf, *mf, *sf;
212 {
213 if (kvmfilesopen)
214 return (1);
215 unixx = mem = kmem = swap = -1;
216 unixf = (uf == NULL) ? _PATH_UNIX : uf;
217 memf = (mf == NULL) ? _PATH_MEM : mf;
218
219 if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
220 setsyserr("can't open %s", unixf);
221 goto failed;
222 }
223 if ((mem = open(memf, O_RDONLY, 0)) == -1) {
224 setsyserr("can't open %s", memf);
225 goto failed;
226 }
227 if (sf != NULL)
228 swapf = sf;
229 if (mf != NULL) {
230 deadkernel++;
231 kmemf = mf;
232 kmem = mem;
233 swap = -1;
234 } else {
235 kmemf = _PATH_KMEM;
236 if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
237 setsyserr("can't open %s", kmemf);
238 goto failed;
239 }
240 swapf = (sf == NULL) ? _PATH_DRUM : sf;
241 /*
242 * live kernel - avoid looking up nlist entries
243 * past X_DEADKERNEL.
244 */
245 nl[X_DEADKERNEL].n_name = "";
246 }
247 if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
248 seterr("can't open %s", swapf);
249 goto failed;
250 }
251 kvmfilesopen++;
252 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
253 return (-1);
254 return (0);
255 failed:
256 kvm_close();
257 return (-1);
258 }
259
260 static
261 kvm_init(uf, mf, sf)
262 char *uf, *mf, *sf;
263 {
264 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
265 return (-1);
266 if (getkvars() == -1)
267 return (-1);
268 kvminit = 1;
269
270 return (0);
271 }
272
273 kvm_close()
274 {
275 if (unixx != -1) {
276 close(unixx);
277 unixx = -1;
278 }
279 if (kmem != -1) {
280 if (kmem != mem)
281 close(kmem);
282 /* otherwise kmem is a copy of mem, and will be closed below */
283 kmem = -1;
284 }
285 if (mem != -1) {
286 close(mem);
287 mem = -1;
288 }
289 if (swap != -1) {
290 close(swap);
291 swap = -1;
292 }
293 if (db != NULL) {
294 dbm_close(db);
295 db = NULL;
296 }
297 kvminit = 0;
298 kvmfilesopen = 0;
299 deadkernel = 0;
300 #ifndef NEWVM
301 if (Sysmap) {
302 free(Sysmap);
303 Sysmap = NULL;
304 }
305 #endif
306 }
307
308 kvm_nlist(nl)
309 struct nlist *nl;
310 {
311 datum key, data;
312 char dbname[MAXPATHLEN];
313 char dbversion[_POSIX2_LINE_MAX];
314 char kversion[_POSIX2_LINE_MAX];
315 int dbversionlen;
316 char symbuf[MAXSYMSIZE];
317 struct nlist nbuf, *n;
318 int num, did;
319
320 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
321 return (-1);
322 if (deadkernel)
323 goto hard2;
324 /*
325 * initialize key datum
326 */
327 key.dptr = symbuf;
328
329 if (db != NULL)
330 goto win; /* off to the races */
331 /*
332 * open database
333 */
334 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
335 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
336 goto hard2;
337 /*
338 * read version out of database
339 */
340 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
341 key.dsize = (sizeof ("VERSION") - 1);
342 data = dbm_fetch(db, key);
343 if (data.dptr == NULL)
344 goto hard1;
345 bcopy(data.dptr, dbversion, data.dsize);
346 dbversionlen = data.dsize;
347 /*
348 * read version string from kernel memory
349 */
350 bcopy("_version", symbuf, sizeof ("_version")-1);
351 key.dsize = (sizeof ("_version")-1);
352 data = dbm_fetch(db, key);
353 if (data.dptr == NULL)
354 goto hard1;
355 if (data.dsize != sizeof (struct nlist))
356 goto hard1;
357 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
358 lseek(kmem, nbuf.n_value, 0);
359 if (read(kmem, kversion, dbversionlen) != dbversionlen)
360 goto hard1;
361 /*
362 * if they match, we win - otherwise do it the hard way
363 */
364 if (bcmp(dbversion, kversion, dbversionlen) != 0)
365 goto hard1;
366 /*
367 * getem from the database.
368 */
369 win:
370 num = did = 0;
371 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
372 int len;
373 /*
374 * clear out fields from users buffer
375 */
376 n->n_type = 0;
377 n->n_other = 0;
378 n->n_desc = 0;
379 n->n_value = 0;
380 /*
381 * query db
382 */
383 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
384 seterr("symbol too large");
385 return (-1);
386 }
387 (void)strcpy(symbuf, n->n_name);
388 key.dsize = len;
389 data = dbm_fetch(db, key);
390 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
391 continue;
392 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
393 n->n_value = nbuf.n_value;
394 n->n_type = nbuf.n_type;
395 n->n_desc = nbuf.n_desc;
396 n->n_other = nbuf.n_other;
397 did++;
398 }
399 return (num - did);
400 hard1:
401 dbm_close(db);
402 db = NULL;
403 hard2:
404 num = nlist(unixf, nl);
405 if (num == -1)
406 seterr("nlist (hard way) failed");
407 return (num);
408 }
409
410 kvm_getprocs(what, arg)
411 int what, arg;
412 {
413 static int ocopysize = -1;
414
415 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
416 return (NULL);
417 if (!deadkernel) {
418 int ret, copysize;
419
420 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
421 setsyserr("can't get estimate for kerninfo");
422 return (-1);
423 }
424 copysize = ret;
425 if (copysize > ocopysize || !kvmprocbase) {
426 if (ocopysize == -1 || !kvmprocbase)
427 kvmprocbase =
428 (struct kinfo_proc *)malloc(copysize);
429 else
430 kvmprocbase =
431 (struct kinfo_proc *)realloc(kvmprocbase,
432 copysize);
433 if (!kvmprocbase) {
434 seterr("out of memory");
435 return (-1);
436 }
437 }
438 ocopysize = copysize;
439 if ((ret = getkerninfo(what, kvmprocbase, ©size,
440 arg)) == -1) {
441 setsyserr("can't get proc list");
442 return (-1);
443 }
444 if (copysize % sizeof (struct kinfo_proc)) {
445 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
446 copysize, sizeof (struct kinfo_proc));
447 return (-1);
448 }
449 kvmnprocs = copysize / sizeof (struct kinfo_proc);
450 } else {
451 int nproc;
452
453 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
454 sizeof (int)) != sizeof (int)) {
455 seterr("can't read nproc");
456 return (-1);
457 }
458 if ((kvmprocbase = (struct kinfo_proc *)
459 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
460 seterr("out of memory (addr: %x nproc = %d)",
461 nl[X_NPROC].n_value, nproc);
462 return (-1);
463 }
464 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
465 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
466 }
467 kvmprocptr = kvmprocbase;
468
469 return (kvmnprocs);
470 }
471
472 /*
473 * XXX - should NOT give up so easily - especially since the kernel
474 * may be corrupt (it died). Should gather as much information as possible.
475 * Follows proc ptrs instead of reading table since table may go
476 * away soon.
477 */
478 static
479 kvm_doprocs(what, arg, buff)
480 int what, arg;
481 char *buff;
482 {
483 struct proc *p, proc;
484 register char *bp = buff;
485 int i = 0;
486 int doingzomb = 0;
487 struct eproc eproc;
488 struct pgrp pgrp;
489 struct session sess;
490 struct tty tty;
491 #ifndef NEWVM
492 struct text text;
493 #endif
494
495 /* allproc */
496 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
497 sizeof (struct proc *)) != sizeof (struct proc *)) {
498 seterr("can't read allproc");
499 return (-1);
500 }
501
502 again:
503 for (; p; p = proc.p_nxt) {
504 if (kvm_read(p, &proc, sizeof (struct proc)) !=
505 sizeof (struct proc)) {
506 seterr("can't read proc at %x", p);
507 return (-1);
508 }
509 #ifdef NEWVM
510 if (kvm_read(proc.p_cred, &eproc.e_pcred,
511 sizeof (struct pcred)) == sizeof (struct pcred))
512 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
513 sizeof (struct ucred));
514 switch(ki_op(what)) {
515
516 case KINFO_PROC_PID:
517 if (proc.p_pid != (pid_t)arg)
518 continue;
519 break;
520
521
522 case KINFO_PROC_UID:
523 if (eproc.e_ucred.cr_uid != (uid_t)arg)
524 continue;
525 break;
526
527 case KINFO_PROC_RUID:
528 if (eproc.e_pcred.p_ruid != (uid_t)arg)
529 continue;
530 break;
531 }
532 #else
533 switch(ki_op(what)) {
534
535 case KINFO_PROC_PID:
536 if (proc.p_pid != (pid_t)arg)
537 continue;
538 break;
539
540
541 case KINFO_PROC_UID:
542 if (proc.p_uid != (uid_t)arg)
543 continue;
544 break;
545
546 case KINFO_PROC_RUID:
547 if (proc.p_ruid != (uid_t)arg)
548 continue;
549 break;
550 }
551 #endif
552 /*
553 * gather eproc
554 */
555 eproc.e_paddr = p;
556 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
557 sizeof (struct pgrp)) {
558 seterr("can't read pgrp at %x", proc.p_pgrp);
559 return (-1);
560 }
561 eproc.e_sess = pgrp.pg_session;
562 eproc.e_pgid = pgrp.pg_id;
563 eproc.e_jobc = pgrp.pg_jobc;
564 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
565 != sizeof (struct session)) {
566 seterr("can't read session at %x", pgrp.pg_session);
567 return (-1);
568 }
569 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
570 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
571 != sizeof (struct tty)) {
572 seterr("can't read tty at %x", sess.s_ttyp);
573 return (-1);
574 }
575 eproc.e_tdev = tty.t_dev;
576 eproc.e_tsess = tty.t_session;
577 if (tty.t_pgrp != NULL) {
578 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
579 pgrp)) != sizeof (struct pgrp)) {
580 seterr("can't read tpgrp at &x",
581 tty.t_pgrp);
582 return (-1);
583 }
584 eproc.e_tpgid = pgrp.pg_id;
585 } else
586 eproc.e_tpgid = -1;
587 } else
588 eproc.e_tdev = NODEV;
589 if (proc.p_wmesg)
590 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
591 #ifdef NEWVM
592 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
593 sizeof (struct vmspace));
594 eproc.e_xsize = eproc.e_xrssize =
595 eproc.e_xccount = eproc.e_xswrss = 0;
596 #else
597 if (proc.p_textp) {
598 kvm_read(proc.p_textp, &text, sizeof (text));
599 eproc.e_xsize = text.x_size;
600 eproc.e_xrssize = text.x_rssize;
601 eproc.e_xccount = text.x_ccount;
602 eproc.e_xswrss = text.x_swrss;
603 } else {
604 eproc.e_xsize = eproc.e_xrssize =
605 eproc.e_xccount = eproc.e_xswrss = 0;
606 }
607 #endif
608
609 switch(ki_op(what)) {
610
611 case KINFO_PROC_PGRP:
612 if (eproc.e_pgid != (pid_t)arg)
613 continue;
614 break;
615
616 case KINFO_PROC_TTY:
617 if ((proc.p_flag&SCTTY) == 0 ||
618 eproc.e_tdev != (dev_t)arg)
619 continue;
620 break;
621 }
622
623 i++;
624 bcopy(&proc, bp, sizeof (struct proc));
625 bp += sizeof (struct proc);
626 bcopy(&eproc, bp, sizeof (struct eproc));
627 bp+= sizeof (struct eproc);
628 }
629 if (!doingzomb) {
630 /* zombproc */
631 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
632 sizeof (struct proc *)) != sizeof (struct proc *)) {
633 seterr("can't read zombproc");
634 return (-1);
635 }
636 doingzomb = 1;
637 goto again;
638 }
639
640 return (i);
641 }
642
643 struct proc *
644 kvm_nextproc()
645 {
646
647 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
648 return (NULL);
649 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
650 seterr("end of proc list");
651 return (NULL);
652 }
653 return((struct proc *)(kvmprocptr++));
654 }
655
656 struct eproc *
657 kvm_geteproc(p)
658 const struct proc *p;
659 {
660 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
661 }
662
/*
 * Rewind the iterator used by kvm_nextproc() back to the start of the
 * process list gathered by kvm_getprocs().
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
667
668 kvm_freeprocs()
669 {
670
671 if (kvmprocbase) {
672 free(kvmprocbase);
673 kvmprocbase = NULL;
674 }
675 }
676
677 #ifdef i386
678 /* See also ./sys/kern/kern_execve.c */
679 #define ARGSIZE (roundup(ARG_MAX, NBPG))
680 #endif
681
682 #ifdef NEWVM
683 struct user *
684 kvm_getu(p)
685 const struct proc *p;
686 {
687 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
688 register int i;
689 register char *up;
690 u_int vaddr;
691 struct swapblk swb;
692
693 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
694 return (NULL);
695 if (p->p_stat == SZOMB) {
696 seterr("zombie process");
697 return (NULL);
698 }
699
700 argaddr0 = argaddr1 = swaddr = 0;
701 if ((p->p_flag & SLOAD) == 0) {
702 vm_offset_t maddr;
703
704 if (swap < 0) {
705 seterr("no swap");
706 return (NULL);
707 }
708 /*
709 * Costly operation, better set enable_swap to zero
710 * in vm/vm_glue.c, since paging of user pages isn't
711 * done yet anyway.
712 */
713 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
714 return NULL;
715
716 if (maddr == 0 && swb.size < UPAGES * NBPG)
717 return NULL;
718
719 for (i = 0; i < UPAGES; i++) {
720 if (maddr) {
721 (void) lseek(mem, maddr + i * NBPG, 0);
722 if (read(mem,
723 (char *)user.upages[i], NBPG) != NBPG) {
724 seterr(
725 "can't read u for pid %d from %s",
726 p->p_pid, swapf);
727 return NULL;
728 }
729 } else {
730 (void) lseek(swap, swb.offset + i * NBPG, 0);
731 if (read(swap,
732 (char *)user.upages[i], NBPG) != NBPG) {
733 seterr(
734 "can't read u for pid %d from %s",
735 p->p_pid, swapf);
736 return NULL;
737 }
738 }
739 }
740 return(&user.user);
741 }
742 /*
743 * Read u-area one page at a time for the benefit of post-mortems
744 */
745 up = (char *) p->p_addr;
746 for (i = 0; i < UPAGES; i++) {
747 klseek(kmem, (long)up, 0);
748 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
749 seterr("cant read page %x of u of pid %d from %s",
750 up, p->p_pid, kmemf);
751 return(NULL);
752 }
753 up += CLBYTES;
754 }
755 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
756 /*
757 * Conjure up a physical address for the arguments.
758 */
759 #ifdef hp300
760 if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
761 struct pte pte[CLSIZE*2];
762
763 klseek(kmem,
764 (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
765 [btoc(USRSTACK-CLBYTES*2)], 0);
766 if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
767 #if CLBYTES < 2048
768 argaddr0 = ctob(pftoc(pte[CLSIZE*0].pg_pfnum));
769 #endif
770 argaddr1 = ctob(pftoc(pte[CLSIZE*1].pg_pfnum));
771 }
772 }
773 #endif
774 kp->kp_eproc.e_vm.vm_rssize =
775 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
776
777 vaddr = (u_int)kp->kp_eproc.e_vm.vm_maxsaddr + MAXSSIZ - ARGSIZE;
778
779 #ifdef i386
780 if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
781 struct pde pde;
782
783 klseek(kmem,
784 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(vaddr)]), 0);
785
786 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
787 && pde.pd_v) {
788
789 struct pte pte;
790
791 if (lseek(mem, (long)ctob(pde.pd_pfnum) +
792 (ptei(vaddr) * sizeof pte), 0) == -1)
793 seterr("kvm_getu: lseek");
794 if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
795 if (pte.pg_v) {
796 argaddr1 = (long)ctob(pte.pg_pfnum);
797 } else {
798 goto hard;
799 }
800 } else {
801 seterr("kvm_getu: read");
802 }
803 } else {
804 goto hard;
805 }
806 }
807 #endif /* i386 */
808
809 hard:
810 if (vatosw(p, vaddr, &argaddr1, &swb)) {
811 if (argaddr1 == 0 && swb.size >= ARGSIZE)
812 swaddr = swb.offset;
813 }
814
815 return(&user.user);
816 }
817 #else
/*
 * Fetch the u-area of process `p' into the static `user' buffer
 * (pre-NEWVM variant).  A swapped-out process is read contiguously
 * from the swap device; a resident one is located through Usrptmap
 * and the per-process page tables.  Also records argaddr0/argaddr1
 * and pcbpf for kvm_getargs().  Returns a pointer to the u-area, or
 * NULL on failure.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	/*
	 * NOTE(review): kvm_init() is declared with three parameters;
	 * the trailing 0 is a stray extra argument tolerated by K&R.
	 */
	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	if ((p->p_flag & SLOAD) == 0) {
		/* swapped out: the whole u-area lives on swap */
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* resident: fetch the indirect pte that maps the upage cluster */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	/* read the tail of the page table: argument ptes + upage ptes */
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	/* record physical addresses of the two argument clusters */
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* copy the u-area in, one click (CLBYTES) at a time */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		    (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
885 #endif
886
/*
 * Reconstruct the command line (argv strings) of process `p' whose
 * u-area `up' was fetched by kvm_getu().  The argument pages located
 * earlier (argaddr0/argaddr1/swaddr) are read from core or swap and
 * scanned into a printable string; falls back to p_comm in
 * parentheses when the arguments are unavailable or unprintable.
 * Returns a pointer to a static buffer (overwritten on each call).
 */
char *
kvm_getargs(p, up)
	const struct proc *p;
	const struct user *up;
{
#ifdef i386
	/* See also ./sys/kern/kern_execve.c */
	static char cmdbuf[ARGSIZE];
	static union {
		char argc[ARGSIZE];
		int argi[ARGSIZE/sizeof (int)];
	} argspac;
#else
	static char cmdbuf[CLBYTES*2];
	static union {
		char argc[CLBYTES*2];
		int argi[CLBYTES*2/sizeof (int)];
	} argspac;
#endif
	register char *cp;
	register int *ip;
	char c;
	int nbad;
#ifndef NEWVM
	struct dblock db;
#endif
	const char *file;
	int stkoff = 0;

#if defined(NEWVM) && defined(hp300)
	stkoff = 20;			/* XXX for sigcode */
#endif
	/* no arguments for the swapper (0) and pagedaemon (2) */
	if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
		goto retucomm;
	/* not resident, or argument page unknown: read from swap */
	if ((p->p_flag & SLOAD) == 0 || argaddr1 == 0) {
#ifdef NEWVM
		if (swaddr == 0)
			goto retucomm;	/* XXX for now */
#ifdef i386
		(void) lseek(swap, swaddr, 0);
		if (read(swap, &argspac.argc[0], ARGSIZE) != ARGSIZE)
			goto bad;
#else
		if (argaddr0) {
			lseek(swap, (long)argaddr0, 0);
			if (read(swap, (char *)&argspac, CLBYTES) != CLBYTES)
				goto bad;
		} else
			bzero(&argspac, CLBYTES);
		lseek(swap, (long)argaddr1, 0);
		if (read(swap, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
			goto bad;
#endif
#else
		if (swap < 0 || p->p_ssize == 0)
			goto retucomm;
		/* map the top two stack clusters through the swap map */
		vstodb(0, CLSIZE, &up->u_smap, &db, 1);
		(void) lseek(swap, (long)dtob(db.db_base), 0);
		if (read(swap, (char *)&argspac.argc[CLBYTES], CLBYTES)
			!= CLBYTES)
			goto bad;
		vstodb(1, CLSIZE, &up->u_smap, &db, 1);
		(void) lseek(swap, (long)dtob(db.db_base), 0);
		if (read(swap, (char *)&argspac.argc[0], CLBYTES) != CLBYTES)
			goto bad;
		file = swapf;
#endif
	} else {
		/* resident: read the argument pages from physical memory */
#ifdef i386
		lseek(mem, (long)argaddr1, 0);
		if (read(mem, &argspac.argc[0], ARGSIZE) != ARGSIZE)
			goto bad;
#else
		if (argaddr0) {
			lseek(mem, (long)argaddr0, 0);
			if (read(mem, (char *)&argspac, CLBYTES) != CLBYTES)
				goto bad;
		} else
			bzero(&argspac, CLBYTES);
		lseek(mem, (long)argaddr1, 0);
		if (read(mem, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
			goto bad;
#endif
		file = (char *) memf;
	}

	nbad = 0;
#ifdef i386
	ip = &argspac.argi[(ARGSIZE-ARG_MAX)/sizeof (int)];

	for (cp = (char *)ip; cp < &argspac.argc[ARGSIZE-stkoff]; cp++) {
#else
	ip = &argspac.argi[CLBYTES*2/sizeof (int)];
	ip -= 2;		/* last arg word and .long 0 */
	ip -= stkoff / sizeof (int);
	/* scan backwards over the argument words to find their start */
	while (*--ip) {
		if (ip == argspac.argi)
			goto retucomm;
	}
	*(char *)ip = ' ';
	ip++;

	for (cp = (char *)ip; cp < &argspac.argc[CLBYTES*2-stkoff]; cp++) {
#endif
		c = *cp;
		if (c == 0) {	/* convert null between arguments to space */
			*cp = ' ';
			if (*(cp+1) == 0) break;	/* if null argument follows then no more args */
		}
		else if (c < ' ' || c > 0176) {
			if (++nbad >= 5*(0+1)) {	/* eflg -> 0 XXX */	/* limit number of bad chars to 5 */
				*cp++ = '?';
				break;
			}
			*cp = '?';
		}
		else if (0 == 0 && c == '=') {		/* eflg -> 0 XXX */
			/* looks like an environment string: back up a word */
			while (*--cp != ' ')
				if (cp <= (char *)ip)
					break;
			break;
		}
	}
	/* trim trailing spaces */
	*cp = 0;
	while (*--cp == ' ')
		*cp = 0;
	cp = (char *)ip;
	(void) strcpy(cmdbuf, cp);
	/* suspicious-looking result: append the p_comm name too */
	if (cp[0] == '-' || cp[0] == '?' || cp[0] <= ' ') {
		(void) strcat(cmdbuf, " (");
		(void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
		(void) strcat(cmdbuf, ")");
	}
	return (cmdbuf);

bad:
	seterr("error locating command name for pid %d from %s",
		p->p_pid, file);
retucomm:
	(void) strcpy(cmdbuf, " (");
	(void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
	(void) strcat(cmdbuf, ")");
	return (cmdbuf);
}
1031
1032
1033 static
1034 getkvars()
1035 {
1036 if (kvm_nlist(nl) == -1)
1037 return (-1);
1038 if (deadkernel) {
1039 /* We must do the sys map first because klseek uses it */
1040 long addr;
1041
1042 #ifndef NEWVM
1043 Syssize = nl[X_SYSSIZE].n_value;
1044 Sysmap = (struct pte *)
1045 calloc((unsigned) Syssize, sizeof (struct pte));
1046 if (Sysmap == NULL) {
1047 seterr("out of space for Sysmap");
1048 return (-1);
1049 }
1050 addr = (long) nl[X_SYSMAP].n_value;
1051 addr &= ~KERNBASE;
1052 (void) lseek(kmem, addr, 0);
1053 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1054 != Syssize * sizeof (struct pte)) {
1055 seterr("can't read Sysmap");
1056 return (-1);
1057 }
1058 #endif
1059 #if defined(hp300)
1060 addr = (long) nl[X_LOWRAM].n_value;
1061 (void) lseek(kmem, addr, 0);
1062 if (read(kmem, (char *) &lowram, sizeof (lowram))
1063 != sizeof (lowram)) {
1064 seterr("can't read lowram");
1065 return (-1);
1066 }
1067 lowram = btop(lowram);
1068 Sysseg = (struct ste *) malloc(NBPG);
1069 if (Sysseg == NULL) {
1070 seterr("out of space for Sysseg");
1071 return (-1);
1072 }
1073 addr = (long) nl[X_SYSSEG].n_value;
1074 (void) lseek(kmem, addr, 0);
1075 read(kmem, (char *)&addr, sizeof(addr));
1076 (void) lseek(kmem, (long)addr, 0);
1077 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1078 seterr("can't read Sysseg");
1079 return (-1);
1080 }
1081 #endif
1082 #if defined(i386)
1083 PTD = (struct pde *) malloc(NBPG);
1084 if (PTD == NULL) {
1085 seterr("out of space for PTD");
1086 return (-1);
1087 }
1088 addr = (long) nl[X_IdlePTD].n_value;
1089 (void) lseek(kmem, addr, 0);
1090 read(kmem, (char *)&addr, sizeof(addr));
1091 (void) lseek(kmem, (long)addr, 0);
1092 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1093 seterr("can't read PTD");
1094 return (-1);
1095 }
1096 #endif
1097 }
1098 #ifndef NEWVM
1099 usrpt = (struct pte *)nl[X_USRPT].n_value;
1100 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1101 #endif
1102 if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1103 sizeof (long)) {
1104 seterr("can't read nswap");
1105 return (-1);
1106 }
1107 if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1108 sizeof (long)) {
1109 seterr("can't read dmmin");
1110 return (-1);
1111 }
1112 if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1113 sizeof (long)) {
1114 seterr("can't read dmmax");
1115 return (-1);
1116 }
1117 return (0);
1118 }
1119
/*
 * Read `len' bytes of kernel data at address `loc' into `buf'.
 * Kernel virtual addresses (per iskva()) are read from kmem via
 * klseek(), which translates them on a dead kernel; other addresses
 * are treated as physical and read from mem.
 * Returns len on success, -1 on failure.
 * (`len' is not declared and so defaults to int, K&R style.)
 */
kvm_read(loc, buf, len)
	void *loc;
	void *buf;
{
	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
		return (-1);
	if (iskva(loc)) {
		klseek(kmem, (off_t) loc, 0);
		if (read(kmem, buf, len) != len) {
			seterr("error reading kmem at %x", loc);
			return (-1);
		}
	} else {
		lseek(mem, (off_t) loc, 0);
		if (read(mem, buf, len) != len) {
			seterr("error reading mem at %x", loc);
			return (-1);
		}
	}
	return (len);
}
1141
1142 static void
1143 klseek(fd, loc, off)
1144 int fd;
1145 off_t loc;
1146 int off;
1147 {
1148
1149 if (deadkernel) {
1150 if ((loc = Vtophys(loc)) == -1)
1151 return;
1152 }
1153 (void) lseek(fd, (off_t)loc, off);
1154 }
1155
1156 #ifndef NEWVM
1157 /*
1158 * Given a base/size pair in virtual swap area,
1159 * return a physical base/size pair which is the
1160 * (largest) initial, physically contiguous block.
1161 */
1162 static void
1163 vstodb(vsbase, vssize, dmp, dbp, rev)
1164 register int vsbase;
1165 int vssize;
1166 struct dmap *dmp;
1167 register struct dblock *dbp;
1168 {
1169 register int blk = dmmin;
1170 register swblk_t *ip = dmp->dm_map;
1171
1172 vsbase = ctod(vsbase);
1173 vssize = ctod(vssize);
1174 if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1175 /*panic("vstodb")*/;
1176 while (vsbase >= blk) {
1177 vsbase -= blk;
1178 if (blk < dmmax)
1179 blk *= 2;
1180 ip++;
1181 }
1182 if (*ip <= 0 || *ip + blk > nswap)
1183 /*panic("vstodb")*/;
1184 dbp->db_size = MIN(vssize, blk - vsbase);
1185 dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1186 }
1187 #endif
1188
1189 #ifdef NEWVM
/*
 * Translate kernel virtual address `loc' to a physical file offset
 * for a dead kernel (NEWVM variant), walking the translation tables
 * copied in by getkvars(): Sysseg on hp300, PTD on i386.
 * Returns (off_t) -1 on failure.
 */
static off_t
Vtophys(loc)
	u_long loc;
{
	off_t newloc = (off_t) -1;
#ifdef hp300
	int p, ste, pte;

	/* segment table entry covering loc */
	ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	/* fetch the pte from the page table page (lowram-relative) */
	(void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	/* page directory entry covering loc */
	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
1243 #else
/*
 * vtophys - translate a kernel virtual address to a physical address
 * (pre-NEWVM configurations) using the in-core copy of the system
 * page table (Sysmap).
 *
 * loc:	kernel virtual address to translate.
 * Returns the physical address, or (off_t)-1 after recording a message
 * with seterr() on failure.
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* Strip the kernel-space tag bits, then get the page number. */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	/* On these machines only addresses with KERNBASE set are kernel. */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	/*
	 * NOTE(review): assumes Sysmap/Syssize were filled in from the
	 * kernel elsewhere in this file -- confirm.  An invalid PTE is
	 * still accepted if it is neither fill-on-demand nor frame 0,
	 * presumably to allow pages that are merely unreferenced.
	 */
	pte = &Sysmap[p];
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* Frames below lowram are not RAM on hp300. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* Convert the frame number to bytes and add the page offset. */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1278 #endif
1279
1280
1281 #ifdef NEWVM
1282 /*
1283 * locate address of unwired or swapped page
1284 */
1285
1286 #define DEBUG 0
1287
1288 #define KREAD(off, addr, len) \
1289 (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1290
1291
1292 static int
1293 vatosw(p, vaddr, maddr, swb)
1294 struct proc *p ;
1295 vm_offset_t vaddr;
1296 vm_offset_t *maddr;
1297 struct swapblk *swb;
1298 {
1299 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
1300 vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
1301 struct vm_object vm_object;
1302 struct vm_map_entry vm_entry;
1303 struct pager_struct pager;
1304 struct swpager swpager;
1305 struct swblock swblock;
1306 long addr, off;
1307 int i;
1308
1309 if (p->p_pid == 0 || p->p_pid == 2)
1310 return 0;
1311
1312 addr = (long)mp->header.next;
1313 for (i = 0; i < mp->nentries; i++) {
1314 /* Weed through map entries until vaddr in range */
1315 if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
1316 setsyserr("vatosw: read vm_map_entry");
1317 return 0;
1318 }
1319 if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
1320 (vm_entry.object.vm_object != 0))
1321 break;
1322
1323 addr = (long)vm_entry.next;
1324 }
1325 if (i == mp->nentries) {
1326 seterr("%u: map not found\n", p->p_pid);
1327 return 0;
1328 }
1329
1330 if (vm_entry.is_a_map || vm_entry.is_sub_map) {
1331 seterr("%u: Is a map\n", p->p_pid);
1332 return 0;
1333 }
1334
1335 /* Locate memory object */
1336 off = (vaddr - vm_entry.start) + vm_entry.offset;
1337 addr = (long)vm_entry.object.vm_object;
1338 while (1) {
1339 if (!KREAD(addr, &vm_object, sizeof vm_object)) {
1340 setsyserr("vatosw: read vm_object");
1341 return 0;
1342 }
1343
1344 #if DEBUG
1345 fprintf(stderr, "%u: find page: object %#x offset %x\n",
1346 p->p_pid, addr, off);
1347 #endif
1348
1349 /* Lookup in page queue */
1350 if (findpage(addr, off, maddr))
1351 return 1;
1352
1353 if (vm_object.shadow == 0)
1354 break;
1355
1356 #if DEBUG
1357 fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
1358 p->p_pid, addr, off, vm_object.shadow_offset);
1359 #endif
1360
1361 addr = (long)vm_object.shadow;
1362 off += vm_object.shadow_offset;
1363 }
1364
1365 if (!vm_object.pager) {
1366 seterr("%u: no pager\n", p->p_pid);
1367 return 0;
1368 }
1369
1370 /* Find address in swap space */
1371 if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
1372 setsyserr("vatosw: read pager");
1373 return 0;
1374 }
1375 if (pager.pg_type != PG_SWAP) {
1376 seterr("%u: weird pager\n", p->p_pid);
1377 return 0;
1378 }
1379
1380 /* Get swap pager data */
1381 if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
1382 setsyserr("vatosw: read swpager");
1383 return 0;
1384 }
1385
1386 off += vm_object.paging_offset;
1387
1388 /* Read swap block array */
1389 if (!KREAD((long)swpager.sw_blocks +
1390 (off/dbtob(swpager.sw_bsize)) * sizeof swblock,
1391 &swblock, sizeof swblock)) {
1392 setsyserr("vatosw: read swblock");
1393 return 0;
1394 }
1395 swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
1396 swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
1397 return 1;
1398 }
1399
1400
1401 #define atop(x) (((unsigned)(x)) >> page_shift)
1402 #define vm_page_hash(object, offset) \
1403 (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1404
1405 static int
1406 findpage(object, offset, maddr)
1407 long object;
1408 long offset;
1409 vm_offset_t *maddr;
1410 {
1411 static long vm_page_hash_mask;
1412 static long vm_page_buckets;
1413 static long page_shift;
1414 queue_head_t bucket;
1415 struct vm_page mem;
1416 long addr, baddr;
1417
1418 if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
1419 &vm_page_hash_mask, sizeof (long))) {
1420 seterr("can't read vm_page_hash_mask");
1421 return 0;
1422 }
1423 if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
1424 &page_shift, sizeof (long))) {
1425 seterr("can't read page_shift");
1426 return 0;
1427 }
1428 if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
1429 &vm_page_buckets, sizeof (long))) {
1430 seterr("can't read vm_page_buckets");
1431 return 0;
1432 }
1433
1434 baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
1435 if (!KREAD(baddr, &bucket, sizeof (bucket))) {
1436 seterr("can't read vm_page_bucket");
1437 return 0;
1438 }
1439
1440 addr = (long)bucket.next;
1441 while (addr != baddr) {
1442 if (!KREAD(addr, &mem, sizeof (mem))) {
1443 seterr("can't read vm_page");
1444 return 0;
1445 }
1446 if ((long)mem.object == object && mem.offset == offset) {
1447 *maddr = (long)mem.phys_addr;
1448 return 1;
1449 }
1450 addr = (long)mem.hashq.next;
1451 }
1452 return 0;
1453 }
1454 #endif /* NEWVM */
1455
1456 #include <varargs.h>
1457 static char errbuf[_POSIX2_LINE_MAX];
1458
1459 static void
1460 seterr(va_alist)
1461 va_dcl
1462 {
1463 char *fmt;
1464 va_list ap;
1465
1466 va_start(ap);
1467 fmt = va_arg(ap, char *);
1468 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1469 #if DEBUG
1470 (void) vfprintf(stderr, fmt, ap);
1471 #endif
1472 va_end(ap);
1473 }
1474
1475 static void
1476 setsyserr(va_alist)
1477 va_dcl
1478 {
1479 char *fmt, *cp;
1480 va_list ap;
1481 extern int errno;
1482
1483 va_start(ap);
1484 fmt = va_arg(ap, char *);
1485 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1486 for (cp=errbuf; *cp; cp++)
1487 ;
1488 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1489 va_end(ap);
1490 }
1491
1492 char *
1493 kvm_geterr()
1494 {
1495 return (errbuf);
1496 }
1497