/*-
2 * Copyright (c) 1989 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * PATCHES MAGIC LEVEL PATCH THAT GOT US HERE
34 * -------------------- ----- ----------------------
35 * CURRENT PATCH LEVEL: 2 00052
36 * -------------------- ----- ----------------------
37 *
38 * 08 Sep 92 Greenman & Kranenburg Change vaddr calc, move bogus #endif
39 * 05 Aug 92 David Greenman Fix kernel namelist db create/use
40 */
41
42 #if defined(LIBC_SCCS) && !defined(lint)
43 static char sccsid[] = "@(#)kvm.c 5.18 (Berkeley) 5/7/91";
44 #endif /* LIBC_SCCS and not lint */
45
46 /*
47 * Updated for 386BSD 0.1 by David Greenman (davidg%implode (at) percy.rain.com)
48 * and Paul Kranenburg (pk (at) cs.few.eur.nl)
49 * 20-Aug-1992
50 */
51
52
53 #include <sys/param.h>
54 #include <sys/user.h>
55 #include <sys/proc.h>
56 #include <sys/ioctl.h>
57 #include <sys/kinfo.h>
58 #include <sys/tty.h>
59 #include <machine/vmparam.h>
60 #include <fcntl.h>
61 #include <nlist.h>
62 #include <kvm.h>
63 #include <ndbm.h>
64 #include <limits.h>
65 #include <paths.h>
66 #include <stdio.h>
67 #include <string.h>
68
69 #ifdef SPPWAIT
70 #define NEWVM
71 #endif
72
73 #ifdef NEWVM
74 #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
75 #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
76 #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
77 #include <vm/vm_page.h>
78 #include <vm/swap_pager.h>
79 #include <sys/kinfo_proc.h>
80 #ifdef hp300
81 #include <hp300/hp300/pte.h>
82 #endif
83 #else /* NEWVM */
84 #include <machine/pte.h>
85 #include <sys/vmmac.h>
86 #include <sys/text.h>
87 #endif /* NEWVM */
88
89 /*
90 * files
91 */
/* pathnames of the kernel image, physical memory, kernel memory and swap */
92 static const char *unixf, *memf, *kmemf, *swapf;
/* descriptors for the above; -1 while closed (kmem aliases mem for a dump) */
93 static int unixx, mem, kmem, swap;
/* cached symbol database (kvm_<image>.db); NULL until opened */
94 static DBM *db;
95 /*
96 * flags
97 */
/* nonzero when examining a crash dump instead of the live kernel */
98 static int deadkernel;
/* nonzero once getkvars() has succeeded */
99 static int kvminit = 0;
/* nonzero once kvm_openfiles() has succeeded */
100 static int kvmfilesopen = 0;
101 /*
102 * state
103 */
/* array of gathered processes and the kvm_nextproc() cursor into it */
104 static struct kinfo_proc *kvmprocbase, *kvmprocptr;
105 static int kvmnprocs;
106 /*
107 * u. buffer
108 */
/* u-area of the most recent kvm_getu() target, viewable as pages or struct */
109 static union {
110 struct user user;
111 char upages[UPAGES][NBPG];
112 } user;
113
114 #ifdef NEWVM
/* a contiguous run of swap space, as returned by vatosw() */
115 struct swapblk {
116 long offset; /* offset in swap device */
117 long size; /* remaining size of block in swap device */
118 };
119 #endif
120 /*
121 * random other stuff
122 */
123 #ifndef NEWVM
/* old-VM page table pointers, taken from the kernel namelist */
124 static struct pte *Usrptmap, *usrpt;
/* copy of the kernel Sysmap, read in by getkvars() for dead kernels */
125 static struct pte *Sysmap;
126 static int Syssize;
127 #endif
/* swap interleave parameters and swap size, read from the kernel */
128 static int dmmin, dmmax;
/* page frame of the pcb of the last kvm_getu() target */
129 static int pcbpf;
/* physical/swap locations of the argument pages, set up by kvm_getu()
 * and consumed by kvm_getargs() */
130 static int argaddr0; /* XXX */
131 static int argaddr1;
132 static int swaddr;
133 static int nswap;
/* scratch pointer for the basename() macro below */
134 static char *tmp;
135 #if defined(hp300)
136 static int lowram;
137 static struct ste *Sysseg;
138 #endif
139 #if defined(i386)
/* copy of the kernel page directory (IdlePTD), for dead kernels */
140 static struct pde *PTD;
141 #endif
142
/* NOTE(review): uses the file-scope `tmp' scratch -- not reentrant */
143 #define basename(cp) ((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
/* longest symbol name accepted by kvm_nlist() */
144 #define MAXSYMSIZE 256
145
146 #if defined(hp300)
/* hp300: page frames are offset by the start of RAM; all addrs are kva */
147 #define pftoc(f) ((f) - lowram)
148 #define iskva(v) (1)
149 #endif
150
151 #ifndef pftoc
152 #define pftoc(f) (f)
153 #endif
154 #ifndef iskva
/* an address is a kernel virtual address iff it has the KERNBASE bit(s) */
155 #define iskva(v) ((u_long)(v) & KERNBASE)
156 #endif
157
/*
 * Kernel symbols looked up at initialization.  The X_* indices below
 * must track the table order exactly; entries from X_DEADKERNEL on are
 * only needed (and only looked up) when examining a crash dump.
 */
158 static struct nlist nl[] = {
159 { "_Usrptmap" },
160 #define X_USRPTMAP 0
161 { "_usrpt" },
162 #define X_USRPT 1
163 { "_nswap" },
164 #define X_NSWAP 2
165 { "_dmmin" },
166 #define X_DMMIN 3
167 { "_dmmax" },
168 #define X_DMMAX 4
169 { "_vm_page_buckets" },
170 #define X_VM_PAGE_BUCKETS 5
171 { "_vm_page_hash_mask" },
172 #define X_VM_PAGE_HASH_MASK 6
173 { "_page_shift" },
174 #define X_PAGE_SHIFT 7
175 /*
176 * everything here and down, only if a dead kernel
177 */
178 { "_Sysmap" },
179 #define X_SYSMAP 8
180 #define X_DEADKERNEL X_SYSMAP
181 { "_Syssize" },
182 #define X_SYSSIZE 9
183 { "_allproc" },
184 #define X_ALLPROC 10
185 { "_zombproc" },
186 #define X_ZOMBPROC 11
187 { "_nproc" },
188 #define X_NPROC 12
189 #define X_LAST 12
190 #if defined(hp300)
191 { "_Sysseg" },
192 #define X_SYSSEG (X_LAST+1)
193 { "_lowram" },
194 #define X_LOWRAM (X_LAST+2)
195 #endif
196 #if defined(i386)
197 { "_IdlePTD" },
198 #define X_IdlePTD (X_LAST+1)
199 #endif
200 { "" },
201 };
202
/* forward declarations for the file-local helpers (pre-ANSI style) */
203 static off_t Vtophys();
204 static void klseek(), seterr(), setsyserr(), vstodb();
205 static int getkvars(), kvm_doprocs(), kvm_init();
206 #ifdef NEWVM
207 static int vatosw();
208 static int findpage();
209 #endif
210
211 /*
212 * returns 0 if files were opened now,
213 * 1 if files were already opened,
214 * -1 if files could not be opened.
215 */
216 kvm_openfiles(uf, mf, sf)
217 const char *uf, *mf, *sf;
218 {
219 if (kvmfilesopen)
220 return (1);
221 unixx = mem = kmem = swap = -1;
222 unixf = (uf == NULL) ? _PATH_UNIX : uf;
223 memf = (mf == NULL) ? _PATH_MEM : mf;
224
225 if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
226 setsyserr("can't open %s", unixf);
227 goto failed;
228 }
229 if ((mem = open(memf, O_RDONLY, 0)) == -1) {
230 setsyserr("can't open %s", memf);
231 goto failed;
232 }
233 if (sf != NULL)
234 swapf = sf;
235 if (mf != NULL) {
236 deadkernel++;
237 kmemf = mf;
238 kmem = mem;
239 swap = -1;
240 } else {
241 kmemf = _PATH_KMEM;
242 if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
243 setsyserr("can't open %s", kmemf);
244 goto failed;
245 }
246 swapf = (sf == NULL) ? _PATH_DRUM : sf;
247 /*
248 * live kernel - avoid looking up nlist entries
249 * past X_DEADKERNEL.
250 */
251 nl[X_DEADKERNEL].n_name = "";
252 }
253 if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
254 seterr("can't open %s", swapf);
255 goto failed;
256 }
257 kvmfilesopen++;
258 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
259 return (-1);
260 return (0);
261 failed:
262 kvm_close();
263 return (-1);
264 }
265
266 static
267 kvm_init(uf, mf, sf)
268 char *uf, *mf, *sf;
269 {
270 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
271 return (-1);
272 if (getkvars() == -1)
273 return (-1);
274 kvminit = 1;
275
276 return (0);
277 }
278
279 kvm_close()
280 {
281 if (unixx != -1) {
282 close(unixx);
283 unixx = -1;
284 }
285 if (kmem != -1) {
286 if (kmem != mem)
287 close(kmem);
288 /* otherwise kmem is a copy of mem, and will be closed below */
289 kmem = -1;
290 }
291 if (mem != -1) {
292 close(mem);
293 mem = -1;
294 }
295 if (swap != -1) {
296 close(swap);
297 swap = -1;
298 }
299 if (db != NULL) {
300 dbm_close(db);
301 db = NULL;
302 }
303 kvminit = 0;
304 kvmfilesopen = 0;
305 deadkernel = 0;
306 #ifndef NEWVM
307 if (Sysmap) {
308 free(Sysmap);
309 Sysmap = NULL;
310 }
311 #endif
312 }
313
314 kvm_nlist(nl)
315 struct nlist *nl;
316 {
317 datum key, data;
318 char dbname[MAXPATHLEN];
319 char dbversion[_POSIX2_LINE_MAX];
320 char kversion[_POSIX2_LINE_MAX];
321 int dbversionlen;
322 char symbuf[MAXSYMSIZE];
323 struct nlist nbuf, *n;
324 int num, did;
325
326 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
327 return (-1);
328 if (deadkernel)
329 goto hard2;
330 /*
331 * initialize key datum
332 */
333 key.dptr = symbuf;
334
335 if (db != NULL)
336 goto win; /* off to the races */
337 /*
338 * open database
339 */
340 sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
341 if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
342 goto hard2;
343 /*
344 * read version out of database
345 */
346 bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
347 key.dsize = (sizeof ("VERSION") - 1);
348 data = dbm_fetch(db, key);
349 if (data.dptr == NULL)
350 goto hard1;
351 bcopy(data.dptr, dbversion, data.dsize);
352 dbversionlen = data.dsize;
353 /*
354 * read version string from kernel memory
355 */
356 bcopy("_version", symbuf, sizeof ("_version")-1);
357 key.dsize = (sizeof ("_version")-1);
358 data = dbm_fetch(db, key);
359 if (data.dptr == NULL)
360 goto hard1;
361 if (data.dsize != sizeof (struct nlist))
362 goto hard1;
363 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
364 lseek(kmem, nbuf.n_value, 0);
365 if (read(kmem, kversion, dbversionlen) != dbversionlen)
366 goto hard1;
367 /*
368 * if they match, we win - otherwise do it the hard way
369 */
370 if (bcmp(dbversion, kversion, dbversionlen) != 0)
371 goto hard1;
372 /*
373 * getem from the database.
374 */
375 win:
376 num = did = 0;
377 for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
378 int len;
379 /*
380 * clear out fields from users buffer
381 */
382 n->n_type = 0;
383 n->n_other = 0;
384 n->n_desc = 0;
385 n->n_value = 0;
386 /*
387 * query db
388 */
389 if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
390 seterr("symbol too large");
391 return (-1);
392 }
393 (void)strcpy(symbuf, n->n_name);
394 key.dsize = len;
395 data = dbm_fetch(db, key);
396 if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
397 continue;
398 bcopy(data.dptr, &nbuf, sizeof (struct nlist));
399 n->n_value = nbuf.n_value;
400 n->n_type = nbuf.n_type;
401 n->n_desc = nbuf.n_desc;
402 n->n_other = nbuf.n_other;
403 did++;
404 }
405 return (num - did);
406 hard1:
407 dbm_close(db);
408 db = NULL;
409 hard2:
410 num = nlist(unixf, nl);
411 if (num == -1)
412 seterr("nlist (hard way) failed");
413 return (num);
414 }
415
416 kvm_getprocs(what, arg)
417 int what, arg;
418 {
419 static int ocopysize = -1;
420
421 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
422 return (NULL);
423 if (!deadkernel) {
424 int ret, copysize;
425
426 if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
427 setsyserr("can't get estimate for kerninfo");
428 return (-1);
429 }
430 copysize = ret;
431 if (copysize > ocopysize || !kvmprocbase) {
432 if (ocopysize == -1 || !kvmprocbase)
433 kvmprocbase =
434 (struct kinfo_proc *)malloc(copysize);
435 else
436 kvmprocbase =
437 (struct kinfo_proc *)realloc(kvmprocbase,
438 copysize);
439 if (!kvmprocbase) {
440 seterr("out of memory");
441 return (-1);
442 }
443 }
444 ocopysize = copysize;
445 if ((ret = getkerninfo(what, kvmprocbase, ©size,
446 arg)) == -1) {
447 setsyserr("can't get proc list");
448 return (-1);
449 }
450 if (copysize % sizeof (struct kinfo_proc)) {
451 seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
452 copysize, sizeof (struct kinfo_proc));
453 return (-1);
454 }
455 kvmnprocs = copysize / sizeof (struct kinfo_proc);
456 } else {
457 int nproc;
458
459 if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
460 sizeof (int)) != sizeof (int)) {
461 seterr("can't read nproc");
462 return (-1);
463 }
464 if ((kvmprocbase = (struct kinfo_proc *)
465 malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
466 seterr("out of memory (addr: %x nproc = %d)",
467 nl[X_NPROC].n_value, nproc);
468 return (-1);
469 }
470 kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
471 realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
472 }
473 kvmprocptr = kvmprocbase;
474
475 return (kvmnprocs);
476 }
477
478 /*
479 * XXX - should NOT give up so easily - especially since the kernel
480 * may be corrupt (it died). Should gather as much information as possible.
481 * Follows proc ptrs instead of reading table since table may go
482 * away soon.
483 */
484 static
485 kvm_doprocs(what, arg, buff)
486 int what, arg;
487 char *buff;
488 {
489 struct proc *p, proc;
490 register char *bp = buff;
491 int i = 0;
492 int doingzomb = 0;
493 struct eproc eproc;
494 struct pgrp pgrp;
495 struct session sess;
496 struct tty tty;
497 #ifndef NEWVM
498 struct text text;
499 #endif
500
501 /* allproc */
502 if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
503 sizeof (struct proc *)) != sizeof (struct proc *)) {
504 seterr("can't read allproc");
505 return (-1);
506 }
507
508 again:
509 for (; p; p = proc.p_nxt) {
510 if (kvm_read(p, &proc, sizeof (struct proc)) !=
511 sizeof (struct proc)) {
512 seterr("can't read proc at %x", p);
513 return (-1);
514 }
515 #ifdef NEWVM
516 if (kvm_read(proc.p_cred, &eproc.e_pcred,
517 sizeof (struct pcred)) == sizeof (struct pcred))
518 (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
519 sizeof (struct ucred));
520 switch(ki_op(what)) {
521
522 case KINFO_PROC_PID:
523 if (proc.p_pid != (pid_t)arg)
524 continue;
525 break;
526
527
528 case KINFO_PROC_UID:
529 if (eproc.e_ucred.cr_uid != (uid_t)arg)
530 continue;
531 break;
532
533 case KINFO_PROC_RUID:
534 if (eproc.e_pcred.p_ruid != (uid_t)arg)
535 continue;
536 break;
537 }
538 #else
539 switch(ki_op(what)) {
540
541 case KINFO_PROC_PID:
542 if (proc.p_pid != (pid_t)arg)
543 continue;
544 break;
545
546
547 case KINFO_PROC_UID:
548 if (proc.p_uid != (uid_t)arg)
549 continue;
550 break;
551
552 case KINFO_PROC_RUID:
553 if (proc.p_ruid != (uid_t)arg)
554 continue;
555 break;
556 }
557 #endif
558 /*
559 * gather eproc
560 */
561 eproc.e_paddr = p;
562 if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
563 sizeof (struct pgrp)) {
564 seterr("can't read pgrp at %x", proc.p_pgrp);
565 return (-1);
566 }
567 eproc.e_sess = pgrp.pg_session;
568 eproc.e_pgid = pgrp.pg_id;
569 eproc.e_jobc = pgrp.pg_jobc;
570 if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
571 != sizeof (struct session)) {
572 seterr("can't read session at %x", pgrp.pg_session);
573 return (-1);
574 }
575 if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
576 if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
577 != sizeof (struct tty)) {
578 seterr("can't read tty at %x", sess.s_ttyp);
579 return (-1);
580 }
581 eproc.e_tdev = tty.t_dev;
582 eproc.e_tsess = tty.t_session;
583 if (tty.t_pgrp != NULL) {
584 if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
585 pgrp)) != sizeof (struct pgrp)) {
586 seterr("can't read tpgrp at &x",
587 tty.t_pgrp);
588 return (-1);
589 }
590 eproc.e_tpgid = pgrp.pg_id;
591 } else
592 eproc.e_tpgid = -1;
593 } else
594 eproc.e_tdev = NODEV;
595 if (proc.p_wmesg)
596 kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
597 #ifdef NEWVM
598 (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
599 sizeof (struct vmspace));
600 eproc.e_xsize = eproc.e_xrssize =
601 eproc.e_xccount = eproc.e_xswrss = 0;
602 #else
603 if (proc.p_textp) {
604 kvm_read(proc.p_textp, &text, sizeof (text));
605 eproc.e_xsize = text.x_size;
606 eproc.e_xrssize = text.x_rssize;
607 eproc.e_xccount = text.x_ccount;
608 eproc.e_xswrss = text.x_swrss;
609 } else {
610 eproc.e_xsize = eproc.e_xrssize =
611 eproc.e_xccount = eproc.e_xswrss = 0;
612 }
613 #endif
614
615 switch(ki_op(what)) {
616
617 case KINFO_PROC_PGRP:
618 if (eproc.e_pgid != (pid_t)arg)
619 continue;
620 break;
621
622 case KINFO_PROC_TTY:
623 if ((proc.p_flag&SCTTY) == 0 ||
624 eproc.e_tdev != (dev_t)arg)
625 continue;
626 break;
627 }
628
629 i++;
630 bcopy(&proc, bp, sizeof (struct proc));
631 bp += sizeof (struct proc);
632 bcopy(&eproc, bp, sizeof (struct eproc));
633 bp+= sizeof (struct eproc);
634 }
635 if (!doingzomb) {
636 /* zombproc */
637 if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
638 sizeof (struct proc *)) != sizeof (struct proc *)) {
639 seterr("can't read zombproc");
640 return (-1);
641 }
642 doingzomb = 1;
643 goto again;
644 }
645
646 return (i);
647 }
648
649 struct proc *
650 kvm_nextproc()
651 {
652
653 if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
654 return (NULL);
655 if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
656 seterr("end of proc list");
657 return (NULL);
658 }
659 return((struct proc *)(kvmprocptr++));
660 }
661
662 struct eproc *
663 kvm_geteproc(p)
664 const struct proc *p;
665 {
666 return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
667 }
668
669 kvm_setproc()
670 {
671 kvmprocptr = kvmprocbase;
672 }
673
674 kvm_freeprocs()
675 {
676
677 if (kvmprocbase) {
678 free(kvmprocbase);
679 kvmprocbase = NULL;
680 }
681 }
682
683 #ifdef i386
684 /* See also ./sys/kern/kern_execve.c */
/* size of the argument region at the top of the user stack, page-rounded */
685 #define ARGSIZE (roundup(ARG_MAX, NBPG))
686 #endif
687
688 #ifdef NEWVM
689 struct user *
690 kvm_getu(p)
691 const struct proc *p;
692 {
693 register struct kinfo_proc *kp = (struct kinfo_proc *)p;
694 register int i;
695 register char *up;
696 u_int vaddr;
697 struct swapblk swb;
698
699 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
700 return (NULL);
701 if (p->p_stat == SZOMB) {
702 seterr("zombie process");
703 return (NULL);
704 }
705
706 argaddr0 = argaddr1 = swaddr = 0;
707 if ((p->p_flag & SLOAD) == 0) {
708 vm_offset_t maddr;
709
710 if (swap < 0) {
711 seterr("no swap");
712 return (NULL);
713 }
714 /*
715 * Costly operation, better set enable_swap to zero
716 * in vm/vm_glue.c, since paging of user pages isn't
717 * done yet anyway.
718 */
719 if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
720 return NULL;
721
722 if (maddr == 0 && swb.size < UPAGES * NBPG)
723 return NULL;
724
725 for (i = 0; i < UPAGES; i++) {
726 if (maddr) {
727 (void) lseek(mem, maddr + i * NBPG, 0);
728 if (read(mem,
729 (char *)user.upages[i], NBPG) != NBPG) {
730 seterr(
731 "can't read u for pid %d from %s",
732 p->p_pid, swapf);
733 return NULL;
734 }
735 } else {
736 (void) lseek(swap, swb.offset + i * NBPG, 0);
737 if (read(swap,
738 (char *)user.upages[i], NBPG) != NBPG) {
739 seterr(
740 "can't read u for pid %d from %s",
741 p->p_pid, swapf);
742 return NULL;
743 }
744 }
745 }
746 return(&user.user);
747 }
748 /*
749 * Read u-area one page at a time for the benefit of post-mortems
750 */
751 up = (char *) p->p_addr;
752 for (i = 0; i < UPAGES; i++) {
753 klseek(kmem, (long)up, 0);
754 if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
755 seterr("cant read page %x of u of pid %d from %s",
756 up, p->p_pid, kmemf);
757 return(NULL);
758 }
759 up += CLBYTES;
760 }
761 pcbpf = (int) btop(p->p_addr); /* what should this be really? */
762 /*
763 * Conjure up a physical address for the arguments.
764 */
765 #ifdef hp300
766 if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
767 struct pte pte[CLSIZE*2];
768
769 klseek(kmem,
770 (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
771 [btoc(USRSTACK-CLBYTES*2)], 0);
772 if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
773 #if CLBYTES < 2048
774 argaddr0 = ctob(pftoc(pte[CLSIZE*0].pg_pfnum));
775 #endif
776 argaddr1 = ctob(pftoc(pte[CLSIZE*1].pg_pfnum));
777 }
778 }
779 #endif
780 kp->kp_eproc.e_vm.vm_rssize =
781 kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
782
783 vaddr = (u_int)kp->kp_eproc.e_vm.vm_maxsaddr + MAXSSIZ - ARGSIZE;
784
785 #ifdef i386
786 if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
787 struct pde pde;
788
789 klseek(kmem,
790 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(vaddr)]), 0);
791
792 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
793 && pde.pd_v) {
794
795 struct pte pte;
796
797 if (lseek(mem, (long)ctob(pde.pd_pfnum) +
798 (ptei(vaddr) * sizeof pte), 0) == -1)
799 seterr("kvm_getu: lseek");
800 if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
801 if (pte.pg_v) {
802 argaddr1 = (long)ctob(pte.pg_pfnum);
803 } else {
804 goto hard;
805 }
806 } else {
807 seterr("kvm_getu: read");
808 }
809 } else {
810 goto hard;
811 }
812 }
813 #endif /* i386 */
814
815 hard:
816 if (vatosw(p, vaddr, &argaddr1, &swb)) {
817 if (argaddr1 == 0 && swb.size >= ARGSIZE)
818 swaddr = swb.offset;
819 }
820
821 return(&user.user);
822 }
823 #else
/*
 * Old-VM variant of kvm_getu(): read the u-area of p into the static
 * `user' buffer via the process page tables, and record the physical
 * addresses of the two argument clusters (argaddr0/argaddr1) and the
 * pcb page frame for later use by kvm_getargs().
 * Returns &user.user on success, NULL with the error string set on failure.
 */
824 struct user *
825 kvm_getu(p)
826 const struct proc *p;
827 {
828 struct pte *pteaddr, apte;
829 struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
830 register int i;
831 int ncl;
832
833 if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
834 return (NULL);
835 if (p->p_stat == SZOMB) {
836 seterr("zombie process");
837 return (NULL);
838 }
/* swapped-out process: whole u-area lives contiguously on the swap device */
839 if ((p->p_flag & SLOAD) == 0) {
840 if (swap < 0) {
841 seterr("no swap");
842 return (NULL);
843 }
844 (void) lseek(swap, (long)dtob(p->p_swaddr), 0);
845 if (read(swap, (char *)&user.user, sizeof (struct user)) !=
846 sizeof (struct user)) {
847 seterr("can't read u for pid %d from %s",
848 p->p_pid, swapf);
849 return (NULL);
850 }
851 pcbpf = 0;
852 argaddr0 = 0;
853 argaddr1 = 0;
854 return (&user.user);
855 }
/* resident: locate the indirect pte for the top of the process map ... */
856 pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
857 klseek(kmem, (long)pteaddr, 0);
858 if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
859 seterr("can't read indir pte to get u for pid %d from %s",
860 p->p_pid, kmemf);
861 return (NULL);
862 }
/* ... then pull the tail of the page table: argument ptes + u-area ptes */
863 lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
864 if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
865 seterr("can't read page table for u of pid %d from %s",
866 p->p_pid, memf);
867 return (NULL);
868 }
/* a cluster is usable only if it is resident (not fill-on-demand) */
869 if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
870 argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
871 else
872 argaddr0 = 0;
873 if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
874 argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
875 else
876 argaddr1 = 0;
877 pcbpf = arguutl[CLSIZE*2].pg_pfnum;
/* copy the u-area one cluster at a time, last cluster first */
878 ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
879 while (--ncl >= 0) {
880 i = ncl * CLSIZE;
881 lseek(mem,
882 (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
883 if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
884 seterr("can't read page %d of u of pid %d from %s",
885 arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
886 return(NULL);
887 }
888 }
889 return (&user.user);
890 }
891 #endif
892
/*
 * Reconstruct the command line of process p from the argument pages
 * located earlier by kvm_getu() (argaddr0/argaddr1/swaddr), reading
 * them from physical memory or swap.  NULs between arguments become
 * spaces, unprintable bytes become '?' (at most 5), and scanning stops
 * at an '='-style environment word.  Falls back to " (p_comm)" when
 * the arguments cannot be recovered.  Returns a pointer to a static
 * buffer overwritten on each call.
 */
893 char *
894 kvm_getargs(p, up)
895 const struct proc *p;
896 const struct user *up;
897 {
898 #ifdef i386
899 /* See also ./sys/kern/kern_execve.c */
900 static char cmdbuf[ARGSIZE];
901 static union {
902 char argc[ARGSIZE];
903 int argi[ARGSIZE/sizeof (int)];
904 } argspac;
905 #else
906 static char cmdbuf[CLBYTES*2];
907 static union {
908 char argc[CLBYTES*2];
909 int argi[CLBYTES*2/sizeof (int)];
910 } argspac;
911 #endif
912 register char *cp;
913 register int *ip;
914 char c;
915 int nbad;
916 #ifndef NEWVM
917 struct dblock db;
918 #endif
919 const char *file;
920 int stkoff = 0;
921
922 #if defined(NEWVM) && defined(hp300)
923 stkoff = 20; /* XXX for sigcode */
924 #endif
/* no u-area, or a kernel process (swapper/pagedaemon): punt to p_comm */
925 if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
926 goto retucomm;
927 if ((p->p_flag & SLOAD) == 0 || argaddr1 == 0) {
928 #ifdef NEWVM
929 if (swaddr == 0)
930 goto retucomm; /* XXX for now */
/*
 * NOTE(review): on this i386 swap path `file' is never assigned before
 * a possible `goto bad', so the %s in the error message reads an
 * uninitialized pointer -- verify and fix upstream.
 */
931 #ifdef i386
932 (void) lseek(swap, swaddr, 0);
933 if (read(swap, &argspac.argc[0], ARGSIZE) != ARGSIZE)
934 goto bad;
935 #else
936 if (argaddr0) {
937 lseek(swap, (long)argaddr0, 0);
938 if (read(swap, (char *)&argspac, CLBYTES) != CLBYTES)
939 goto bad;
940 } else
941 bzero(&argspac, CLBYTES);
942 lseek(swap, (long)argaddr1, 0);
943 if (read(swap, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
944 goto bad;
945 #endif
946 #else
947 if (swap < 0 || p->p_ssize == 0)
948 goto retucomm;
/* old VM: map the top two stack clusters through the swap map */
949 vstodb(0, CLSIZE, &up->u_smap, &db, 1);
950 (void) lseek(swap, (long)dtob(db.db_base), 0);
951 if (read(swap, (char *)&argspac.argc[CLBYTES], CLBYTES)
952 != CLBYTES)
953 goto bad;
954 vstodb(1, CLSIZE, &up->u_smap, &db, 1);
955 (void) lseek(swap, (long)dtob(db.db_base), 0);
956 if (read(swap, (char *)&argspac.argc[0], CLBYTES) != CLBYTES)
957 goto bad;
958 file = swapf;
959 #endif
960 } else {
961 #ifdef i386
962 lseek(mem, (long)argaddr1, 0);
963 if (read(mem, &argspac.argc[0], ARGSIZE) != ARGSIZE)
964 goto bad;
965 #else
966 if (argaddr0) {
967 lseek(mem, (long)argaddr0, 0);
968 if (read(mem, (char *)&argspac, CLBYTES) != CLBYTES)
969 goto bad;
970 } else
971 bzero(&argspac, CLBYTES);
972 lseek(mem, (long)argaddr1, 0);
973 if (read(mem, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
974 goto bad;
975 #endif
976 file = (char *) memf;
977 }
978
/* scan the raw stack words for the argument string */
979 nbad = 0;
980 #ifdef i386
981 ip = &argspac.argi[(ARGSIZE-ARG_MAX)/sizeof (int)];
982
983 for (cp = (char *)ip; cp < &argspac.argc[ARGSIZE-stkoff]; cp++) {
984 #else
/* walk backwards from the stack top to the start of the arg strings */
985 ip = &argspac.argi[CLBYTES*2/sizeof (int)];
986 ip -= 2; /* last arg word and .long 0 */
987 ip -= stkoff / sizeof (int);
988 while (*--ip) {
989 if (ip == argspac.argi)
990 goto retucomm;
991 }
992 *(char *)ip = ' ';
993 ip++;
994
995 for (cp = (char *)ip; cp < &argspac.argc[CLBYTES*2-stkoff]; cp++) {
996 #endif
997 c = *cp;
998 if (c == 0) { /* convert null between arguments to space */
999 *cp = ' ';
1000 if (*(cp+1) == 0) break; /* if null argument follows then no more args */
1001 }
1002 else if (c < ' ' || c > 0176) {
1003 if (++nbad >= 5*(0+1)) { /* eflg -> 0 XXX */ /* limit number of bad chars to 5 */
1004 *cp++ = '?';
1005 break;
1006 }
1007 *cp = '?';
1008 }
1009 else if (0 == 0 && c == '=') { /* eflg -> 0 XXX */
1010 while (*--cp != ' ')
1011 if (cp <= (char *)ip)
1012 break;
1013 break;
1014 }
1015 }
/* trim trailing spaces and copy out the reconstructed command line */
1016 *cp = 0;
1017 while (*--cp == ' ')
1018 *cp = 0;
1019 cp = (char *)ip;
1020 (void) strcpy(cmdbuf, cp);
/* if the result looks implausible, append " (p_comm)" as a hint */
1021 if (cp[0] == '-' || cp[0] == '?' || cp[0] <= ' ') {
1022 (void) strcat(cmdbuf, " (");
1023 (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
1024 (void) strcat(cmdbuf, ")");
1025 }
1026 return (cmdbuf);
1027
1028 bad:
1029 seterr("error locating command name for pid %d from %s",
1030 p->p_pid, file);
1031 retucomm:
1032 (void) strcpy(cmdbuf, " (");
1033 (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
1034 (void) strcat(cmdbuf, ")");
1035 return (cmdbuf);
1036 }
1037
1038
/*
 * Resolve the kernel namelist and read the kernel variables this
 * library needs (nswap, dmmin, dmmax; plus the system page tables /
 * page directory when examining a dead kernel).
 * Returns 0 on success, -1 with the error string set on failure.
 */
1039 static
1040 getkvars()
1041 {
1042 if (kvm_nlist(nl) == -1)
1043 return (-1);
1044 if (deadkernel) {
1045 /* We must do the sys map first because klseek uses it */
1046 long addr;
1047
1048 #ifndef NEWVM
1049 Syssize = nl[X_SYSSIZE].n_value;
1050 Sysmap = (struct pte *)
1051 calloc((unsigned) Syssize, sizeof (struct pte));
1052 if (Sysmap == NULL) {
1053 seterr("out of space for Sysmap");
1054 return (-1);
1055 }
1056 addr = (long) nl[X_SYSMAP].n_value;
1057 addr &= ~KERNBASE;
1058 (void) lseek(kmem, addr, 0);
1059 if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1060 != Syssize * sizeof (struct pte)) {
1061 seterr("can't read Sysmap");
1062 return (-1);
1063 }
1064 #endif
1065 #if defined(hp300)
1066 addr = (long) nl[X_LOWRAM].n_value;
1067 (void) lseek(kmem, addr, 0);
1068 if (read(kmem, (char *) &lowram, sizeof (lowram))
1069 != sizeof (lowram)) {
1070 seterr("can't read lowram");
1071 return (-1);
1072 }
1073 lowram = btop(lowram);
1074 Sysseg = (struct ste *) malloc(NBPG);
1075 if (Sysseg == NULL) {
1076 seterr("out of space for Sysseg");
1077 return (-1);
1078 }
/* X_SYSSEG holds a pointer; dereference it, then read the segment table */
1079 addr = (long) nl[X_SYSSEG].n_value;
1080 (void) lseek(kmem, addr, 0);
1081 read(kmem, (char *)&addr, sizeof(addr));
1082 (void) lseek(kmem, (long)addr, 0);
1083 if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1084 seterr("can't read Sysseg");
1085 return (-1);
1086 }
1087 #endif
1088 #if defined(i386)
1089 PTD = (struct pde *) malloc(NBPG);
1090 if (PTD == NULL) {
1091 seterr("out of space for PTD");
1092 return (-1);
1093 }
/* X_IdlePTD holds a pointer; dereference it, then read the page directory */
1094 addr = (long) nl[X_IdlePTD].n_value;
1095 (void) lseek(kmem, addr, 0);
1096 read(kmem, (char *)&addr, sizeof(addr));
1097 (void) lseek(kmem, (long)addr, 0);
1098 if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1099 seterr("can't read PTD");
1100 return (-1);
1101 }
1102 #endif
1103 }
1104 #ifndef NEWVM
1105 usrpt = (struct pte *)nl[X_USRPT].n_value;
1106 Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1107 #endif
/*
 * NOTE(review): nswap/dmmin/dmmax are declared `static int' but are
 * read with sizeof (long); identical on the i386/hp300 targets of this
 * era, but an overrun wherever long is wider than int -- verify.
 */
1108 if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1109 sizeof (long)) {
1110 seterr("can't read nswap");
1111 return (-1);
1112 }
1113 if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1114 sizeof (long)) {
1115 seterr("can't read dmmin");
1116 return (-1);
1117 }
1118 if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1119 sizeof (long)) {
1120 seterr("can't read dmmax");
1121 return (-1);
1122 }
1123 return (0);
1124 }
1125
1126 kvm_read(loc, buf, len)
1127 void *loc;
1128 void *buf;
1129 {
1130 if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1131 return (-1);
1132 if (iskva(loc)) {
1133 klseek(kmem, (off_t) loc, 0);
1134 if (read(kmem, buf, len) != len) {
1135 seterr("error reading kmem at %x", loc);
1136 return (-1);
1137 }
1138 } else {
1139 lseek(mem, (off_t) loc, 0);
1140 if (read(mem, buf, len) != len) {
1141 seterr("error reading mem at %x", loc);
1142 return (-1);
1143 }
1144 }
1145 return (len);
1146 }
1147
1148 static void
1149 klseek(fd, loc, off)
1150 int fd;
1151 off_t loc;
1152 int off;
1153 {
1154
1155 if (deadkernel) {
1156 if ((loc = Vtophys(loc)) == -1)
1157 return;
1158 }
1159 (void) lseek(fd, (off_t)loc, off);
1160 }
1161
1162 #ifndef NEWVM
1163 /*
1164 * Given a base/size pair in virtual swap area,
1165 * return a physical base/size pair which is the
1166 * (largest) initial, physically contiguous block.
1167 */
/*
 * NOTE(review): `rev' is not listed in the K&R declarations and so
 * defaults to int; when set, the block is addressed from the end of
 * its interleave chunk (stack grows downward).  Both panic-style
 * sanity checks are compiled out (empty statements).
 */
1168 static void
1169 vstodb(vsbase, vssize, dmp, dbp, rev)
1170 register int vsbase;
1171 int vssize;
1172 struct dmap *dmp;
1173 register struct dblock *dbp;
1174 {
1175 register int blk = dmmin;
1176 register swblk_t *ip = dmp->dm_map;
1177
1178 vsbase = ctod(vsbase);
1179 vssize = ctod(vssize);
1180 if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1181 /*panic("vstodb")*/;
/* walk the interleave map: chunk sizes double from dmmin up to dmmax */
1182 while (vsbase >= blk) {
1183 vsbase -= blk;
1184 if (blk < dmmax)
1185 blk *= 2;
1186 ip++;
1187 }
1188 if (*ip <= 0 || *ip + blk > nswap)
1189 /*panic("vstodb")*/;
1190 dbp->db_size = MIN(vssize, blk - vsbase);
1191 dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1192 }
1193 #endif
1194
1195 #ifdef NEWVM
/*
 * Translate the kernel virtual address `loc' to a physical offset in
 * the memory file of a dead kernel, using the segment/page tables read
 * in by getkvars() (Sysseg on hp300, PTD on i386).
 * Returns (off_t)-1 with the error string set when the address does
 * not map to a valid page.
 */
1196 static off_t
1197 Vtophys(loc)
1198 u_long loc;
1199 {
1200 off_t newloc = (off_t) -1;
1201 #ifdef hp300
1202 int p, ste, pte;
1203
/* segment table entry -> page table frame */
1204 ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
1205 if ((ste & SG_V) == 0) {
1206 seterr("vtophys: segment not valid");
1207 return((off_t) -1);
1208 }
1209 p = btop(loc & SG_PMASK);
1210 newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
/* the dump starts at lowram, so rebase before seeking for the pte */
1211 (void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
1212 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1213 seterr("vtophys: cannot locate pte");
1214 return((off_t) -1);
1215 }
1216 newloc = pte & PG_FRAME;
1217 if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
1218 seterr("vtophys: page not valid");
1219 return((off_t) -1);
1220 }
1221 newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
1222 #endif
1223 #ifdef i386
1224 struct pde pde;
1225 struct pte pte;
1226 int p;
1227
/* page directory entry -> page table frame */
1228 pde = PTD[loc >> PD_SHIFT];
1229 if (pde.pd_v == 0) {
1230 seterr("vtophys: page directory entry not valid");
1231 return((off_t) -1);
1232 }
1233 p = btop(loc & PT_MASK);
1234 newloc = pde.pd_pfnum + (p * sizeof(struct pte));
1235 (void) lseek(kmem, (long)newloc, 0);
1236 if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1237 seterr("vtophys: cannot obtain desired pte");
1238 return((off_t) -1);
1239 }
1240 newloc = pte.pg_pfnum;
1241 if (pte.pg_v == 0) {
1242 seterr("vtophys: page table entry not valid");
1243 return((off_t) -1);
1244 }
1245 newloc += (loc & PGOFSET);
1246 #endif
1247 return((off_t) newloc);
1248 }
1249 #else
/*
 * vtophys --
 *	(pre-NEWVM) Translate kernel virtual address 'loc' to a physical
 *	offset using the in-memory copy of Sysmap.  Returns (off_t)-1 and
 *	sets an error string on failure.
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	/* Only kernel-space addresses can be translated through Sysmap. */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/* Invalid pte is tolerated only if it is neither fill-on-demand
	 * nor frame 0 -- presumably a wired page still being set up. */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* Frames below lowram are not RAM on the hp300. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1284 #endif
1285
1286
1287 #ifdef NEWVM
1288 /*
1289 * locate address of unwired or swapped page
1290 */
1291
#define DEBUG 0		/* set to 1 for verbose tracing to stderr */

/* Read len bytes of kernel memory at off into addr; true on full read. */
#define KREAD(off, addr, len) \
	(kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1296
1297
/*
 * vatosw --
 *	Locate the backing store for virtual address vaddr in process p:
 *	either a resident physical page (stored through *maddr) or a run
 *	of swap space (stored into *swb).  Returns 1 on success, 0 on
 *	failure with an error string left via seterr()/setsyserr().
 *
 *	NOTE(review): p is really a kinfo_proc -- the cast below relies
 *	on the proc being its first member; confirm against the callers.
 */
static int
vatosw(p, vaddr, maddr, swb)
	struct proc *p ;
	vm_offset_t vaddr;
	vm_offset_t *maddr;
	struct swapblk *swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object vm_object;
	struct vm_map_entry vm_entry;
	struct pager_struct pager;
	struct swpager swpager;
	struct swblock swblock;
	long addr, off;
	int i;

	/* PIDs 0 and 2 are kernel processes with no pageable user map
	 * (presumably swapper and pagedaemon) -- nothing to look up. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
		    (vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* Share maps and submaps are not handled. */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/* Follow the shadow-object chain, adjusting off at each level,
	 * until the page is found resident or the chain ends. */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
			p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
			p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* Not resident anywhere in the chain: must be paged out. */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	if (!KREAD((long)swpager.sw_blocks +
	    (off/dbtob(swpager.sw_bsize)) * sizeof swblock,
	    &swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Offset within the block plus the bytes left in it. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1405
1406
/*
 * These must mirror the kernel's vm_page hash exactly; page_shift and
 * vm_page_hash_mask are the cached kernel values read in findpage().
 */
#define atop(x)	(((unsigned)(x)) >> page_shift)
#define vm_page_hash(object, offset) \
	(((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)

/*
 * findpage --
 *	Search the kernel's vm_page hash table for the page belonging to
 *	(object, offset).  On a hit, store its physical address through
 *	*maddr and return 1; return 0 when not resident or on read error.
 */
static int
findpage(object, offset, maddr)
	long object;
	long offset;
	vm_offset_t *maddr;
{
	/* Kernel hash parameters, fetched once and cached across calls. */
	static long vm_page_hash_mask;
	static long vm_page_buckets;
	static long page_shift;
	queue_head_t bucket;
	struct vm_page mem;
	long addr, baddr;

	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
	    &vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
	    &page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
	    &vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/* The bucket is a circular queue: walk until back at the head. */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
1460 #endif /* NEWVM */
1461
1462 #include <varargs.h>
1463 static char errbuf[_POSIX2_LINE_MAX];
1464
1465 static void
1466 seterr(va_alist)
1467 va_dcl
1468 {
1469 char *fmt;
1470 va_list ap;
1471
1472 va_start(ap);
1473 fmt = va_arg(ap, char *);
1474 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1475 #if DEBUG
1476 (void) vfprintf(stderr, fmt, ap);
1477 #endif
1478 va_end(ap);
1479 }
1480
1481 static void
1482 setsyserr(va_alist)
1483 va_dcl
1484 {
1485 char *fmt, *cp;
1486 va_list ap;
1487 extern int errno;
1488
1489 va_start(ap);
1490 fmt = va_arg(ap, char *);
1491 (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1492 for (cp=errbuf; *cp; cp++)
1493 ;
1494 snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1495 va_end(ap);
1496 }
1497
1498 char *
1499 kvm_geterr()
1500 {
1501 return (errbuf);
1502 }
1503