kvm.c revision 1.27
      1 /*-
      2  * Copyright (c) 1994 Charles Hannum.
      3  * Copyright (c) 1993 Christopher G. Demetriou
      4  * Copyright (c) 1989 The Regents of the University of California.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *	This product includes software developed by the University of
     18  *	California, Berkeley and its contributors.
     19  * 4. Neither the name of the University nor the names of its contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  */
     35 
     36 #if defined(LIBC_SCCS) && !defined(lint)
     37 /*static char sccsid[] = "from: @(#)kvm.c	5.18 (Berkeley) 5/7/91";*/
     38 static char rcsid[] = "$Id: kvm.c,v 1.27 1994/03/01 22:14:13 phil Exp $";
     39 #endif /* LIBC_SCCS and not lint */
     40 
     41 #include <sys/param.h>
     42 #include <sys/user.h>
     43 #include <sys/proc.h>
     44 #include <sys/ioctl.h>
     45 #include <sys/kinfo.h>
     46 #include <sys/tty.h>
     47 #include <sys/exec.h>
     48 #include <machine/vmparam.h>
     49 #include <fcntl.h>
     50 #include <nlist.h>
     51 #include <kvm.h>
     52 #include <ndbm.h>
     53 #include <limits.h>
     54 #include <paths.h>
     55 #include <stdio.h>
     56 #include <string.h>
     57 
     58 #define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
     59 #define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
     60 #include <vm/vm.h>	/* ??? kinfo_proc currently includes this*/
     61 #include <vm/vm_page.h>
     62 #include <vm/swap_pager.h>
     63 #include <sys/kinfo_proc.h>
     64 #if defined(m68k)
     65 #include <machine/pte.h>
     66 #define	btos(x)		(((unsigned)(x)) >> SEGSHIFT)	/* XXX */
     67 #endif
     68 
     69 /*
     70  * files
     71  */
     72 static	const char *unixf, *memf, *kmemf, *swapf;
     73 static	int unixx, mem, kmem, swap;
     74 static	DBM *db;
     75 /*
     76  * flags
     77  */
     78 static	int deadkernel;
     79 static	int kvminit = 0;
     80 static	int kvmfilesopen = 0;
     81 /*
     82  * state
     83  */
     84 static	struct kinfo_proc *kvmprocbase, *kvmprocptr;
     85 static	int kvmnprocs;
     86 /*
     87  * u. buffer
     88  */
     89 static union {
     90 	struct	user user;
     91 	char	upages[UPAGES][NBPG];
     92 } user;
     93 
     94 struct swapblk {
     95 	long	offset;		/* offset in swap device */
     96 	long	size;		/* remaining size of block in swap device */
     97 };
     98 
     99 /*
    100  * random other stuff
    101  */
    102 static	int	dmmin, dmmax;
    103 static	int	pcbpf;
    104 static	int	nswap;
    105 static	long	vm_page_hash_mask;
    106 static	long	vm_page_buckets;
    107 static	long	page_shift;
    108 static	char	*tmp;
    109 #if defined(m68k)
    110 #if defined(amiga)
    111 static  int	cpu040;
    112 #endif
    113 static	int	lowram;
    114 static	struct ste *Sysseg;
    115 #endif
    116 #if defined(i386)
    117 static	struct pde *PTD;
    118 #endif
    119 
    120 #define atop(x)		(((unsigned)(x)) >> page_shift)
    121 #define vm_page_hash(object, offset) \
    122         (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
    123 
    124 #define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
    125 #define	MAXSYMSIZE	256
    126 
    127 static struct nlist nl[] = {
    128 	{ "_Usrptmap" },
    129 #define	X_USRPTMAP	0
    130 	{ "_usrpt" },
    131 #define	X_USRPT		1
    132 	{ "_nswap" },
    133 #define	X_NSWAP		2
    134 	{ "_dmmin" },
    135 #define	X_DMMIN		3
    136 	{ "_dmmax" },
    137 #define	X_DMMAX		4
    138 	{ "_vm_page_buckets" },
    139 #define X_VM_PAGE_BUCKETS	5
    140 	{ "_vm_page_hash_mask" },
    141 #define X_VM_PAGE_HASH_MASK	6
    142 	{ "_page_shift" },
    143 #define X_PAGE_SHIFT	7
    144 
    145 #if defined(m68k)
    146 #define	X_DEADKERNEL	8
    147 #endif
    148 
    149 #if defined(i386)
    150 #define	X_DEADKERNEL	8
    151 #endif
    152 
    153 #if defined(ns32k)
    154 #define	X_DEADKERNEL	8
    155 #endif
    156 
    157 #if defined(sparc)
    158 	{ "_pmap_dtos" },
    159 #define	X_PMAP_DTOS	8
    160 #define	X_DEADKERNEL	9
    161 #endif
    162 
    163 	/*
    164 	 * everything here and down, only if a dead kernel
    165 	 */
    166 	{ "_Sysmap" },
    167 #define	X_SYSMAP	(X_DEADKERNEL + 0)
    168 	{ "_Syssize" },
    169 #define	X_SYSSIZE	(X_DEADKERNEL + 1)
    170 	{ "_allproc" },
    171 #define X_ALLPROC	(X_DEADKERNEL + 2)
    172 	{ "_zombproc" },
    173 #define X_ZOMBPROC	(X_DEADKERNEL + 3)
    174 	{ "_nproc" },
    175 #define	X_NPROC		(X_DEADKERNEL + 4)
    176 #define	X_LAST		(X_DEADKERNEL + 5)
    177 
    178 #if defined(m68k)
    179 	{ "_Sysseg" },
    180 #define	X_SYSSEG	(X_LAST+0)
    181 	{ "_lowram" },
    182 #define	X_LOWRAM	(X_LAST+1)
    183 #if defined(amiga)
    184 	{ "_cpu040" },
    185 #define X_CPU040	(X_LAST+2)
    186 #endif
    187 #endif
    188 
    189 #if defined(i386)
    190 	{ "_IdlePTD" },
    191 #define	X_IdlePTD	(X_LAST+0)
    192 #endif
    193 
    194 	{ "" },
    195 };
    196 
    197 static off_t Vtophys();
    198 static void klseek(), seterr(), setsyserr(), vstodb();
    199 static int getkvars(), kvm_doprocs(), kvm_init();
    200 static int vatosw();
    201 static int pager_get();
    202 static int findpage();
    203 #if defined(sparc)
    204 static vm_offset_t phys2realphys();
    205 #endif
    206 
    207 /*
    208  * returns 	0 if files were opened now,
    209  * 		1 if files were already opened,
    210  *		-1 if files could not be opened.
    211  */
    212 kvm_openfiles(uf, mf, sf)
    213 	const char *uf, *mf, *sf;
    214 {
    215 	if (kvmfilesopen)
    216 		return (1);
    217 	unixx = mem = kmem = swap = -1;
    218 	unixf = (uf == NULL) ? _PATH_UNIX : uf;
    219 	memf = (mf == NULL) ? _PATH_MEM : mf;
    220 
    221 	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
    222 		setsyserr("can't open %s", unixf);
    223 		goto failed;
    224 	}
    225 	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
    226 		setsyserr("can't open %s", memf);
    227 		goto failed;
    228 	}
    229 	if (sf != NULL)
    230 		swapf = sf;
    231 	if (mf != NULL) {
    232 		deadkernel++;
    233 		kmemf = mf;
    234 		kmem = mem;
    235 		swap = -1;
    236 	} else {
    237 		kmemf = _PATH_KMEM;
    238 		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
    239 			setsyserr("can't open %s", kmemf);
    240 			goto failed;
    241 		}
    242 		swapf = (sf == NULL) ?  _PATH_DRUM : sf;
    243 		/*
    244 		 * live kernel - avoid looking up nlist entries
    245 		 * past X_DEADKERNEL.
    246 		 */
    247 		nl[X_DEADKERNEL].n_name = "";
    248 	}
    249 	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
    250 		seterr("can't open %s", swapf);
    251 		goto failed;
    252 	}
    253 	kvmfilesopen++;
     254 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL) == -1) /*XXX*/
    255 		return (-1);
    256 	return (0);
    257 failed:
    258 	kvm_close();
    259 	return (-1);
    260 }
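
/*
 * Illustrative sketch only, kept out of the build behind a hypothetical
 * LIBKVM_EXAMPLE guard: the usual open/use/close cycle around
 * kvm_openfiles().  Passing NULLs selects the running kernel
 * (_PATH_UNIX, _PATH_MEM, _PATH_DRUM); naming a crash dump as the
 * memory file instead puts the library into dead-kernel mode.
 */
#ifdef LIBKVM_EXAMPLE
extern char *kvm_geterr();

static int
example_openfiles()
{
	if (kvm_openfiles(NULL, NULL, NULL) == -1) {
		fprintf(stderr, "kvm_openfiles: %s\n", kvm_geterr());
		return (-1);
	}
	/* ... kvm_nlist(), kvm_getprocs(), kvm_read(), ... */
	kvm_close();
	return (0);
}
#endif /* LIBKVM_EXAMPLE */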
    261 
    262 static
    263 kvm_init(uf, mf, sf)
    264 	char *uf, *mf, *sf;
    265 {
    266 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
    267 		return (-1);
    268 	if (getkvars() == -1)
    269 		return (-1);
    270 	kvminit = 1;
    271 
    272 	return (0);
    273 }
    274 
    275 kvm_close()
    276 {
    277 	if (unixx != -1) {
    278 		close(unixx);
    279 		unixx = -1;
    280 	}
    281 	if (kmem != -1) {
    282 		if (kmem != mem)
    283 			close(kmem);
    284 		/* otherwise kmem is a copy of mem, and will be closed below */
    285 		kmem = -1;
    286 	}
    287 	if (mem != -1) {
    288 		close(mem);
    289 		mem = -1;
    290 	}
    291 	if (swap != -1) {
    292 		close(swap);
    293 		swap = -1;
    294 	}
    295 	if (db != NULL) {
    296 		dbm_close(db);
    297 		db = NULL;
    298 	}
    299 	kvminit = 0;
    300 	kvmfilesopen = 0;
    301 	deadkernel = 0;
    302 }
    303 
    304 kvm_nlist(nl)
    305 	struct nlist *nl;
    306 {
    307 	datum key, data;
    308 	char dbname[MAXPATHLEN];
    309 	char dbversion[_POSIX2_LINE_MAX];
    310 	char kversion[_POSIX2_LINE_MAX];
    311 	int dbversionlen;
    312 	char symbuf[MAXSYMSIZE];
    313 	struct nlist nbuf, *n;
    314 	int num, did;
    315 
    316 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
    317 		return (-1);
    318 	if (deadkernel)
    319 		goto hard2;
    320 	/*
    321 	 * initialize key datum
    322 	 */
    323 	key.dptr = symbuf;
    324 
    325 	if (db != NULL)
    326 		goto win;	/* off to the races */
    327 	/*
    328 	 * open database
    329 	 */
    330 	sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
    331 	if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
    332 		goto hard2;
    333 	/*
    334 	 * read version out of database
    335 	 */
    336 	bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
    337 	key.dsize = (sizeof ("VERSION") - 1);
    338 	data = dbm_fetch(db, key);
    339 	if (data.dptr == NULL)
    340 		goto hard1;
    341 	bcopy(data.dptr, dbversion, data.dsize);
    342 	dbversionlen = data.dsize;
    343 	/*
    344 	 * read version string from kernel memory
    345 	 */
    346 	bcopy("_version", symbuf, sizeof ("_version")-1);
    347 	key.dsize = (sizeof ("_version")-1);
    348 	data = dbm_fetch(db, key);
    349 	if (data.dptr == NULL)
    350 		goto hard1;
    351 	if (data.dsize != sizeof (struct nlist))
    352 		goto hard1;
    353 	bcopy(data.dptr, &nbuf, sizeof (struct nlist));
    354 	lseek(kmem, nbuf.n_value, 0);
    355 	if (read(kmem, kversion, dbversionlen) != dbversionlen)
    356 		goto hard1;
    357 	/*
    358 	 * if they match, we win - otherwise do it the hard way
    359 	 */
    360 	if (bcmp(dbversion, kversion, dbversionlen) != 0)
    361 		goto hard1;
    362 	/*
    363 	 * getem from the database.
    364 	 */
    365 win:
    366 	num = did = 0;
    367 	for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
    368 		int len;
    369 		/*
    370 		 * clear out fields from users buffer
    371 		 */
    372 		n->n_type = 0;
    373 		n->n_other = 0;
    374 		n->n_desc = 0;
    375 		n->n_value = 0;
    376 		/*
    377 		 * query db
    378 		 */
     379 		if ((len = strlen(n->n_name)) >= MAXSYMSIZE) {
    380 			seterr("symbol too large");
    381 			return (-1);
    382 		}
    383 		(void)strcpy(symbuf, n->n_name);
    384 		key.dsize = len;
    385 		data = dbm_fetch(db, key);
    386 		if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
    387 			continue;
    388 		bcopy(data.dptr, &nbuf, sizeof (struct nlist));
    389 		n->n_value = nbuf.n_value;
    390 		n->n_type = nbuf.n_type;
    391 		n->n_desc = nbuf.n_desc;
    392 		n->n_other = nbuf.n_other;
    393 		did++;
    394 	}
    395 	return (num - did);
    396 hard1:
    397 	dbm_close(db);
    398 	db = NULL;
    399 hard2:
    400 	num = nlist(unixf, nl);
    401 	if (num == -1)
    402 		seterr("nlist (hard way) failed");
    403 	return (num);
    404 }
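
/*
 * Illustrative sketch only (hypothetical LIBKVM_EXAMPLE guard, not part
 * of the library): the usual calling pattern for kvm_nlist() -- fill in
 * a zero-terminated nlist array, then hand the resolved address to
 * kvm_read().  "_hz" is merely a sample symbol name.
 */
#ifdef LIBKVM_EXAMPLE
static int
example_nlist()
{
	static struct nlist hznl[] = {
		{ "_hz" },
		{ "" },
	};
	int hz;

	if (kvm_nlist(hznl) != 0 || hznl[0].n_value == 0)
		return (-1);
	if (kvm_read((void *) hznl[0].n_value, &hz, sizeof (hz)) == -1)
		return (-1);
	return (hz);
}
#endif /* LIBKVM_EXAMPLE */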
    405 
    406 kvm_getprocs(what, arg)
    407 	int what, arg;
    408 {
    409 	static int	ocopysize = -1;
    410 
     411 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL) == -1)
     412 		return (-1);
    413 	if (!deadkernel) {
    414 		int ret, copysize;
    415 
    416 		if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
    417 			setsyserr("can't get estimate for kerninfo");
    418 			return (-1);
    419 		}
    420 		copysize = ret;
    421 		if (copysize > ocopysize || !kvmprocbase) {
    422 			if (ocopysize == -1 || !kvmprocbase)
    423 				kvmprocbase =
    424 					(struct kinfo_proc *)malloc(copysize);
    425 			else
    426 				kvmprocbase =
    427 					(struct kinfo_proc *)realloc(kvmprocbase,
    428 								copysize);
    429 			if (!kvmprocbase) {
    430 				seterr("out of memory");
    431 				return (-1);
    432 			}
    433 		}
    434 		ocopysize = copysize;
    435 		if ((ret = getkerninfo(what, kvmprocbase, &copysize,
    436 		     arg)) == -1) {
    437 			setsyserr("can't get proc list");
    438 			return (-1);
    439 		}
    440 		if (copysize % sizeof (struct kinfo_proc)) {
    441 			seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
    442 				copysize, sizeof (struct kinfo_proc));
    443 			return (-1);
    444 		}
    445 		kvmnprocs = copysize / sizeof (struct kinfo_proc);
    446 	} else {
    447 		int nproc;
    448 
    449 		if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
    450 		    sizeof (int)) == -1) {
    451 			seterr("can't read nproc");
    452 			return (-1);
    453 		}
    454 		if ((kvmprocbase = (struct kinfo_proc *)
    455 		     malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
    456 			seterr("out of memory (addr: %x nproc = %d)",
    457 				nl[X_NPROC].n_value, nproc);
    458 			return (-1);
    459 		}
    460 		kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
     461 		if (kvmnprocs > 0)
         			kvmprocbase = (struct kinfo_proc *)realloc(kvmprocbase,
         			    kvmnprocs * sizeof (struct kinfo_proc));
    462 	}
    463 	kvmprocptr = kvmprocbase;
    464 
    465 	return (kvmnprocs);
    466 }
    467 
    468 /*
    469  * XXX - should NOT give up so easily - especially since the kernel
    470  * may be corrupt (it died).  Should gather as much information as possible.
    471  * Follows proc ptrs instead of reading table since table may go
    472  * away soon.
    473  */
    474 static
    475 kvm_doprocs(what, arg, buff)
    476 	int what, arg;
    477 	char *buff;
    478 {
    479 	struct proc *p, proc;
    480 	register char *bp = buff;
    481 	int i = 0;
    482 	int doingzomb = 0;
    483 	struct eproc eproc;
    484 	struct pgrp pgrp;
    485 	struct session sess;
    486 	struct tty tty;
    487 
    488 	/* allproc */
    489 	if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
    490 	    sizeof (struct proc *)) == -1) {
    491 		seterr("can't read allproc");
    492 		return (-1);
    493 	}
    494 
    495 again:
    496 	for (; p; p = proc.p_nxt) {
    497 		if (kvm_read(p, &proc, sizeof (struct proc)) == -1) {
    498 			seterr("can't read proc at %x", p);
    499 			return (-1);
    500 		}
    501 		if (kvm_read(proc.p_cred, &eproc.e_pcred,
    502 		    sizeof (struct pcred)) != -1)
    503 			(void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
    504 			    sizeof (struct ucred));
    505 
    506 		switch(ki_op(what)) {
    507 
    508 		case KINFO_PROC_PID:
    509 			if (proc.p_pid != (pid_t)arg)
    510 				continue;
    511 			break;
    512 
    513 
    514 		case KINFO_PROC_UID:
    515 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
    516 				continue;
    517 			break;
    518 
    519 		case KINFO_PROC_RUID:
    520 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
    521 				continue;
    522 			break;
    523 		}
    524 		/*
    525 		 * gather eproc
    526 		 */
    527 		eproc.e_paddr = p;
    528 		if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) == -1) {
    529 			seterr("can't read pgrp at %x", proc.p_pgrp);
    530 			return (-1);
    531 		}
    532 		eproc.e_sess = pgrp.pg_session;
    533 		eproc.e_pgid = pgrp.pg_id;
    534 		eproc.e_jobc = pgrp.pg_jobc;
    535 		if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
    536 		    == -1) {
    537 			seterr("can't read session at %x", pgrp.pg_session);
    538 			return (-1);
    539 		}
    540 		if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
    541 			if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
    542 			    == -1) {
    543 				seterr("can't read tty at %x", sess.s_ttyp);
    544 				return (-1);
    545 			}
    546 			eproc.e_tdev = tty.t_dev;
    547 			eproc.e_tsess = tty.t_session;
    548 			if (tty.t_pgrp != NULL) {
    549 				if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
    550 				    pgrp)) == -1) {
     551 					seterr("can't read tpgrp at %x",
    552 						tty.t_pgrp);
    553 					return (-1);
    554 				}
    555 				eproc.e_tpgid = pgrp.pg_id;
    556 			} else
    557 				eproc.e_tpgid = -1;
    558 		} else
    559 			eproc.e_tdev = NODEV;
    560 		if (proc.p_wmesg)
    561 			(void) kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
    562 		(void) kvm_read(proc.p_vmspace, &eproc.e_vm,
    563 		    sizeof (struct vmspace));
    564 		eproc.e_xsize = eproc.e_xrssize =
    565 			eproc.e_xccount = eproc.e_xswrss = 0;
    566 
    567 		switch(ki_op(what)) {
    568 
    569 		case KINFO_PROC_PGRP:
    570 			if (eproc.e_pgid != (pid_t)arg)
    571 				continue;
    572 			break;
    573 
    574 		case KINFO_PROC_TTY:
    575 			if ((proc.p_flag&SCTTY) == 0 ||
    576 			     eproc.e_tdev != (dev_t)arg)
    577 				continue;
    578 			break;
    579 		}
    580 
    581 		i++;
    582 		bcopy(&proc, bp, sizeof (struct proc));
    583 		bp += sizeof (struct proc);
    584 		bcopy(&eproc, bp, sizeof (struct eproc));
    585 		bp+= sizeof (struct eproc);
    586 	}
    587 	if (!doingzomb) {
    588 		/* zombproc */
    589 		if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
    590 		    sizeof (struct proc *)) == -1) {
    591 			seterr("can't read zombproc");
    592 			return (-1);
    593 		}
    594 		doingzomb = 1;
    595 		goto again;
    596 	}
    597 
    598 	return (i);
    599 }
    600 
    601 struct proc *
    602 kvm_nextproc()
    603 {
    604 
     605 	if (!kvmprocbase && kvm_getprocs(KINFO_PROC_ALL, 0) == -1)
    606 		return (NULL);
    607 	if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
    608 		seterr("end of proc list");
    609 		return (NULL);
    610 	}
    611 	return((struct proc *)(kvmprocptr++));
    612 }
    613 
    614 struct eproc *
    615 kvm_geteproc(p)
    616 	const struct proc *p;
    617 {
    618 	return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
    619 }
    620 
    621 kvm_setproc()
    622 {
    623 	kvmprocptr = kvmprocbase;
    624 }
    625 
    626 kvm_freeprocs()
    627 {
    628 
    629 	if (kvmprocbase) {
    630 		free(kvmprocbase);
    631 		kvmprocbase = NULL;
    632 	}
    633 }
    634 
    635 struct user *
    636 kvm_getu(p)
    637 	const struct proc *p;
    638 {
    639 	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
    640 	register int i;
    641 	register char *up;
    642 	u_int vaddr;
    643 	struct swapblk swb;
    644 
     645 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL) == -1)
    646 		return (NULL);
    647 	if (p->p_stat == SZOMB) {
    648 		seterr("zombie process");
    649 		return (NULL);
    650 	}
    651 
    652 	if ((p->p_flag & SLOAD) == 0) {
    653 		vm_offset_t	maddr;
    654 
    655 		if (swap < 0) {
    656 			seterr("no swap");
    657 			return (NULL);
    658 		}
    659 		/*
    660 		 * Costly operation, better set enable_swap to zero
    661 		 * in vm/vm_glue.c, since paging of user pages isn't
    662 		 * done yet anyway.
    663 		 */
     664 		/* locate the base of the u-area, in core or on swap */
         		if (vatosw(&kp->kp_eproc.e_vm.vm_map, USRSTACK,
    665 			   &maddr, &swb) == 0)
    666 			return NULL;
    667 
    668 		if (maddr == 0 && swb.size < UPAGES * NBPG)
    669 			return NULL;
    670 
    671 		for (i = 0; i < UPAGES; i++) {
    672 			if (maddr) {
    673 				(void) lseek(mem, maddr + i * NBPG, 0);
    674 				if (read(mem,
    675 				    (char *)user.upages[i], NBPG) != NBPG) {
    676 					setsyserr(
    677 					    "can't read u for pid %d from %s",
     678 					    p->p_pid, memf);
    679 					return NULL;
    680 				}
    681 			} else {
    682 				(void) lseek(swap, swb.offset + i * NBPG, 0);
    683 				if (read(swap,
    684 				    (char *)user.upages[i], NBPG) != NBPG) {
    685 					setsyserr(
    686 					    "can't read u for pid %d from %s",
    687 					    p->p_pid, swapf);
    688 					return NULL;
    689 				}
    690 			}
    691 		}
    692 		return(&user.user);
    693 	}
    694 	/*
    695 	 * Read u-area one page at a time for the benefit of post-mortems
    696 	 */
    697 	up = (char *) p->p_addr;
    698 	for (i = 0; i < UPAGES; i++) {
    699 		klseek(kmem, (long)up, 0);
    700 		if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
     701 			setsyserr("can't read page %x of u of pid %d from %s",
    702 			    up, p->p_pid, kmemf);
    703 			return(NULL);
    704 		}
    705 		up += CLBYTES;
    706 	}
    707 	pcbpf = (int) btop(p->p_addr);	/* what should this be really? */
    708 
    709 	return(&user.user);
    710 }
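
/*
 * Illustrative sketch only (hypothetical LIBKVM_EXAMPLE guard): fetching
 * the u. of a single process.  The struct proc pointer handed to
 * kvm_getu() must come from this library's own tables via
 * kvm_nextproc(), since kvm_getu() looks at the kinfo_proc wrapped
 * around it; the returned struct user lives in static storage.
 */
#ifdef LIBKVM_EXAMPLE
static struct user *
example_getu(pid)
	int pid;
{
	struct proc *p;
	struct user *u;

	u = NULL;
	if (kvm_getprocs(KINFO_PROC_PID, pid) >= 1 &&
	    (p = kvm_nextproc()) != NULL)
		u = kvm_getu(p);
	kvm_freeprocs();
	return (u);
}
#endif /* LIBKVM_EXAMPLE */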
    711 
    712 int
    713 kvm_procread(p, addr, buf, len)
    714 	const struct proc *p;
     715 	const unsigned addr;
     716 	char *buf;
     717 	unsigned len;
    718 {
    719 	register struct kinfo_proc *kp = (struct kinfo_proc *) p;
    720 	struct swapblk swb;
    721 	vm_offset_t swaddr = 0, memaddr = 0;
    722 	unsigned real_len;
    723 	static int last_pid = -1;
    724 	static vm_offset_t last_addr;
    725 	static char bouncebuf[CLBYTES];
    726 
     727 	real_len = len < (CLBYTES - (addr & CLOFSET)) ?
         	    len : (CLBYTES - (addr & CLOFSET));
    728 
    729 	if (p->p_pid != last_pid || last_addr != (addr & ~CLOFSET)) {
     730 		if (vatosw(&kp->kp_eproc.e_vm.vm_map, addr & ~CLOFSET, &memaddr,
    731 		    &swb) == 0)
    732 			return 0;
    733 
    734 		if (memaddr) {
    735 #if defined(sparc)
    736 			memaddr = phys2realphys(memaddr);
    737 #endif
    738 			if (lseek(mem, memaddr, 0) == -1) {
    739 				setsyserr("kvm_procread: lseek mem");
    740 				return 0;
    741 			}
    742 			len = read(mem, bouncebuf, CLBYTES);
    743 			if (len == -1 || len < CLBYTES) {
    744 				last_pid = -1;
    745 				setsyserr("kvm_procread: read mem");
    746 				return 0;
    747 			}
    748 		} else {
    749 			swaddr = swb.offset;
    750 			if (lseek(swap, swaddr, 0) == -1) {
    751 				setsyserr("kvm_procread: lseek swap");
    752 				return 0;
    753 			}
    754 			len = read(swap, bouncebuf, CLBYTES);
    755 			if (len == -1 || len < CLBYTES) {
    756 				last_pid = -1;
    757 				setsyserr("kvm_procread: read swap");
    758 				return 0;
    759 			}
    760 		}
    761 	}
    762 
    763 	memcpy(buf, &bouncebuf[addr & CLOFSET], real_len);
    764 	last_pid = p->p_pid;
    765 	last_addr = addr & ~CLOFSET;
    766 	return real_len;
    767 }
    768 
    769 int
    770 kvm_procreadstr(p, addr, buf, len)
     771 	const struct proc *p;
     772 	const unsigned addr;
    773 	char *buf;
    774 	unsigned len;
    775 {
    776 	int	done, little;
    777 	char	copy[200], *pb;
    778 	char	a;
    779 
    780 	done = 0;
    781 	copy[0] = '\0';
    782 	while (len) {
    783 		little = kvm_procread(p, addr+done, copy, MIN(len, sizeof copy));
    784 		if (little<1)
    785 			break;
    786 		pb = copy;
    787 		while (little--) {
    788 			len--;
    789 			if( (*buf++ = *pb++) == '\0' )
    790 				return done;
    791 			done++;
    792 		}
    793 	}
    794 	return done;
    795 }
    796 
    797 char *
    798 kvm_getargs(p, up)
    799 	const struct proc *p;
    800 	const struct user *up;
    801 {
    802 	static char *cmdbuf = NULL, ucomm[sizeof(p->p_comm) + 4];
    803 	register char *cp, *acp;
    804 	int left, rv;
    805 	struct ps_strings arginfo;
    806 
    807 	if (cmdbuf == NULL) {
    808 		cmdbuf = (char *)malloc(ARG_MAX + sizeof(p->p_comm) + 5);
    809 		if (cmdbuf == NULL)
    810 			cmdbuf = ucomm;
    811 	}
    812 
    813 	if (cmdbuf == ucomm || up == NULL || p->p_pid == 0 || p->p_pid == 2)
    814 		goto retucomm;
    815 
    816 	if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
    817 		sizeof(arginfo))
    818 		goto bad;
    819 
    820 	cmdbuf[0] = '\0';
    821 	cp = cmdbuf;
    822 	acp = arginfo.ps_argvstr;
    823 	left = ARG_MAX + 1;
    824 	while (arginfo.ps_nargvstr--) {
    825 		if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
    826 			acp += rv + 1;
    827 			left -= rv + 1;
    828 			cp += rv;
    829 			*cp++ = ' ';
    830 			*cp = '\0';
    831 		} else
    832 			goto bad;
    833 	}
     834 	if (cp > cmdbuf)
         		*--cp = '\0';	/* trim trailing blank */
    835 
    836 	if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
    837 		(void) strcat(cmdbuf, " (");
    838 		(void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
    839 		(void) strcat(cmdbuf, ")");
    840 	}
    841 	return (cmdbuf);
    842 
    843 bad:
    844 	seterr("error locating command name for pid %d", p->p_pid);
    845 retucomm:
    846 	(void) strcpy(cmdbuf, "(");
    847 	(void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
    848 	(void) strcat(cmdbuf, ")");
    849 	return (cmdbuf);
    850 }
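
/*
 * Illustrative sketch only (hypothetical LIBKVM_EXAMPLE guard): walking
 * the process table much as ps(1) does -- kvm_getprocs() to snapshot,
 * kvm_nextproc()/kvm_geteproc() to iterate, kvm_getu()/kvm_getargs() to
 * recover the command line.  KINFO_PROC_UID selects by effective uid;
 * getuid() is just a convenient sample argument.
 */
#ifdef LIBKVM_EXAMPLE
static void
example_listprocs()
{
	struct proc *p;
	struct eproc *ep;
	int n;

	if ((n = kvm_getprocs(KINFO_PROC_UID, getuid())) == -1)
		return;
	while (n-- > 0 && (p = kvm_nextproc()) != NULL) {
		ep = kvm_geteproc(p);
		printf("%5d %5d %s\n", p->p_pid, ep->e_pgid,
		    kvm_getargs(p, kvm_getu(p)));
	}
	kvm_freeprocs();
}
#endif /* LIBKVM_EXAMPLE */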
    851 
    852 char *
    853 kvm_getenv(p, up)
    854 	const struct proc *p;
    855 	const struct user *up;
    856 {
    857 	static char *envbuf = NULL, emptyenv[1];
    858 	register char *cp, *acp;
    859 	int left, rv;
    860 	struct ps_strings arginfo;
    861 
    862 	if (envbuf == NULL) {
    863 		envbuf = (char *)malloc(ARG_MAX + 1);
    864 		if (envbuf == NULL)
    865 			envbuf = emptyenv;
    866 	}
    867 
    868 	if (envbuf == emptyenv || up == NULL || p->p_pid == 0 || p->p_pid == 2)
    869 		goto retemptyenv;
    870 
    871 	if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
    872 		sizeof(arginfo))
    873 		goto bad;
    874 
     875 	envbuf[0] = '\0';
         	cp = envbuf;
    876 	acp = arginfo.ps_envstr;
    877 	left = ARG_MAX + 1;
    878 	while (arginfo.ps_nenvstr--) {
    879 		if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
    880 			acp += rv + 1;
    881 			left -= rv + 1;
    882 			cp += rv;
    883 			*cp++ = ' ';
    884 			*cp = '\0';
    885 		} else
    886 			goto bad;
    887 	}
     888 	if (cp > envbuf)
         		*--cp = '\0';	/* trim trailing blank */
    889 	return (envbuf);
    890 
    891 bad:
    892 	seterr("error locating environment for pid %d", p->p_pid);
    893 retemptyenv:
    894 	envbuf[0] = '\0';
    895 	return (envbuf);
    896 }
    897 
    898 static
    899 getkvars()
    900 {
    901 	if (kvm_nlist(nl) == -1)
    902 		return (-1);
    903 	if (deadkernel) {
    904 		/* We must do the sys map first because klseek uses it */
    905 		long	addr;
    906 
    907 #if defined(m68k)
    908 #if defined(amiga)
    909 		addr = (long) nl[X_CPU040].n_value;
    910 		(void) lseek(kmem, addr, 0);
    911 		if (read(kmem, (char *) &cpu040, sizeof (cpu040))
    912 		    != sizeof (cpu040)) {
    913 			seterr("can't read cpu040");
    914 			return (-1);
    915 		}
    916 #endif
    917 		addr = (long) nl[X_LOWRAM].n_value;
    918 		(void) lseek(kmem, addr, 0);
    919 		if (read(kmem, (char *) &lowram, sizeof (lowram))
    920 		    != sizeof (lowram)) {
    921 			seterr("can't read lowram");
    922 			return (-1);
    923 		}
    924 		lowram = btop(lowram);
    925 		Sysseg = (struct ste *) malloc(NBPG);
    926 		if (Sysseg == NULL) {
    927 			seterr("out of space for Sysseg");
    928 			return (-1);
    929 		}
    930 		addr = (long) nl[X_SYSSEG].n_value;
    931 		(void) lseek(kmem, addr, 0);
    932 		read(kmem, (char *)&addr, sizeof(addr));
    933 		(void) lseek(kmem, (long)addr, 0);
    934 		if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
    935 			seterr("can't read Sysseg");
    936 			return (-1);
    937 		}
    938 #endif
    939 #if defined(i386)
    940 		PTD = (struct pde *) malloc(NBPG);
    941 		if (PTD == NULL) {
    942 			seterr("out of space for PTD");
    943 			return (-1);
    944 		}
    945 		addr = (long) nl[X_IdlePTD].n_value;
    946 		(void) lseek(kmem, addr, 0);
    947 		read(kmem, (char *)&addr, sizeof(addr));
    948 		(void) lseek(kmem, (long)addr, 0);
    949 		if (read(kmem, (char *) PTD, NBPG) != NBPG) {
    950 			seterr("can't read PTD");
    951 			return (-1);
    952 		}
    953 #endif
    954 	}
     955 	if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (nswap)) == -1) {
    956 		seterr("can't read nswap");
    957 		return (-1);
    958 	}
     959 	if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (dmmin)) == -1) {
    960 		seterr("can't read dmmin");
    961 		return (-1);
    962 	}
     963 	if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (dmmax)) == -1) {
    964 		seterr("can't read dmmax");
    965 		return (-1);
    966 	}
    967 	if (kvm_read((void *) nl[X_VM_PAGE_HASH_MASK].n_value,
    968 		     &vm_page_hash_mask, sizeof (long)) == -1) {
    969 		seterr("can't read vm_page_hash_mask");
    970 		return (-1);
    971 	}
    972 	if (kvm_read((void *) nl[X_VM_PAGE_BUCKETS].n_value,
    973 		     &vm_page_buckets, sizeof (long)) == -1) {
    974 		seterr("can't read vm_page_buckets");
    975 		return (-1);
    976 	}
    977 	if (kvm_read((void *) nl[X_PAGE_SHIFT].n_value,
    978 		     &page_shift, sizeof (long)) == -1) {
    979 		seterr("can't read page_shift");
    980 		return (-1);
    981 	}
    982 
    983 	return (0);
    984 }
    985 
    986 int
    987 kvm_read(loc, buf, len)
     988 	void *loc;
     989 	void *buf;
         	int len;
     990 {
    991 	int n;
    992 
    993 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
    994 		return (-1);
    995 	klseek(kmem, (off_t) loc, 0);
    996 	if ((n = read(kmem, buf, len)) != len) {
    997 		if (n == -1)
    998 			setsyserr("error reading kmem at %#x", loc);
    999 		else
   1000 			seterr("short read on kmem at %#x", loc);
   1001 		return (-1);
   1002 	}
   1003 	return (len);
   1004 }
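
/*
 * Illustrative sketch only (hypothetical LIBKVM_EXAMPLE guard): the
 * two-step read pattern used throughout this file -- fetch a kernel
 * pointer variable first, then the structure it points at.  "_allproc"
 * is the head of the kernel's process list, the same symbol
 * kvm_doprocs() chases above.
 */
#ifdef LIBKVM_EXAMPLE
static int
example_chase()
{
	static struct nlist apnl[] = {
		{ "_allproc" },
		{ "" },
	};
	struct proc *pp, firstproc;

	if (kvm_nlist(apnl) != 0 || apnl[0].n_value == 0)
		return (-1);
	if (kvm_read((void *) apnl[0].n_value, &pp, sizeof (pp)) == -1 ||
	    pp == NULL)
		return (-1);
	if (kvm_read((void *) pp, &firstproc, sizeof (firstproc)) == -1)
		return (-1);
	return (firstproc.p_pid);
}
#endif /* LIBKVM_EXAMPLE */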
   1005 
   1006 static void
   1007 klseek(fd, loc, off)
   1008 	int fd;
   1009 	off_t loc;
   1010 	int off;
   1011 {
   1012 
   1013 	if (deadkernel) {
   1014 		if ((loc = Vtophys(loc)) == -1)
   1015 			return;
   1016 	}
   1017 	(void) lseek(fd, (off_t)loc, off);
   1018 }
   1019 
   1020 static off_t
   1021 Vtophys(loc)
   1022 	u_long	loc;
   1023 {
   1024 	off_t newloc = (off_t) -1;
   1025 #if defined(m68k)
   1026 	int p, ste, pte;
   1027 
   1028 	ste = *(int *)&Sysseg[btos(loc)];
   1029 	if ((ste & SG_V) == 0) {
   1030 		seterr("vtophys: segment not valid");
   1031 		return((off_t) -1);
   1032 	}
   1033 	p = btop(loc & SG_PMASK);
   1034 	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
   1035 	(void) lseek(mem, newloc, 0);
   1036 	if (read(mem, (char *)&pte, sizeof pte) != sizeof pte) {
   1037 		seterr("vtophys: cannot locate pte");
   1038 		return((off_t) -1);
   1039 	}
   1040 	newloc = pte & PG_FRAME;
   1041 	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
   1042 		seterr("vtophys: page not valid");
   1043 		return((off_t) -1);
   1044 	}
   1045 	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
   1046 #endif
   1047 #if defined(i386)
   1048 	struct pde pde;
   1049 	struct pte pte;
   1050 	int p;
   1051 
   1052 	pde = PTD[loc >> PDSHIFT];
   1053 	if (pde.pd_v == 0) {
   1054 		seterr("vtophys: page directory entry not valid");
   1055 		return((off_t) -1);
   1056 	}
   1057 	p = btop(loc & PT_MASK);
    1058 	/* pd_pfnum is a page frame number; convert it to a byte address */
         	newloc = ((off_t)pde.pd_pfnum << PGSHIFT) + (p * sizeof(struct pte));
   1059 	(void) lseek(kmem, (long)newloc, 0);
   1060 	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
   1061 		seterr("vtophys: cannot obtain desired pte");
   1062 		return((off_t) -1);
   1063 	}
    1064 	newloc = (off_t)pte.pg_pfnum << PGSHIFT;
   1065 	if (pte.pg_v == 0) {
   1066 		seterr("vtophys: page table entry not valid");
   1067 		return((off_t) -1);
   1068 	}
   1069 	newloc += (loc & PGOFSET);
   1070 #endif
   1071 	return((off_t) newloc);
   1072 }
   1073 
   1074 /*
   1075  * locate address of unwired or swapped page
   1076  */
   1077 
   1078 static int
   1079 vatosw(mp, vaddr, maddr, swb)
   1080 vm_map_t	mp;
   1081 vm_offset_t	vaddr;
   1082 vm_offset_t	*maddr;
   1083 struct swapblk	*swb;
   1084 {
   1085 	struct vm_object	vm_object;
   1086 	struct vm_map_entry	vm_entry;
   1087 	long			saddr, addr, off;
   1088 	int			i;
   1089 
   1090 	saddr = addr = (long)mp->header.next;
   1091 #ifdef DEBUG
   1092 	fprintf(stderr, "vatosw: head=%x\n", &mp->header);
   1093 #endif
   1094 	for (i = 0; ; i++) {
   1095 		/* Weed through map entries until vaddr in range */
   1096 		if (kvm_read((void *) addr, &vm_entry, sizeof(vm_entry)) == -1) {
   1097 			seterr("vatosw: can't read vm_map_entry");
   1098 			return 0;
   1099 		}
   1100 #ifdef DEBUG
   1101 		fprintf(stderr, "vatosw: %d/%d, vaddr=%x, start=%x, end=%x ",
   1102 			i, mp->nentries, vaddr, vm_entry.start, vm_entry.end);
   1103 		fprintf(stderr, "addr=%x, next=%x\n", addr, vm_entry.next);
   1104 #endif
   1105 		if ((vaddr >= vm_entry.start) && (vaddr < vm_entry.end))
   1106 			if (vm_entry.object.vm_object != 0)
   1107 				break;
   1108 			else {
   1109 				seterr("vatosw: no object\n");
   1110 				return 0;
   1111 			}
   1112 
   1113 		addr = (long)vm_entry.next;
   1114 
   1115 		if (addr == saddr) {
   1116 			seterr("vatosw: map not found\n");
   1117 			return 0;
   1118 		}
   1119 	}
   1120 
   1121 	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
   1122 #ifdef DEBUG
   1123 		fprintf(stderr, "vatosw: is a %smap\n",
   1124 			vm_entry.is_sub_map ? "sub " : "");
   1125 #endif
   1126 		seterr("vatosw: is a %smap\n",
   1127 		       vm_entry.is_sub_map ? "sub " : "");
   1128 		return 0;
   1129 	}
   1130 
   1131 	/* Locate memory object */
   1132 	off = (vaddr - vm_entry.start) + vm_entry.offset;
   1133 	addr = (long)vm_entry.object.vm_object;
   1134 	while (1) {
   1135 		if (kvm_read((void *) addr, &vm_object, sizeof (vm_object)) == -1) {
   1136 			seterr("vatosw: can't read vm_object");
   1137 			return 0;
   1138 		}
   1139 
   1140 #ifdef DEBUG
   1141 		fprintf(stderr, "vatosw: find page: object %#x offset %x\n",
   1142 			addr, off);
   1143 #endif
   1144 
   1145 		/* Lookup in page queue */
   1146 		if ((i = findpage(addr, off, maddr)) != -1)
   1147 			return i;
   1148 
   1149 		if (vm_object.pager != 0 &&
   1150 		    (i = pager_get(&vm_object, off, swb)) != -1)
   1151 			return i;
   1152 
   1153 		if (vm_object.shadow == 0)
   1154 			break;
   1155 
   1156 #ifdef DEBUG
   1157 		fprintf(stderr, "vatosw: shadow obj at %x: offset %x+%x\n",
   1158 			addr, off, vm_object.shadow_offset);
   1159 #endif
   1160 
   1161 		addr = (long)vm_object.shadow;
   1162 		off += vm_object.shadow_offset;
   1163 	}
   1164 
   1165 	seterr("vatosw: page not found\n");
   1166 	return 0;
   1167 }
   1168 
   1169 
    1170 static int
   1171 pager_get(object, off, swb)
   1172 struct vm_object *object;
   1173 long off;
   1174 struct swapblk	*swb;
   1175 {
   1176 	struct pager_struct	pager;
   1177 	struct swpager		swpager;
   1178 	struct swblock		swblock;
   1179 
   1180 	/* Find address in swap space */
   1181 	if (kvm_read(object->pager, &pager, sizeof (pager)) == -1) {
   1182 		seterr("pager_get: can't read pager");
   1183 		return 0;
   1184 	}
   1185 	if (pager.pg_type != PG_SWAP) {
   1186 		seterr("pager_get: weird pager\n");
   1187 		return 0;
   1188 	}
   1189 
   1190 	/* Get swap pager data */
   1191 	if (kvm_read(pager.pg_data, &swpager, sizeof (swpager)) == -1) {
   1192 		seterr("pager_get: can't read swpager");
   1193 		return 0;
   1194 	}
   1195 
   1196 	off += object->paging_offset;
   1197 
   1198 	/* Read swap block array */
    1199 	if (kvm_read((char *) swpager.sw_blocks +
   1200 			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
   1201 			&swblock, sizeof (swblock)) == -1) {
   1202 		seterr("pager_get: can't read swblock");
   1203 		return 0;
   1204 	}
   1205 
   1206 	off %= dbtob(swpager.sw_bsize);
   1207 
   1208 	if (swblock.swb_mask & (1 << atop(off))) {
   1209 		swb->offset = dbtob(swblock.swb_block) + off;
   1210 		swb->size = dbtob(swpager.sw_bsize) - off;
   1211 		return 1;
   1212 	}
   1213 
   1214 	return -1;
   1215 }
   1216 
   1217 static int
   1218 findpage(object, offset, maddr)
   1219 long			object;
   1220 long			offset;
   1221 vm_offset_t		*maddr;
   1222 {
   1223 	queue_head_t	bucket;
   1224 	struct vm_page	mem;
   1225 	long		addr, baddr;
   1226 
   1227 	baddr = vm_page_buckets +
   1228 		vm_page_hash(object,offset) * sizeof(queue_head_t);
   1229 
   1230 	if (kvm_read((void *) baddr, &bucket, sizeof (bucket)) == -1) {
   1231 		seterr("can't read vm_page_bucket");
   1232 		return 0;
   1233 	}
   1234 
   1235 	addr = (long)bucket.next;
   1236 
   1237 	while (addr != baddr) {
   1238 		if (kvm_read((void *) addr, &mem, sizeof (mem)) == -1) {
   1239 			seterr("can't read vm_page");
   1240 			return 0;
   1241 		}
   1242 
   1243 		if ((long)mem.object == object && mem.offset == offset) {
   1244 			*maddr = (long)mem.phys_addr;
   1245 			return 1;
   1246 		}
   1247 
   1248 		addr = (long)mem.hashq.next;
   1249 	}
   1250 
   1251 	return -1;
   1252 }
   1253 
   1254 #if defined(sparc)
   1255 /*
   1256  * This comes from the bowels of pmap.c
   1257  */
   1258 #define MAXMEM 	(128 * 1024 * 1024)	/* no more than 128 MB phys mem */
   1259 #define NPGBANK	16			/* 2^4 pages per bank (64K / bank) */
   1260 #define	BSHIFT	4			/* log2(NPGBANK) */
   1261 #define BOFFSET	(NPGBANK - 1)
   1262 #define BTSIZE 	(MAXMEM / NBPG / NPGBANK)
   1263 
   1264 static int	pmap_dtos[BTSIZE];		/* dense to sparse */
   1265 static int	pmap_stod[BTSIZE];		/* sparse to dense */
   1266 
   1267 #define	HWTOSW(pg) (pmap_stod[(pg) >> BSHIFT] | ((pg) & BOFFSET))
   1268 #define	SWTOHW(pg) (pmap_dtos[(pg) >> BSHIFT] | ((pg) & BOFFSET))
   1269 /* -- */
   1270 
   1271 static int pmap_dtos_valid;
   1272 
   1273 /*
   1274  * Translate a VM physical address to a hardware physical address.
   1275  */
   1276 static vm_offset_t
   1277 phys2realphys(memaddr)
   1278 vm_offset_t memaddr;
   1279 {
   1280 	if (nl[X_PMAP_DTOS].n_value == 0)
   1281 		/* This is possibly a sun4 */
   1282 		return memaddr;
   1283 
   1284 	if (!pmap_dtos_valid) {
   1285 		if (kvm_read((void *)nl[X_PMAP_DTOS].n_value,
   1286 					pmap_dtos, sizeof (pmap_dtos)) == -1) {
   1287 			seterr("can't read pmap_dtos table");
   1288 			return -1;
   1289 		}
   1290 		pmap_dtos_valid = 1;
   1291 	}
   1292 	return (SWTOHW(atop(memaddr)) << PGSHIFT) | (memaddr & PGOFSET);
   1293 }
   1294 #endif
   1295 
   1296 #include <varargs.h>
   1297 static char errbuf[_POSIX2_LINE_MAX];
   1298 
   1299 static void
   1300 seterr(va_alist)
   1301 	va_dcl
   1302 {
   1303 	char *fmt;
   1304 	va_list ap;
   1305 
   1306 	va_start(ap);
   1307 	fmt = va_arg(ap, char *);
   1308 	(void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
   1309 #ifdef DEBUG
   1310 	(void) fprintf(stderr, "%s\n", errbuf);
   1311 #endif
   1312 	va_end(ap);
   1313 }
   1314 
   1315 static void
   1316 setsyserr(va_alist)
   1317 	va_dcl
   1318 {
   1319 	char *fmt, *cp;
   1320 	va_list ap;
   1321 	extern int errno;
   1322 
   1323 	va_start(ap);
   1324 	fmt = va_arg(ap, char *);
   1325 	(void) vsnprintf(cp = errbuf, _POSIX2_LINE_MAX, fmt, ap);
   1326 	cp += strlen(cp);
   1327 	(void) snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s",
   1328 			strerror(errno));
   1329 #ifdef DEBUG
   1330 	(void) fprintf(stderr, "%s\n", errbuf);
   1331 #endif
   1332 	va_end(ap);
   1333 }
   1334 
   1335 char *
   1336 kvm_geterr()
   1337 {
   1338 	return (errbuf);
   1339 }
   1340