kvm_proc.c revision 1.8
/*-
 * Copyright (c) 1994 Charles Hannum.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
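
/*
 * Note: KREAD() evaluates to non-zero when the kernel read fails, so it
 * is used below in the form "if (KREAD(kd, addr, &obj)) ... error ...".
 */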

int _kvm_readfromcore __P((kvm_t *, u_long, u_long));
int _kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
ssize_t kvm_uread __P((kvm_t *, const struct proc *, u_long, char *, size_t));

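/*
 * Translate the user virtual address va in process p into a pointer
 * into kd->swapspc: walk the process's vm_map to find the backing
 * object, follow the shadow chain, and pull the page in from the core
 * file or from the swap pager.  On success, *cnt is set to the number
 * of valid bytes from the returned pointer to the end of the page; on
 * failure, 0 is returned.
 */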
char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;
	int rv;

	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;

	while (1) {
		/* Try reading the page from core first. */
		if ((rv = _kvm_readfromcore(kd, addr, offset)))
			break;

		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	if (rv == -1)
		return (0);

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[offset]);
}

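/*
 * This must mirror the kernel's vm_page hash computation, since it is
 * used to index the kernel's vm_page_buckets[] array directly.
 */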
#define	vm_page_hash(kd, object, offset) \
	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)

int
_kvm_coreinit(kd)
	kvm_t *kd;
{
	struct nlist nlist[3];

	nlist[0].n_name = "_vm_page_buckets";
	nlist[1].n_name = "_vm_page_hash_mask";
	nlist[2].n_name = 0;
	if (kvm_nlist(kd, nlist) != 0)
		return (-1);

	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
		return (-1);

	return (0);
}

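/*
 * Look up the page for (object, offset) in the kernel's vm_page hash
 * table and, if it is resident, read it from physical memory into
 * kd->swapspc.  Returns 1 if the page was read, 0 if it is not
 * resident, and -1 on error.
 */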
int
_kvm_readfromcore(kd, object, offset)
	kvm_t *kd;
	u_long object, offset;
{
	u_long addr;
	struct pglist bucket;
	struct vm_page mem;
	off_t seekpoint;

	if (kd->vm_page_buckets == 0 &&
	    _kvm_coreinit(kd))
		return (-1);

	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
	if (KREAD(kd, addr, &bucket))
		return (-1);

	addr = (u_long)bucket.tqh_first;
	offset &= ~(kd->nbpg - 1);
	while (1) {
		if (addr == 0)
			return (0);

		if (KREAD(kd, addr, &mem))
			return (-1);

		if ((u_long)mem.object == object &&
		    (u_long)mem.offset == offset)
			break;

		addr = (u_long)mem.hashq.tqe_next;
	}

	seekpoint = mem.phys_addr;

	if (lseek(kd->pmfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}

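/*
 * Ask the object's pager (swap pagers only) for the page at the given
 * offset and, if it is present in swap, read it into kd->swapspc.
 * Returns 1 if the page was read, 0 if the pager does not have it, and
 * -1 on error.
 */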
int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (-1);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (-1);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (-1);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (-1);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (-1);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	if (!ISALIVE(kd))
		return (-1);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));

	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}

/*
 * Read procs from the memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	register int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			      &eproc.e_ucred);

		switch(what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume that nprocs (or the
		 * proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
				 proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
				pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
					 "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
						 "can't read tpgrp at %x",
						tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

#ifdef sparc
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_rssize,
		    (char *)&eproc.e_vm.vm_rssize,
		    sizeof(eproc.e_vm.vm_rssize));
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_tsize,
		    (char *)&eproc.e_vm.vm_tsize,
		    3 * sizeof(eproc.e_vm.vm_rssize));	/* XXX */
#else
		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
#endif
		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			     eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	register struct kinfo_proc *bp = kd->procbase;
	register int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

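/*
 * Return an array of kinfo_proc structures for the processes selected
 * by op and arg (a KERN_PROC_* selector and its argument, e.g.
 * KERN_PROC_PID with a pid).  On a live kernel the array comes from the
 * kern.proc sysctl; on a dead one it is built by walking allproc and
 * zombproc via kvm_deadprocs().  The number of entries is returned
 * through *cnt; the array is valid until the next call or kvm_close().
 */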
struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
				"proc size mismatch (%d total, %d chunks)",
				size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
				      nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
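
/*
 * A hypothetical usage sketch (not part of this library): roughly how a
 * ps(1)-style consumer might drive kvm_getprocs() on the running
 * kernel, assuming the usual <fcntl.h>, <limits.h>, <err.h> and
 * <sys/sysctl.h> headers.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct kinfo_proc *kp;
 *	kvm_t *kd;
 *	int i, cnt;
 *
 *	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "%s", errbuf);
 *	if ((kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt)) == NULL)
 *		errx(1, "%s", kvm_geterr(kd));
 *	for (i = 0; i < cnt; i++)
 *		printf("%d\t%s\n", kp[i].kp_proc.p_pid,
 *		    kp[i].kp_proc.p_comm);
 *	kvm_close(kd);
 */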

void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	struct proc *p;
	register u_long addr;
	register int narg;
	register int maxcnt;
{
	register char *cp;
	register int len, cc;
	register char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > 512 || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
						sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
						sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = kd->nbpg;
	}
	cp = kd->argspc;
	argv = kd->argv;
	*argv = cp;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (addr < VM_MAXUSER_ADDRESS) {
		cc = kd->nbpg - (addr & (kd->nbpg - 1));
		if (maxcnt > 0 && cc > maxcnt - len)
			cc = maxcnt - len;
		if (len + cc > kd->arglen) {
			register int off;
			register char **pp;
			register char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
							  kd->arglen);
			if (kd->argspc == 0)
				return (0);
			cp = &kd->argspc[len];
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; ++pp)
				*pp += off;
		}
		if (kvm_uread(kd, p, addr, cp, cc) != cc)
			/* XXX */
			return (0);
		len += cc;
		addr += cc;

		if (maxcnt == 0 && len > 16 * kd->nbpg)
			/* sanity */
			return (0);

		while (--cc >= 0) {
			if (*cp++ == 0) {
				if (--narg <= 0) {
					*++argv = 0;
					return (kd->argv);
				} else
					*++argv = cp;
			}
		}
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * argv and current string.
			 */
			*++argv = 0;
			*cp = 0;
			return (kd->argv);
		}
	}
	/* Ran off the end of user space without finding all the strings. */
	return (0);
}

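/*
 * ps_str_a() and ps_str_e() extract the base address and count of the
 * argument strings and of the environment strings, respectively, from
 * the ps_strings structure the kernel leaves at the top of the user
 * stack; one of them is passed to kvm_doargv() as its "info" callback.
 */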
static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	int (*info)(struct ps_strings*, u_long *, int *);
{
	register const struct proc *p = &kp->kp_proc;
	register char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB ||
	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
		      sizeof(arginfo)) != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}
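
/*
 * A hypothetical usage sketch: printing the command line of one entry
 * from the kvm_getprocs() example above (reusing its "kd" and "kp").
 * An nchr of 0 means no caller-imposed limit on the amount of string
 * data read (only the internal sanity check applies).
 *
 *	char **argv;
 *
 *	if ((argv = kvm_getargv(kd, &kp[i], 0)) != NULL)
 *		for (; *argv != NULL; ++argv)
 *			printf("%s ", *argv);
 *	printf("\n");
 */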

/*
 * Read from user space.  The user context is given by p.
 */
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	register const struct proc *p;
	register u_long uva;
	register char *buf;
	register size_t len;
{
	register char *cp;

	cp = buf;
	while (len > 0) {
		register int cc;
		register char *dp;
		u_long cnt;

		dp = _kvm_uread(kd, p, uva, &cnt);
		if (dp == 0) {
			_kvm_err(kd, 0, "invalid address (%x)", uva);
			return (0);
		}
		cc = MIN(cnt, len);
		bcopy(dp, cp, cc);

		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}
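
/*
 * Note that kvm_uread() returns 0 (after recording an error with
 * _kvm_err()) as soon as any part of the requested range cannot be
 * resolved; it does not return a partial byte count.
 */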