/* kvm_proc.c revision 1.15 */
/*-
 * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
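
/*
 * Illustrative usage sketch (not part of this file): roughly how a consumer
 * such as ps(1) might drive this module to list every process and its
 * argument vector.  The kvm_open() arguments are assumptions for the example
 * only, and error checks are omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <stdio.h>
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *
 *	kvm_t *kd = kvm_open(NULL, NULL, NULL, O_RDONLY, "example");
 *	int i, cnt;
 *	struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt);
 *	char **argv;
 *
 *	for (i = 0; kp != NULL && i < cnt; i++) {
 *		argv = kvm_getargv(kd, &kp[i], 0);
 *		printf("%d\t%s\n", kp[i].kp_proc.p_pid,
 *		    (argv != NULL && argv[0] != NULL) ? argv[0] : "?");
 *	}
 *	(void)kvm_close(kd);
 */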

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

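/*
 * KREAD() evaluates to nonzero (failure) when the kernel read comes back
 * short; callers treat a true result as an error.
 */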
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
		    size_t));

static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
		    int));
static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
		    void (*)(struct ps_strings *, u_long *, int *)));
static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int));
static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
static void	ps_str_e __P((struct ps_strings *, u_long *, int *));

char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;
	int rv;

	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;

	while (1) {
		/* Try reading the page from core first. */
		if ((rv = _kvm_readfromcore(kd, addr, offset)))
			break;

		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	if (rv == -1)
		return (0);

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[offset]);
}
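
/*
 * vm_page_hash() below mirrors the hash the kernel uses for its
 * vm_page_buckets[] array, so a page's struct vm_page can be located
 * directly in a crash dump given its (object, offset) pair.
 */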

#define	vm_page_hash(kd, object, offset) \
	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)

int
_kvm_coreinit(kd)
	kvm_t *kd;
{
	struct nlist nlist[3];

	nlist[0].n_name = "_vm_page_buckets";
	nlist[1].n_name = "_vm_page_hash_mask";
	nlist[2].n_name = 0;
	if (kvm_nlist(kd, nlist) != 0)
		return (-1);

	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
		return (-1);

	return (0);
}

int
_kvm_readfromcore(kd, object, offset)
	kvm_t *kd;
	u_long object, offset;
{
	u_long addr;
	struct pglist bucket;
	struct vm_page mem;
	off_t seekpoint;

	if (kd->vm_page_buckets == 0 &&
	    _kvm_coreinit(kd))
		return (-1);

	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
	if (KREAD(kd, addr, &bucket))
		return (-1);

	addr = (u_long)bucket.tqh_first;
	offset &= ~(kd->nbpg - 1);
	while (1) {
		if (addr == 0)
			return (0);

		if (KREAD(kd, addr, &mem))
			return (-1);

		if ((u_long)mem.object == object &&
		    (u_long)mem.offset == offset)
			break;

		addr = (u_long)mem.hashq.tqe_next;
	}

	seekpoint = mem.phys_addr;

	if (lseek(kd->pmfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}

int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (-1);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (-1);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (-1);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (-1);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (-1);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	if (!ISALIVE(kd))
		return (-1);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));

	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}

/*
 * Read procs from the memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	register int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			      &eproc.e_ucred);

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
				 proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
				pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
					 "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
						 "can't read tpgrp at %x",
						tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			     eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	register struct kinfo_proc *bp = kd->procbase;
	register int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
				"proc size mismatch (%d total, %d chunks)",
				size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
				      nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	const struct proc *p;
	register u_long addr;
	register int narg;
	register int maxcnt;
{
	register char *np, *cp, *ep, *ap;
	register u_long oaddr = -1;
	register int len, cc;
	register char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > ARG_MAX || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
						sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
						sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = kd->nbpg;
	}
	if (kd->argbuf == 0) {
		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->argbuf == 0)
			return (0);
	}
	cc = sizeof(char *) * narg;
	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
		return (0);
	ap = np = kd->argspc;
	argv = kd->argv;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (argv < kd->argv + narg && *argv != 0) {
		addr = (u_long)*argv & ~(kd->nbpg - 1);
		if (addr != oaddr) {
			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
			    kd->nbpg)
				return (0);
			oaddr = addr;
		}
		addr = (u_long)*argv & (kd->nbpg - 1);
		cp = kd->argbuf + addr;
		cc = kd->nbpg - addr;
		if (maxcnt > 0 && cc > maxcnt - len)
			cc = maxcnt - len;
		ep = memchr(cp, '\0', cc);
		if (ep != 0)
			cc = ep - cp + 1;
		if (len + cc > kd->arglen) {
			register int off;
			register char **pp;
			register char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
							  kd->arglen);
			if (kd->argspc == 0)
				return (0);
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; pp++)
				*pp += off;
			ap += off;
			np += off;
		}
		memcpy(np, cp, cc);
		np += cc;
		len += cc;
		if (ep != 0) {
			*argv++ = ap;
			ap = np;
		} else
			*argv += cc;
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * current string.
			 */
			if (ep == 0) {
				*np = '\0';
				*argv++ = ap;
			}
			break;
		}
	}
	/* Make sure argv is terminated. */
	*argv = 0;
	return (kd->argv);
}
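
/*
 * ps_str_a() and ps_str_e() extract the argument and environment vector
 * base address and count, respectively, from a ps_strings structure; they
 * are used as the info callback passed to kvm_doargv().
 */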

static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	register const struct proc *p = &kp->kp_proc;
	register char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB ||
	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
		      sizeof(arginfo)) != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}
/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}

/*
 * Read from user space.  The user context is given by p.
 */
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	register const struct proc *p;
	register u_long uva;
	register char *buf;
	register size_t len;
{
	register char *cp;

	cp = buf;
	while (len > 0) {
		register int cc;
		register char *dp;
		u_long cnt;

		dp = _kvm_uread(kd, p, uva, &cnt);
		if (dp == 0) {
			_kvm_err(kd, 0, "invalid address (%x)", uva);
			return (0);
		}
		cc = MIN(cnt, len);
		bcopy(dp, cp, cc);

		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}