      1 /*	$NetBSD: kvm_proc.c,v 1.25 1998/08/10 02:46:07 perry Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
      5  * Copyright (c) 1989, 1992, 1993
      6  *	The Regents of the University of California.  All rights reserved.
      7  *
      8  * This code is derived from software developed by the Computer Systems
      9  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
     10  * BG 91-66 and contributed to Berkeley.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. All advertising materials mentioning features or use of this software
     21  *    must display the following acknowledgement:
     22  *	This product includes software developed by the University of
     23  *	California, Berkeley and its contributors.
     24  * 4. Neither the name of the University nor the names of its contributors
     25  *    may be used to endorse or promote products derived from this software
     26  *    without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     38  * SUCH DAMAGE.
     39  */
     40 
     41 #include <sys/cdefs.h>
     42 #if defined(LIBC_SCCS) && !defined(lint)
     43 #if 0
     44 static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
     45 #else
     46 __RCSID("$NetBSD: kvm_proc.c,v 1.25 1998/08/10 02:46:07 perry Exp $");
     47 #endif
     48 #endif /* LIBC_SCCS and not lint */
     49 
     50 /*
     51  * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
     52  * users of this code, so we've factored it out into a separate module.
     53  * Thus, we keep this grunge out of the other kvm applications (i.e.,
     54  * most other applications are interested only in open/close/read/nlist).
     55  */
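
         /*
          * A minimal usage sketch (illustrative only; error checks omitted) of
          * how a client such as ps(1) might drive this interface.  The NULL
          * arguments to kvm_openfiles() select the running kernel and the
          * default swap device; the example also needs <fcntl.h>, <limits.h>
          * and <stdio.h>:
          *
          *	char errbuf[_POSIX2_LINE_MAX];
          *	struct kinfo_proc *kp;
          *	char **argv;
          *	kvm_t *kd;
          *	int i, cnt;
          *
          *	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
          *	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt);
          *	for (i = 0; i < cnt; i++) {
          *		argv = kvm_getargv(kd, &kp[i], 0);
          *		printf("%d %s\n", kp[i].kp_proc.p_pid,
          *		    argv ? argv[0] : kp[i].kp_proc.p_comm);
          *	}
          *	kvm_close(kd);
          */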
     56 
     57 #include <sys/param.h>
     58 #include <sys/user.h>
     59 #include <sys/proc.h>
     60 #include <sys/exec.h>
     61 #include <sys/stat.h>
     62 #include <sys/ioctl.h>
     63 #include <sys/tty.h>
     64 #include <stdlib.h>
     65 #include <string.h>
     66 #include <unistd.h>
     67 #include <nlist.h>
     68 #include <kvm.h>
     69 
     70 #include <vm/vm.h>
     71 #include <vm/vm_param.h>
     72 #include <vm/swap_pager.h>
     73 
     74 #if defined(UVM)
     75 #include <uvm/uvm_extern.h>
     76 #endif
     77 
     78 #include <sys/sysctl.h>
     79 
     80 #include <limits.h>
     81 #include <db.h>
     82 #include <paths.h>
     83 
     84 #include "kvm_private.h"
     85 
     86 #define KREAD(kd, addr, obj) \
     87 	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
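
         /*
          * Note that KREAD() evaluates to non-zero ("true") when the kernel
          * read fails or comes up short, so the idiom throughout this file is
          * "if (KREAD(kd, addr, &obj)) <error>".
          */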
     88 
     89 char		*_kvm_uread __P((kvm_t *, const struct proc *, u_long, u_long *));
     90 #if !defined(UVM)
     91 int		_kvm_coreinit __P((kvm_t *));
     92 int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
     93 int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
     94 #endif
     95 ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
     96 		    size_t));
     97 
     98 static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
     99 		    int));
    100 static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
    101 static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
    102 		    void (*)(struct ps_strings *, u_long *, int *)));
    103 static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
    104 		    struct kinfo_proc *, int));
    105 static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
    106 static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
    107 static void	ps_str_e __P((struct ps_strings *, u_long *, int *));
    108 
    109 char *
    110 _kvm_uread(kd, p, va, cnt)
    111 	kvm_t *kd;
    112 	const struct proc *p;
    113 	u_long va;
    114 	u_long *cnt;
    115 {
    116 	u_long addr, head;
    117 	u_long offset;
    118 	struct vm_map_entry vme;
    119 #if defined(UVM)
    120 	struct vm_amap amap;
    121 	struct vm_anon *anonp, anon;
    122 	struct vm_page pg;
    123 	int slot;
    124 #else
    125 	struct vm_object vmo;
    126 	int rv;
    127 #endif
    128 
    129 	if (kd->swapspc == 0) {
    130 		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
    131 		if (kd->swapspc == 0)
    132 			return (0);
    133 	}
    134 
    135 	/*
    136 	 * Look through the address map for the memory object
    137 	 * that corresponds to the given virtual address.
    138 	 * The header just has the entire valid range.
    139 	 */
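         	/*
         	 * The entry list is circular, so the scan below gives up if it
         	 * comes back around to the header without finding a backing
         	 * amap (or object) for the address.
         	 */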
    140 	head = (u_long)&p->p_vmspace->vm_map.header;
    141 	addr = head;
    142 	while (1) {
    143 		if (KREAD(kd, addr, &vme))
    144 			return (0);
    145 
    146 #if defined(UVM)
    147 		if (va >= vme.start && va < vme.end &&
    148 		    vme.aref.ar_amap != NULL)
    149 			break;
    150 
    151 #else
    152 		if (va >= vme.start && va < vme.end &&
    153 		    vme.object.vm_object != 0)
    154 			break;
    155 #endif
    156 
    157 		addr = (u_long)vme.next;
    158 		if (addr == head)
    159 			return (0);
    160 
    161 	}
    162 #if defined(UVM)
    163 
     164 	/*
     165 	 * We found the map entry; now chase the amap to the backing anon.
     166 	 */
    167 	if (vme.aref.ar_amap == NULL)
    168 		return NULL;
    169 
    170 	addr = (u_long)vme.aref.ar_amap;
    171 	if (KREAD(kd, addr, &amap))
    172 		return NULL;
    173 
    174 	offset = va - vme.start;
    175 	slot = offset / kd->nbpg + vme.aref.ar_slotoff;
    176 	/* sanity-check slot number */
     177 	if (slot > amap.am_nslot)
    178 		return NULL;
    179 
    180 	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
    181 	if (KREAD(kd, addr, &anonp))
    182 		return NULL;
    183 
    184 	addr = (u_long)anonp;
    185 	if (KREAD(kd, addr, &anon))
    186 		return NULL;
    187 
    188 	addr = (u_long)anon.u.an_page;
    189 	if (addr) {
    190 		if (KREAD(kd, addr, &pg))
    191 			return NULL;
    192 
    193 		if (pread(kd->pmfd, kd->swapspc, kd->nbpg,
    194 		    (off_t)pg.phys_addr) != kd->nbpg)
    195 			return NULL;
    196 	}
    197 	else {
    198 		if (pread(kd->swfd, kd->swapspc, kd->nbpg,
    199 		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
    200 			return NULL;
    201 	}
    202 #else
    203 	/*
    204 	 * We found the right object -- follow shadow links.
    205 	 */
    206 	offset = va - vme.start + vme.offset;
    207 	addr = (u_long)vme.object.vm_object;
    208 
    209 	while (1) {
    210 		/* Try reading the page from core first. */
    211 		if ((rv = _kvm_readfromcore(kd, addr, offset)))
    212 			break;
    213 
    214 		if (KREAD(kd, addr, &vmo))
    215 			return (0);
    216 
    217 		/* If there is a pager here, see if it has the page. */
    218 		if (vmo.pager != 0 &&
    219 		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
    220 			break;
    221 
    222 		/* Move down the shadow chain. */
    223 		addr = (u_long)vmo.shadow;
    224 		if (addr == 0)
    225 			return (0);
    226 		offset += vmo.shadow_offset;
    227 	}
    228 
    229 	if (rv == -1)
    230 		return (0);
    231 #endif
    232 
    233 	/* Found the page. */
    234 	offset %= kd->nbpg;
    235 	*cnt = kd->nbpg - offset;
    236 	return (&kd->swapspc[offset]);
    237 }
    238 
    239 #if !defined(UVM)
    240 
    241 #define	vm_page_hash(kd, object, offset) \
    242 	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)
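
         /*
          * This mirrors the kernel's vm_page hash so that a resident page can
          * be located directly in the crash dump's vm_page_buckets chains.
          */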
    243 
    244 int
    245 _kvm_coreinit(kd)
    246 	kvm_t *kd;
    247 {
    248 	struct nlist nlist[3];
    249 
    250 	nlist[0].n_name = "_vm_page_buckets";
    251 	nlist[1].n_name = "_vm_page_hash_mask";
    252 	nlist[2].n_name = 0;
    253 	if (kvm_nlist(kd, nlist) != 0)
    254 		return (-1);
    255 
    256 	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
    257 	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
    258 		return (-1);
    259 
    260 	return (0);
    261 }
    262 
    263 int
    264 _kvm_readfromcore(kd, object, offset)
    265 	kvm_t *kd;
    266 	u_long object, offset;
    267 {
    268 	u_long addr;
    269 	struct pglist bucket;
    270 	struct vm_page mem;
    271 	off_t seekpoint;
    272 
    273 	if (kd->vm_page_buckets == 0 &&
    274 	    _kvm_coreinit(kd))
    275 		return (-1);
    276 
    277 	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
    278 	if (KREAD(kd, addr, &bucket))
    279 		return (-1);
    280 
    281 	addr = (u_long)bucket.tqh_first;
     282 	offset &= ~(kd->nbpg - 1);
    283 	while (1) {
    284 		if (addr == 0)
    285 			return (0);
    286 
    287 		if (KREAD(kd, addr, &mem))
    288 			return (-1);
    289 
    290 		if ((u_long)mem.object == object &&
    291 		    (u_long)mem.offset == offset)
    292 			break;
    293 
    294 		addr = (u_long)mem.hashq.tqe_next;
    295 	}
    296 
    297 	seekpoint = mem.phys_addr;
    298 
    299 	if (pread(kd->pmfd, kd->swapspc, kd->nbpg, seekpoint) != kd->nbpg)
    300 		return (-1);
    301 
    302 	return (1);
    303 }
    304 
    305 int
    306 _kvm_readfrompager(kd, vmop, offset)
    307 	kvm_t *kd;
    308 	struct vm_object *vmop;
    309 	u_long offset;
    310 {
    311 	u_long addr;
    312 	struct pager_struct pager;
    313 	struct swpager swap;
    314 	int ix;
    315 	struct swblock swb;
    316 	off_t seekpoint;
    317 
    318 	/* Read in the pager info and make sure it's a swap device. */
    319 	addr = (u_long)vmop->pager;
    320 	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
    321 		return (-1);
    322 
    323 	/* Read in the swap_pager private data. */
    324 	addr = (u_long)pager.pg_data;
    325 	if (KREAD(kd, addr, &swap))
    326 		return (-1);
    327 
    328 	/*
    329 	 * Calculate the paging offset, and make sure it's within the
    330 	 * bounds of the pager.
    331 	 */
    332 	offset += vmop->paging_offset;
    333 	ix = offset / dbtob(swap.sw_bsize);
    334 #if 0
    335 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
    336 		return (-1);
    337 #else
    338 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
    339 		int i;
    340 		printf("BUG BUG BUG BUG:\n");
    341 		printf("object %p offset %lx pgoffset %lx ",
    342 		    vmop, offset - vmop->paging_offset,
    343 		    (u_long)vmop->paging_offset);
    344 		printf("pager %p swpager %p\n",
    345 		    vmop->pager, pager.pg_data);
    346 		printf("osize %lx bsize %x blocks %p nblocks %x\n",
    347 		    (u_long)swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
    348 		    swap.sw_nblocks);
    349 		for (i = 0; i < swap.sw_nblocks; i++) {
    350 			addr = (u_long)&swap.sw_blocks[i];
    351 			if (KREAD(kd, addr, &swb))
    352 				return (0);
    353 			printf("sw_blocks[%d]: block %x mask %x\n", i,
    354 			    swb.swb_block, swb.swb_mask);
    355 		}
    356 		return (-1);
    357 	}
    358 #endif
    359 
    360 	/* Read in the swap records. */
    361 	addr = (u_long)&swap.sw_blocks[ix];
    362 	if (KREAD(kd, addr, &swb))
    363 		return (-1);
    364 
    365 	/* Calculate offset within pager. */
    366 	offset %= dbtob(swap.sw_bsize);
    367 
    368 	/* Check that the page is actually present. */
    369 	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
    370 		return (0);
    371 
    372 	if (!ISALIVE(kd))
    373 		return (-1);
    374 
    375 	/* Calculate the physical address and read the page. */
     376 	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));
    377 
    378 	if (pread(kd->swfd, kd->swapspc, kd->nbpg, seekpoint) != kd->nbpg)
    379 		return (-1);
    380 
    381 	return (1);
    382 }
    383 #endif /* !defined(UVM) */
    384 
    385 /*
     386  * Read procs from the memory file into buffer bp, which has space to hold
    387  * at most maxcnt procs.
    388  */
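         /*
          * The what/arg pair selects which procs are kept: KERN_PROC_PID,
          * KERN_PROC_UID and KERN_PROC_RUID are filtered before the eproc data
          * is gathered, while KERN_PROC_PGRP and KERN_PROC_TTY need that data
          * and are filtered afterwards; any other value (e.g. KERN_PROC_ALL)
          * matches every proc.
          */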
    389 static int
    390 kvm_proclist(kd, what, arg, p, bp, maxcnt)
    391 	kvm_t *kd;
    392 	int what, arg;
    393 	struct proc *p;
    394 	struct kinfo_proc *bp;
    395 	int maxcnt;
    396 {
    397 	int cnt = 0;
    398 	struct eproc eproc;
    399 	struct pgrp pgrp;
    400 	struct session sess;
    401 	struct tty tty;
    402 	struct proc proc;
    403 
    404 	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
    405 		if (KREAD(kd, (u_long)p, &proc)) {
    406 			_kvm_err(kd, kd->program, "can't read proc at %x", p);
    407 			return (-1);
    408 		}
    409 		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
    410 			(void)KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
    411 			      &eproc.e_ucred);
    412 
     413 		switch (what) {
    414 
    415 		case KERN_PROC_PID:
    416 			if (proc.p_pid != (pid_t)arg)
    417 				continue;
    418 			break;
    419 
    420 		case KERN_PROC_UID:
    421 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
    422 				continue;
    423 			break;
    424 
    425 		case KERN_PROC_RUID:
    426 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
    427 				continue;
    428 			break;
    429 		}
    430 		/*
    431 		 * We're going to add another proc to the set.  If this
    432 		 * will overflow the buffer, assume the reason is because
    433 		 * nprocs (or the proc list) is corrupt and declare an error.
    434 		 */
    435 		if (cnt >= maxcnt) {
    436 			_kvm_err(kd, kd->program, "nprocs corrupt");
    437 			return (-1);
    438 		}
    439 		/*
    440 		 * gather eproc
    441 		 */
    442 		eproc.e_paddr = p;
    443 		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
    444 			_kvm_err(kd, kd->program, "can't read pgrp at %x",
    445 				 proc.p_pgrp);
    446 			return (-1);
    447 		}
    448 		eproc.e_sess = pgrp.pg_session;
    449 		eproc.e_pgid = pgrp.pg_id;
    450 		eproc.e_jobc = pgrp.pg_jobc;
    451 		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
    452 			_kvm_err(kd, kd->program, "can't read session at %x",
    453 				pgrp.pg_session);
    454 			return (-1);
    455 		}
    456 		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
    457 			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
    458 				_kvm_err(kd, kd->program,
    459 					 "can't read tty at %x", sess.s_ttyp);
    460 				return (-1);
    461 			}
    462 			eproc.e_tdev = tty.t_dev;
    463 			eproc.e_tsess = tty.t_session;
    464 			if (tty.t_pgrp != NULL) {
    465 				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
    466 					_kvm_err(kd, kd->program,
     467 						 "can't read tpgrp at %x",
    468 						tty.t_pgrp);
    469 					return (-1);
    470 				}
    471 				eproc.e_tpgid = pgrp.pg_id;
    472 			} else
    473 				eproc.e_tpgid = -1;
    474 		} else
    475 			eproc.e_tdev = NODEV;
    476 		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
    477 		if (sess.s_leader == p)
    478 			eproc.e_flag |= EPROC_SLEADER;
    479 		if (proc.p_wmesg)
    480 			(void)kvm_read(kd, (u_long)proc.p_wmesg,
    481 			    eproc.e_wmesg, WMESGLEN);
    482 
    483 		(void)kvm_read(kd, (u_long)proc.p_vmspace,
    484 		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
    485 
    486 		eproc.e_xsize = eproc.e_xrssize = 0;
    487 		eproc.e_xccount = eproc.e_xswrss = 0;
    488 
    489 		switch (what) {
    490 
    491 		case KERN_PROC_PGRP:
    492 			if (eproc.e_pgid != (pid_t)arg)
    493 				continue;
    494 			break;
    495 
    496 		case KERN_PROC_TTY:
    497 			if ((proc.p_flag & P_CONTROLT) == 0 ||
    498 			     eproc.e_tdev != (dev_t)arg)
    499 				continue;
    500 			break;
    501 		}
    502 		memcpy(&bp->kp_proc, &proc, sizeof(proc));
    503 		memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
    504 		++bp;
    505 		++cnt;
    506 	}
    507 	return (cnt);
    508 }
    509 
    510 /*
    511  * Build proc info array by reading in proc list from a crash dump.
    512  * Return number of procs read.  maxcnt is the max we will read.
    513  */
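         /*
          * Both the allproc and zombproc lists are walked; the zombie pass is
          * given whatever room remains in the buffer after the live procs.
          */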
    514 static int
    515 kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
    516 	kvm_t *kd;
    517 	int what, arg;
    518 	u_long a_allproc;
    519 	u_long a_zombproc;
    520 	int maxcnt;
    521 {
    522 	struct kinfo_proc *bp = kd->procbase;
    523 	int acnt, zcnt;
    524 	struct proc *p;
    525 
    526 	if (KREAD(kd, a_allproc, &p)) {
    527 		_kvm_err(kd, kd->program, "cannot read allproc");
    528 		return (-1);
    529 	}
    530 	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
    531 	if (acnt < 0)
    532 		return (acnt);
    533 
    534 	if (KREAD(kd, a_zombproc, &p)) {
    535 		_kvm_err(kd, kd->program, "cannot read zombproc");
    536 		return (-1);
    537 	}
    538 	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
    539 	if (zcnt < 0)
    540 		zcnt = 0;
    541 
    542 	return (acnt + zcnt);
    543 }
    544 
    545 struct kinfo_proc *
    546 kvm_getprocs(kd, op, arg, cnt)
    547 	kvm_t *kd;
    548 	int op, arg;
    549 	int *cnt;
    550 {
    551 	size_t size;
    552 	int mib[4], st, nprocs;
    553 
    554 	if (kd->procbase != 0) {
    555 		free((void *)kd->procbase);
    556 		/*
    557 		 * Clear this pointer in case this call fails.  Otherwise,
    558 		 * kvm_close() will free it again.
    559 		 */
    560 		kd->procbase = 0;
    561 	}
    562 	if (ISALIVE(kd)) {
    563 		size = 0;
    564 		mib[0] = CTL_KERN;
    565 		mib[1] = KERN_PROC;
    566 		mib[2] = op;
    567 		mib[3] = arg;
    568 		st = sysctl(mib, 4, NULL, &size, NULL, 0);
    569 		if (st == -1) {
    570 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
    571 			return (0);
    572 		}
    573 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
    574 		if (kd->procbase == 0)
    575 			return (0);
    576 		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
    577 		if (st == -1) {
    578 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
    579 			return (0);
    580 		}
    581 		if (size % sizeof(struct kinfo_proc) != 0) {
    582 			_kvm_err(kd, kd->program,
    583 				"proc size mismatch (%d total, %d chunks)",
    584 				size, sizeof(struct kinfo_proc));
    585 			return (0);
    586 		}
    587 		nprocs = size / sizeof(struct kinfo_proc);
    588 	} else {
    589 		struct nlist nl[4], *p;
    590 
    591 		nl[0].n_name = "_nprocs";
    592 		nl[1].n_name = "_allproc";
    593 		nl[2].n_name = "_zombproc";
    594 		nl[3].n_name = 0;
    595 
    596 		if (kvm_nlist(kd, nl) != 0) {
    597 			for (p = nl; p->n_type != 0; ++p)
    598 				;
    599 			_kvm_err(kd, kd->program,
    600 				 "%s: no such symbol", p->n_name);
    601 			return (0);
    602 		}
    603 		if (KREAD(kd, nl[0].n_value, &nprocs)) {
    604 			_kvm_err(kd, kd->program, "can't read nprocs");
    605 			return (0);
    606 		}
    607 		size = nprocs * sizeof(struct kinfo_proc);
    608 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
    609 		if (kd->procbase == 0)
    610 			return (0);
    611 
    612 		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
    613 				      nl[2].n_value, nprocs);
    614 #ifdef notdef
    615 		size = nprocs * sizeof(struct kinfo_proc);
    616 		(void)realloc(kd->procbase, size);
    617 #endif
    618 	}
    619 	*cnt = nprocs;
    620 	return (kd->procbase);
    621 }
    622 
    623 void
    624 _kvm_freeprocs(kd)
    625 	kvm_t *kd;
    626 {
    627 	if (kd->procbase) {
    628 		free(kd->procbase);
    629 		kd->procbase = 0;
    630 	}
    631 }
    632 
    633 void *
    634 _kvm_realloc(kd, p, n)
    635 	kvm_t *kd;
    636 	void *p;
    637 	size_t n;
    638 {
    639 	void *np = (void *)realloc(p, n);
    640 
    641 	if (np == 0)
    642 		_kvm_err(kd, kd->program, "out of memory");
    643 	return (np);
    644 }
    645 
    646 #ifndef MAX
    647 #define MAX(a, b) ((a) > (b) ? (a) : (b))
    648 #endif
    649 
    650 /*
    651  * Read in an argument vector from the user address space of process p.
     652  * addr is the user-space address of an array of narg pointers to the
     653  * null-terminated strings.  This is used to read in both the command
     654  * arguments and the environment strings; at most maxcnt characters are read.
    655  */
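         /*
          * The pointer array is fetched into kd->argv with kvm_uread(), then
          * the strings themselves are copied out a page at a time into
          * kd->argspc, which is grown (and the collected argv pointers
          * relocated) whenever it fills up.
          */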
    656 static char **
    657 kvm_argv(kd, p, addr, narg, maxcnt)
    658 	kvm_t *kd;
    659 	const struct proc *p;
    660 	u_long addr;
    661 	int narg;
    662 	int maxcnt;
    663 {
    664 	char *np, *cp, *ep, *ap;
    665 	u_long oaddr = -1;
    666 	int len, cc;
    667 	char **argv;
    668 
    669 	/*
     670 	 * Check that there isn't an unreasonable number of arguments,
    671 	 * and that the address is in user space.
    672 	 */
    673 	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
    674 		return (0);
    675 
    676 	if (kd->argv == 0) {
    677 		/*
    678 		 * Try to avoid reallocs.
    679 		 */
    680 		kd->argc = MAX(narg + 1, 32);
    681 		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
    682 						sizeof(*kd->argv));
    683 		if (kd->argv == 0)
    684 			return (0);
    685 	} else if (narg + 1 > kd->argc) {
    686 		kd->argc = MAX(2 * kd->argc, narg + 1);
    687 		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
    688 						sizeof(*kd->argv));
    689 		if (kd->argv == 0)
    690 			return (0);
    691 	}
    692 	if (kd->argspc == 0) {
    693 		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
    694 		if (kd->argspc == 0)
    695 			return (0);
    696 		kd->arglen = kd->nbpg;
    697 	}
    698 	if (kd->argbuf == 0) {
    699 		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
    700 		if (kd->argbuf == 0)
    701 			return (0);
    702 	}
    703 	cc = sizeof(char *) * narg;
    704 	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
    705 		return (0);
    706 	ap = np = kd->argspc;
    707 	argv = kd->argv;
    708 	len = 0;
    709 	/*
    710 	 * Loop over pages, filling in the argument vector.
    711 	 */
    712 	while (argv < kd->argv + narg && *argv != 0) {
    713 		addr = (u_long)*argv & ~(kd->nbpg - 1);
    714 		if (addr != oaddr) {
    715 			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
    716 			    kd->nbpg)
    717 				return (0);
    718 			oaddr = addr;
    719 		}
    720 		addr = (u_long)*argv & (kd->nbpg - 1);
    721 		cp = kd->argbuf + addr;
    722 		cc = kd->nbpg - addr;
    723 		if (maxcnt > 0 && cc > maxcnt - len)
     724 			cc = maxcnt - len;
    725 		ep = memchr(cp, '\0', cc);
    726 		if (ep != 0)
    727 			cc = ep - cp + 1;
    728 		if (len + cc > kd->arglen) {
    729 			int off;
    730 			char **pp;
    731 			char *op = kd->argspc;
    732 
    733 			kd->arglen *= 2;
    734 			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
    735 							  kd->arglen);
    736 			if (kd->argspc == 0)
    737 				return (0);
    738 			/*
    739 			 * Adjust argv pointers in case realloc moved
    740 			 * the string space.
    741 			 */
    742 			off = kd->argspc - op;
    743 			for (pp = kd->argv; pp < argv; pp++)
    744 				*pp += off;
    745 			ap += off;
    746 			np += off;
    747 		}
    748 		memcpy(np, cp, cc);
    749 		np += cc;
    750 		len += cc;
    751 		if (ep != 0) {
    752 			*argv++ = ap;
    753 			ap = np;
    754 		} else
    755 			*argv += cc;
    756 		if (maxcnt > 0 && len >= maxcnt) {
    757 			/*
    758 			 * We're stopping prematurely.  Terminate the
    759 			 * current string.
    760 			 */
    761 			if (ep == 0) {
    762 				*np = '\0';
    763 				*argv++ = ap;
    764 			}
    765 			break;
    766 		}
    767 	}
    768 	/* Make sure argv is terminated. */
    769 	*argv = 0;
    770 	return (kd->argv);
    771 }
    772 
    773 static void
    774 ps_str_a(p, addr, n)
    775 	struct ps_strings *p;
    776 	u_long *addr;
    777 	int *n;
    778 {
    779 	*addr = (u_long)p->ps_argvstr;
    780 	*n = p->ps_nargvstr;
    781 }
    782 
    783 static void
    784 ps_str_e(p, addr, n)
    785 	struct ps_strings *p;
    786 	u_long *addr;
    787 	int *n;
    788 {
    789 	*addr = (u_long)p->ps_envstr;
    790 	*n = p->ps_nenvstr;
    791 }
    792 
    793 /*
    794  * Determine if the proc indicated by p is still active.
    795  * This test is not 100% foolproof in theory, but chances of
    796  * being wrong are very low.
    797  */
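         /*
          * "Still active" means the proc slot at kernp still holds the same
          * pid and has not turned into a zombie behind our back (unless it
          * already was one when we sampled it).
          */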
    798 static int
    799 proc_verify(kd, kernp, p)
    800 	kvm_t *kd;
    801 	u_long kernp;
    802 	const struct proc *p;
    803 {
    804 	struct proc kernproc;
    805 
    806 	/*
    807 	 * Just read in the whole proc.  It's not that big relative
    808 	 * to the cost of the read system call.
    809 	 */
    810 	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
    811 	    sizeof(kernproc))
    812 		return (0);
    813 	return (p->p_pid == kernproc.p_pid &&
    814 		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
    815 }
    816 
    817 static char **
    818 kvm_doargv(kd, kp, nchr, info)
    819 	kvm_t *kd;
    820 	const struct kinfo_proc *kp;
    821 	int nchr;
    822 	void (*info)(struct ps_strings *, u_long *, int *);
    823 {
    824 	const struct proc *p = &kp->kp_proc;
    825 	char **ap;
    826 	u_long addr;
    827 	int cnt;
    828 	struct ps_strings arginfo;
    829 
    830 	/*
    831 	 * Pointers are stored at the top of the user stack.
    832 	 */
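         	/*
         	 * The ps_strings structure sits just below kd->usrstack; the
         	 * info callback (ps_str_a() or ps_str_e()) then extracts either
         	 * the argument or the environment pointer/count from it.
         	 */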
    833 	if (p->p_stat == SZOMB)
    834 		return (0);
    835 	cnt = kvm_uread(kd, p, kd->usrstack - sizeof(arginfo),
    836 	    (char *)&arginfo, sizeof(arginfo));
    837 	if (cnt != sizeof(arginfo))
    838 		return (0);
    839 
    840 	(*info)(&arginfo, &addr, &cnt);
    841 	if (cnt == 0)
    842 		return (0);
    843 	ap = kvm_argv(kd, p, addr, cnt, nchr);
    844 	/*
    845 	 * For live kernels, make sure this process didn't go away.
    846 	 */
    847 	if (ap != 0 && ISALIVE(kd) &&
    848 	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
    849 		ap = 0;
    850 	return (ap);
    851 }
    852 
    853 /*
    854  * Get the command args.  This code is now machine independent.
    855  */
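         /*
          * kvm_getargv() and kvm_getenvv() are thin wrappers around
          * kvm_doargv(); they differ only in which ps_strings fields the
          * callback (ps_str_a() vs. ps_str_e()) extracts.
          */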
    856 char **
    857 kvm_getargv(kd, kp, nchr)
    858 	kvm_t *kd;
    859 	const struct kinfo_proc *kp;
    860 	int nchr;
    861 {
    862 	return (kvm_doargv(kd, kp, nchr, ps_str_a));
    863 }
    864 
    865 char **
    866 kvm_getenvv(kd, kp, nchr)
    867 	kvm_t *kd;
    868 	const struct kinfo_proc *kp;
    869 	int nchr;
    870 {
    871 	return (kvm_doargv(kd, kp, nchr, ps_str_e));
    872 }
    873 
    874 /*
    875  * Read from user space.  The user context is given by p.
    876  */
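         /*
          * The copy proceeds in chunks: _kvm_uread() locates the page backing
          * uva (resident or on swap), reads it into kd->swapspc and returns a
          * pointer into that buffer along with the number of valid bytes.
          */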
    877 ssize_t
    878 kvm_uread(kd, p, uva, buf, len)
    879 	kvm_t *kd;
    880 	const struct proc *p;
    881 	u_long uva;
    882 	char *buf;
    883 	size_t len;
    884 {
    885 	char *cp;
    886 
    887 	cp = buf;
    888 	while (len > 0) {
    889 		int cc;
    890 		char *dp;
    891 		u_long cnt;
    892 
    893 		dp = _kvm_uread(kd, p, uva, &cnt);
    894 		if (dp == 0) {
     895 			_kvm_err(kd, 0, "invalid address (%lx)", uva);
    896 			return (0);
    897 		}
    898 		cc = MIN(cnt, len);
    899 		memcpy(cp, dp, cc);
    900 
    901 		cp += cc;
    902 		uva += cc;
    903 		len -= cc;
    904 	}
    905 	return (ssize_t)(cp - buf);
    906 }
    907