/*-
 * Copyright (c) 1994 Charles Hannum.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
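
/*
 * A minimal usage sketch (not part of this library): roughly how a ps-like
 * consumer is expected to drive the routines below.  kvm_openfiles(),
 * kvm_close() and the KERN_PROC_* selectors come from kvm(3) and sysctl(3);
 * <fcntl.h>, <kvm.h>, <limits.h> and <sys/sysctl.h> are assumed to be
 * included, and the error checks and output format are illustrative only.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct kinfo_proc *kp;
 *	kvm_t *kd;
 *	int i, nprocs;
 *
 *	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
 *	for (i = 0; i < nprocs; ++i) {
 *		char **argv = kvm_getargv(kd, &kp[i], 0);
 *
 *		printf("%d\t%s\n", kp[i].kp_proc.p_pid,
 *		    argv ? argv[0] : kp[i].kp_proc.p_comm);
 *	}
 *	kvm_close(kd);
 */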

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "kvm_private.h"

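/*
 * KREAD() evaluates to non-zero on failure, i.e. when kvm_read() could not
 * fetch all sizeof(*obj) bytes from the kernel image, so callers below use
 * the pattern "if (KREAD(...)) bail out".
 */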
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

int _kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long, char *));

static char *
kvm_readswap(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;
	static char page[NBPG];

	head = (u_long)&p->p_vmspace->vm_map.header;
	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;
	while (1) {
		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    _kvm_readfrompager(kd, &vmo, offset, page))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	/* Found the page. */
	offset %= NBPG;
	*cnt = NBPG - offset;
	return (&page[offset]);
}

int
_kvm_readfrompager(kd, vmop, offset, buf)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
	char *buf;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	register off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (0);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (0);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (0);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (0);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (0);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / NBPG))) == 0)
		return (0);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~PGOFSET);
	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (0);
	if (read(kd->swfd, buf, NBPG) != NBPG)
		return (0);

	return (1);
}

/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	register int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			    &eproc.e_ucred);

		switch(what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
296 "can't read tpgrp at &x",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

#ifdef sparc
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_rssize,
		    (char *)&eproc.e_vm.vm_rssize,
		    sizeof(eproc.e_vm.vm_rssize));
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_tsize,
		    (char *)&eproc.e_vm.vm_tsize,
		    3 * sizeof(eproc.e_vm.vm_rssize));	/* XXX */
#else
		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
#endif
		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	register struct kinfo_proc *bp = kd->procbase;
	register int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	int mib[4], size, st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
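		/*
		 * Live kernel: standard two-pass sysctl(3) sizing -- first
		 * call with a NULL buffer to learn how much space the
		 * result needs, then allocate and fetch the real data.
		 */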
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%d total, %d chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	struct proc *p;
	register u_long addr;
	register int narg;
	register int maxcnt;
{
	register char *cp;
	register int len, cc;
	register char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > 512 || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, NBPG);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = NBPG;
	}
	cp = kd->argspc;
	argv = kd->argv;
	*argv = cp;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (addr < VM_MAXUSER_ADDRESS) {
		cc = NBPG - (addr & PGOFSET);
		if (maxcnt > 0 && cc > maxcnt - len)
			cc = maxcnt - len;
		if (len + cc > kd->arglen) {
			register int off;
			register char **pp;
			register char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
			    kd->arglen);
			if (kd->argspc == 0)
				return (0);
			cp = &kd->argspc[len];
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; ++pp)
				*pp += off;
		}
		if (kvm_uread(kd, p, addr, cp, cc) != cc)
			/* XXX */
			return (0);
		len += cc;
		addr += cc;

		if (maxcnt == 0 && len > 16 * NBPG)
			/* sanity */
			return (0);

		while (--cc >= 0) {
			if (*cp++ == 0) {
				if (--narg <= 0) {
					*++argv = 0;
					return (kd->argv);
				} else
					*++argv = cp;
			}
		}
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * argv and current string.
			 */
			*++argv = 0;
			*cp = 0;
			return (kd->argv);
		}
	}
	/* Ran off the end of the user address space before finding all strings. */
	return (0);
}

static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	register const struct proc *p = &kp->kp_proc;
	register char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB ||
	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
	    sizeof(arginfo)) != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}
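
/*
 * Companion sketch for kvm_getenvv() (same assumed setup as the
 * kvm_getprocs() example near the top of this file).  The vector comes
 * back NULL-terminated; a non-zero `nchr' bounds how many characters of
 * string space are read.
 *
 *	char **envv = kvm_getenvv(kd, &kp[i], 0);
 *
 *	if (envv != NULL)
 *		while (*envv)
 *			printf("%s\n", *envv++);
 */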

/*
 * Read from user space.  The user context is given by p.
 */
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	register struct proc *p;
	register u_long uva;
	register char *buf;
	register size_t len;
{
	register char *cp;

	cp = buf;
	while (len > 0) {
		u_long pa;
		register int cc;

		cc = _kvm_uvatop(kd, p, uva, &pa);
		if (cc > 0) {
			if (cc > len)
				cc = len;
			errno = 0;
			if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
				_kvm_err(kd, 0, "invalid address (%x)", uva);
				break;
			}
			cc = read(kd->pmfd, cp, cc);
			if (cc < 0) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			} else if (cc < len) {
				_kvm_err(kd, kd->program, "short read");
				break;
			}
		} else if (ISALIVE(kd)) {
			/* try swap */
			register char *dp;
			u_long cnt;

			dp = kvm_readswap(kd, p, uva, &cnt);
			if (dp == 0) {
				_kvm_err(kd, 0, "invalid address (%x)", uva);
				return (0);
			}
			cc = MIN(cnt, len);
			bcopy(dp, cp, cc);
		} else
			break;
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}
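
/*
 * Minimal sketch of a direct kvm_uread() call, assuming `kd' and a pointer
 * `kp' to one of the kinfo_proc entries returned by kvm_getprocs() (as in
 * the example near the top of this file).  It re-reads the ps_strings
 * block at the top of the user stack, the same way kvm_doargv() does:
 *
 *	struct ps_strings pss;
 *
 *	if (kvm_uread(kd, &kp->kp_proc, USRSTACK - sizeof(pss),
 *	    (char *)&pss, sizeof(pss)) == sizeof(pss))
 *		printf("%d argument strings at 0x%lx\n",
 *		    pss.ps_nargvstr, (u_long)pss.ps_argvstr);
 */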