/*-
 * Copyright (c) 1994 Charles Hannum.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
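
/*
 * Typical usage -- a minimal sketch of how a ps-like consumer might drive
 * this module; error handling is omitted and the names errbuf/i are
 * illustrative, not part of this file:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	int i, cnt;
 *	struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt);
 *
 *	for (i = 0; i < cnt; i++) {
 *		char **argv = kvm_getargv(kd, &kp[i], 0);
 *		...
 *	}
 *	kvm_close(kd);
 *
 * A maxcnt of 0 to kvm_getargv() means "no explicit character limit".
 */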

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <errno.h>		/* errno is examined in kvm_uread() */
#include <stdio.h>		/* debug printf()s in _kvm_readfrompager() */
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

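/*
 * KREAD() wraps kvm_read() for fixed-size kernel structures: it evaluates
 * to non-zero (true) when the read fails, so callers use it directly as
 * an error condition.
 */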
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

int _kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long, char *));

static char *
kvm_readswap(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;
	static char *page;
	int nbpg = getpagesize();

	if (page == 0) {
		/* XXX should be placed in kvm_t (so we can free it) */
		page = (char *)_kvm_malloc(kd, nbpg);
		if (page == 0)
			return (0);
	}
	head = (u_long)&p->p_vmspace->vm_map.header;
	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;
	while (1) {
		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    _kvm_readfrompager(kd, &vmo, offset, page))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	/* Found the page. */
	offset %= nbpg;
	*cnt = nbpg - offset;
	return (&page[offset]);
}

int
_kvm_readfrompager(kd, vmop, offset, buf)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
	char *buf;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	register off_t seekpoint;
	int nbpg = getpagesize();

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (0);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (0);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
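	/*
	 * A worked example of the arithmetic below (assuming the traditional
	 * 512-byte DEV_BSIZE and 4096-byte pages): with sw_bsize == 32, each
	 * swblock maps dbtob(32) == 16384 bytes of the object, i.e. 4 pages.
	 * An object offset of 0x3000 then selects sw_blocks[0x3000 / 16384]
	 * == sw_blocks[0], leaves a residual offset of 0x3000 within that
	 * block, and bit (0x3000 / 4096) == 3 of swb_mask says whether that
	 * page is actually present in swap.
	 */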
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (0);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (0);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (0);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / nbpg))) == 0)
		return (0);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(nbpg - 1));
	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (0);
	if (read(kd->swfd, buf, nbpg) != nbpg)
		return (0);

	return (1);
}

/*
 * Read procs from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
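/*
 * The `what' argument is one of the KERN_PROC_* selectors from
 * <sys/sysctl.h>; `arg' is interpreted accordingly (a pid, uid, process
 * group id, or controlling-terminal device, or ignored for KERN_PROC_ALL).
 */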
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	register int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			    &eproc.e_ucred);

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %x",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

#ifdef sparc
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_rssize,
		    (char *)&eproc.e_vm.vm_rssize,
		    sizeof(eproc.e_vm.vm_rssize));
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_tsize,
		    (char *)&eproc.e_vm.vm_tsize,
		    3 * sizeof(eproc.e_vm.vm_rssize));	/* XXX */
#else
		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
#endif
		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
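/*
 * Both the allproc and zombproc lists are walked, so zombies appear in
 * the result after the live processes.
 */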
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	register struct kinfo_proc *bp = kd->procbase;
	register int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	int mib[4], st, nprocs;
	size_t size;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
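	/*
	 * Two paths: for a live kernel we let sysctl(KERN_PROC) do the work
	 * (sizing the buffer with a NULL first pass), while for a dead
	 * kernel or crash dump we locate nprocs/allproc/zombproc via nlist
	 * and walk the process lists ourselves with kvm_deadprocs().
	 */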
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%d total, %d chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
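/*
 * The returned vector and the strings it points to live in buffers hung
 * off the kvm_t (kd->argv and kd->argspc), so they remain valid only
 * until the next call on this descriptor.
 */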
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	struct proc *p;
	register u_long addr;
	register int narg;
	register int maxcnt;
{
	register char *cp;
	register int len, cc;
	register char **argv;
	int nbpg = getpagesize();

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > 512 || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, nbpg);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = nbpg;
	}
	cp = kd->argspc;
	argv = kd->argv;
	*argv = cp;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (addr < VM_MAXUSER_ADDRESS) {
		cc = nbpg - (addr & (nbpg - 1));
		if (maxcnt > 0 && cc > maxcnt - len)
			cc = maxcnt - len;
		if (len + cc > kd->arglen) {
			register int off;
			register char **pp;
			register char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
			    kd->arglen);
			if (kd->argspc == 0)
				return (0);
			cp = &kd->argspc[len];
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; ++pp)
				*pp += off;
		}
		if (kvm_uread(kd, p, addr, cp, cc) != cc)
			/* XXX */
			return (0);
		len += cc;
		addr += cc;

		if (maxcnt == 0 && len > 16 * nbpg)
			/* sanity */
			return (0);

		while (--cc >= 0) {
			if (*cp++ == 0) {
				if (--narg <= 0) {
					*++argv = 0;
					return (kd->argv);
				} else
					*++argv = cp;
			}
		}
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * argv and current string.
			 */
			*++argv = 0;
			*cp = 0;
			return (kd->argv);
		}
	}
	/* Ran off the end of the user address space; give up. */
	return (0);
}

static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	register const struct proc *p = &kp->kp_proc;
	register char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB ||
	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
	    sizeof(arginfo)) != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}

/*
 * Read from user space.  The user context is given by p.
 */
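/*
 * Each chunk is translated with _kvm_uvatop(); if that fails on a live
 * kernel the page is looked up in swap via kvm_readswap().  The return
 * value is the number of bytes actually copied, which may be short.
 */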
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	register struct proc *p;
	register u_long uva;
	register char *buf;
	register size_t len;
{
	register char *cp;

	cp = buf;
	while (len > 0) {
		u_long pa;
		register int cc;

		cc = _kvm_uvatop(kd, p, uva, &pa);
		if (cc > 0) {
			if (cc > len)
				cc = len;
			errno = 0;
			if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
				_kvm_err(kd, 0, "invalid address (%x)", uva);
				break;
			}
			cc = read(kd->pmfd, cp, cc);
			if (cc < 0) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			} else if (cc < len) {
				_kvm_err(kd, kd->program, "short read");
				break;
			}
		} else if (ISALIVE(kd)) {
			/* try swap */
			register char *dp;
			u_long cnt;

			dp = kvm_readswap(kd, p, uva, &cnt);
			if (dp == 0) {
				_kvm_err(kd, 0, "invalid address (%x)", uva);
				return (0);
			}
			cc = MIN(cnt, len);
			bcopy(dp, cp, cc);
		} else
			break;
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}