/*-
 * Copyright (c) 1994 Charles Hannum.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <stdlib.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <sys/sysctl.h>

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

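/*
 * KREAD() reads a fixed-size object from kernel virtual memory at the
 * given address; it evaluates to nonzero on a short or failed read, so
 * callers treat a true result as an error.
 */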
#define	KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

int _kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
ssize_t kvm_uread __P((kvm_t *, const struct proc *, u_long, char *, size_t));

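/*
 * Find the page of process p's address space that contains the user
 * virtual address va by walking the process vm_map and following the
 * shadow-object chain, then pull that page in from the swap device via
 * _kvm_readfrompager().  On success the page sits in kd->swapspc and the
 * return value points at the byte corresponding to va, with *cnt set to
 * the number of valid bytes from there to the end of the page; on any
 * failure 0 is returned.
 */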
static char *
kvm_readswap(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;

	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}
	head = (u_long)&p->p_vmspace->vm_map.header;
	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;
	while (1) {
		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    _kvm_readfrompager(kd, &vmo, offset))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[offset]);
}

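/*
 * Read the page at the given offset within the VM object vmop from its
 * swap pager into kd->swapspc.  Returns 1 if the page was paged out and
 * has been read in successfully, 0 if it is not present in swap or any
 * of the intermediate kernel reads fail.
 */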
int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	register off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (0);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (0);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (0);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (0);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (0);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));
	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (0);
	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (0);

	return (1);
}

/*
 * Read procs from the memory file into buffer bp, which has space to hold
 * at most maxcnt procs.  Returns the number of procs read, or -1 on error.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	register int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			    &eproc.e_ucred);

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %x",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

#ifdef sparc
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_rssize,
		    (char *)&eproc.e_vm.vm_rssize,
		    sizeof(eproc.e_vm.vm_rssize));
		(void)kvm_read(kd, (u_long)&proc.p_vmspace->vm_tsize,
		    (char *)&eproc.e_vm.vm_tsize,
		    3 * sizeof(eproc.e_vm.vm_rssize));	/* XXX */
#else
		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
#endif
		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	register struct kinfo_proc *bp = kd->procbase;
	register int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

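/*
 * Return an array of kinfo_proc structures describing the processes
 * selected by op/arg and store the count in *cnt.  On a live kernel the
 * list comes from sysctl(CTL_KERN, KERN_PROC, op, arg); on a dead kernel
 * it is rebuilt by walking the allproc and zombproc lists with
 * kvm_deadprocs().  The returned buffer belongs to the kvm descriptor
 * and is released on the next call or by kvm_close().
 *
 * Rough usage sketch (illustrative only; error handling omitted):
 *
 *	kvm_t *kd = kvm_open(NULL, NULL, NULL, O_RDONLY, "ps");
 *	int cnt;
 *	struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt);
 */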
struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%d total, %d chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

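/*
 * Free the proc buffer attached to the kvm descriptor, if any, and clear
 * the pointer so it is not freed a second time.
 */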
void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

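/*
 * realloc() wrapper that reports allocation failure through the kvm
 * error mechanism and returns the (possibly moved) buffer, or 0 on
 * failure.
 */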
void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define	MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	struct proc *p;
	register u_long addr;
	register int narg;
	register int maxcnt;
{
	register char *cp;
	register int len, cc;
	register char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > 512 || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = kd->nbpg;
	}
	cp = kd->argspc;
	argv = kd->argv;
	*argv = cp;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (addr < VM_MAXUSER_ADDRESS) {
		cc = kd->nbpg - (addr & (kd->nbpg - 1));
		if (maxcnt > 0 && cc > maxcnt - len)
			cc = maxcnt - len;
		if (len + cc > kd->arglen) {
			register int off;
			register char **pp;
			register char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
			    kd->arglen);
			if (kd->argspc == 0)
				return (0);
			cp = &kd->argspc[len];
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; ++pp)
				*pp += off;
		}
		if (kvm_uread(kd, p, addr, cp, cc) != cc)
			/* XXX */
			return (0);
		len += cc;
		addr += cc;

		if (maxcnt == 0 && len > 16 * kd->nbpg)
			/* sanity */
			return (0);

		while (--cc >= 0) {
			if (*cp++ == 0) {
				if (--narg <= 0) {
					*++argv = 0;
					return (kd->argv);
				} else
					*++argv = cp;
			}
		}
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * argv and current string.
			 */
			*++argv = 0;
			*cp = 0;
			return (kd->argv);
		}
	}
	/*
	 * Ran off the end of the user address space without finding the
	 * end of the strings.
	 */
	return (0);
}

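/*
 * The following two helpers extract the base address and count of the
 * argument and environment string vectors, respectively, from a struct
 * ps_strings; kvm_doargv() takes one of them as its `info' callback.
 */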
static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

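/*
 * Common code for kvm_getargv() and kvm_getenvv(): read the ps_strings
 * structure from the top of the process's user stack, let the `info'
 * callback pick out the relevant string vector, and build the in-core
 * argument vector with kvm_argv().  For live kernels the process is
 * re-verified afterwards so that a vector belonging to an exited process
 * is not returned.
 */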
static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	int (*info)(struct ps_strings *, u_long *, int *);
{
	register const struct proc *p = &kp->kp_proc;
	register char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB ||
	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
	    sizeof(arginfo)) != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

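/*
 * Get the environment strings.  Machine independent, like kvm_getargv().
 */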
char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}

/*
 * Read from user space.  The user context is given by p.
 * Returns the number of bytes actually copied into buf.
 */
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	register const struct proc *p;
	register u_long uva;
	register char *buf;
	register size_t len;
{
	register char *cp;

	cp = buf;
	while (len > 0) {
		u_long pa;
		register int cc;

		cc = _kvm_uvatop(kd, p, uva, &pa);
		if (cc > 0) {
			if (cc > len)
				cc = len;
			errno = 0;
			if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
				_kvm_err(kd, 0, "invalid address (%x)", uva);
				break;
			}
			cc = read(kd->pmfd, cp, cc);
			if (cc < 0) {
				_kvm_syserr(kd, 0, _PATH_MEM);
				break;
			} else if (cc < len) {
				_kvm_err(kd, kd->program, "short read");
				break;
			}
		} else if (ISALIVE(kd)) {
			/* try swap */
			register char *dp;
			u_long cnt;

			dp = kvm_readswap(kd, p, uva, &cnt);
			if (dp == 0) {
				_kvm_err(kd, 0, "invalid address (%x)", uva);
				return (0);
			}
			cc = MIN(cnt, len);
			bcopy(dp, cp, cc);
		} else
			break;
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}