/*	$NetBSD: kvm_proc.c,v 1.62.4.1 2007/02/11 13:40:55 tron Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#else
__RCSID("$NetBSD: kvm_proc.c,v 1.62.4.1 2007/02/11 13:40:55 tron Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
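
/*
 * Rough usage sketch (not part of this file): a consumer such as ps(1)
 * typically opens a kvm handle and then walks the process table through
 * the kinfo_proc2 interface implemented below.  Variable names here are
 * illustrative only and error handling is omitted:
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd = kvm_openfiles(NULL, NULL, NULL, KVM_NO_FILES, errbuf);
 *	int cnt;
 *	struct kinfo_proc2 *kp = kvm_getproc2(kd, KERN_PROC_ALL, 0,
 *	    sizeof(*kp), &cnt);
 *	for (int i = 0; i < cnt; i++)
 *		printf("%d %s\n", kp[i].p_pid, kp[i].p_comm);
 *	kvm_close(kd);
 */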

#include <sys/param.h>
#include <sys/user.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/resourcevar.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_amap.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

/*
 * Common info from kinfo_proc and kinfo_proc2 used by helper routines.
 */
struct miniproc {
	struct vmspace	*p_vmspace;
	char		p_stat;
	struct proc	*p_paddr;
	pid_t		p_pid;
};

/*
 * Convert from struct proc and kinfo_proc{,2} to miniproc.
 */
#define PTOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->p_stat; \
		(p)->p_pid = (kp)->p_pid; \
		(p)->p_paddr = NULL; \
		(p)->p_vmspace = (kp)->p_vmspace; \
	} while (/*CONSTCOND*/0);

#define KPTOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->kp_proc.p_stat; \
		(p)->p_pid = (kp)->kp_proc.p_pid; \
		(p)->p_paddr = (kp)->kp_eproc.e_paddr; \
		(p)->p_vmspace = (kp)->kp_proc.p_vmspace; \
	} while (/*CONSTCOND*/0);

#define KP2TOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->p_stat; \
		(p)->p_pid = (kp)->p_pid; \
		(p)->p_paddr = (void *)(long)(kp)->p_paddr; \
		(p)->p_vmspace = (void *)(long)(kp)->p_vmspace; \
	} while (/*CONSTCOND*/0);

/*
 * NetBSD uses kauth(9) to manage credentials, which are stored in kauth_cred_t,
 * a kernel-only opaque type.  This is an embedded version which is *INTERNAL* to
 * kvm(3) so dumps can be read properly.
 *
 * Whenever NetBSD starts exporting credentials to userland consistently (using
 * 'struct uucred', or something) this will have to be updated again.
 */
struct kvm_kauth_cred {
	struct simplelock cr_lock;	/* lock on cr_refcnt */
	u_int cr_refcnt;		/* reference count */
	uid_t cr_uid;			/* user id */
	uid_t cr_euid;			/* effective user id */
	uid_t cr_svuid;			/* saved effective user id */
	gid_t cr_gid;			/* group id */
	gid_t cr_egid;			/* effective group id */
	gid_t cr_svgid;			/* saved effective group id */
	u_int cr_ngroups;		/* number of groups */
	gid_t cr_groups[NGROUPS];	/* group memberships */
};

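/*
 * KREAD() evaluates to non-zero when the kernel read fails, i.e. when
 * kvm_read() cannot copy a complete *(obj) from kernel virtual address
 * addr, so callers treat a true result as an error.
 */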
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (obj), sizeof(*obj)) != sizeof(*obj))

/* XXX: What uses these two functions? */
char		*_kvm_uread __P((kvm_t *, const struct proc *, u_long,
		    u_long *));
ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
		    size_t));

static char	*_kvm_ureadm __P((kvm_t *, const struct miniproc *, u_long,
		    u_long *));
static ssize_t	kvm_ureadm __P((kvm_t *, const struct miniproc *, u_long,
		    char *, size_t));

static char	**kvm_argv __P((kvm_t *, const struct miniproc *, u_long, int,
		    int));
static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
static char	**kvm_doargv __P((kvm_t *, const struct miniproc *, int,
		    void (*)(struct ps_strings *, u_long *, int *)));
static char	**kvm_doargv2 __P((kvm_t *, pid_t, int, int));
static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int));
static int	proc_verify __P((kvm_t *, u_long, const struct miniproc *));
static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
static void	ps_str_e __P((struct ps_strings *, u_long *, int *));


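/*
 * Look up the page that backs user virtual address va in the target
 * process described by p: walk the process's vm_map for the matching
 * map entry, follow its amap to the anon, and read the page either from
 * physical memory (the core file) or from the swap file.  On success a
 * pointer into kd->swapspc is returned and *cnt is set to the number of
 * contiguous valid bytes from that offset; on any failure NULL is
 * returned.
 */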
static char *
_kvm_ureadm(kd, p, va, cnt)
	kvm_t *kd;
	const struct miniproc *p;
	u_long va;
	u_long *cnt;
{
	int true = 1;
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	u_long slot;

	if (kd->swapspc == NULL) {
		kd->swapspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->swapspc == NULL)
			return (NULL);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (true) {
		if (KREAD(kd, addr, &vme))
			return (NULL);

		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (NULL);
	}

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return (NULL);

	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return (NULL);

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_pageoff;
	/* sanity-check slot number */
	if (slot > amap.am_nslot)
		return (NULL);

	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return (NULL);

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return (NULL);

	addr = (u_long)anon.an_page;
	if (addr) {
		if (KREAD(kd, addr, &pg))
			return (NULL);

		if (pread(kd->pmfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)pg.phys_addr) != kd->nbpg)
			return (NULL);
	} else {
		if (kd->swfd < 0 ||
		    pread(kd->swfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
			return (NULL);
	}

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[(size_t)offset]);
}

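/*
 * Exported variant of the above for callers that still hold a struct proc;
 * it simply repackages the proc as a miniproc.
 */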
char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	struct miniproc mp;

	PTOMINI(p, &mp);
	return (_kvm_ureadm(kd, &mp, va, cnt));
}

/*
 * Convert the credentials located at kernel virtual address 'cred' and
 * store them in the appropriate members of 'eproc'.
 */
static int
_kvm_convertcred(kvm_t *kd, u_long cred, struct eproc *eproc)
{
	struct kvm_kauth_cred kauthcred;
	struct pcred *pc = &eproc->e_pcred;
	struct ucred *uc = &eproc->e_ucred;

	if (KREAD(kd, cred, &kauthcred) != 0)
		return (-1);

	/* inlined version of kauth_cred_to_pcred, see kauth(9). */
	pc->p_ruid = kauthcred.cr_uid;
	pc->p_svuid = kauthcred.cr_svuid;
	pc->p_rgid = kauthcred.cr_gid;
	pc->p_svgid = kauthcred.cr_svgid;
	pc->p_refcnt = kauthcred.cr_refcnt;
	pc->pc_ucred = (void *)cred;

	/* inlined version of kauth_cred_to_ucred(), see kauth(9). */
	uc->cr_ref = kauthcred.cr_refcnt;
	uc->cr_uid = kauthcred.cr_euid;
	uc->cr_gid = kauthcred.cr_egid;
	uc->cr_ngroups = MIN(kauthcred.cr_ngroups,
	    sizeof(uc->cr_groups) / sizeof(uc->cr_groups[0]));
	memcpy(uc->cr_groups, kauthcred.cr_groups,
	    uc->cr_ngroups * sizeof(uc->cr_groups[0]));

	return (0);
}

/*
 * Read procs from the memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	int cnt = 0;
	int nlwps;
	struct kinfo_lwp *kl;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (_kvm_convertcred(kd, (u_long)proc.p_cred, &eproc) != 0) {
			_kvm_err(kd, kd->program,
			    "can't read proc credentials at %p", p);
			return (-1);
		}

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume that nprocs (or the
		 * proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %p",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		eproc.e_sid = sess.s_sid;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		/*
		 * Fill in the old-style proc.p_wmesg by copying the wmesg
		 * from the first available LWP.
		 */
		kl = kvm_getlwps(kd, proc.p_pid,
		    (u_long)PTRTOUINT64(eproc.e_paddr),
		    sizeof(struct kinfo_lwp), &nlwps);
		if (kl) {
			if (nlwps > 0) {
				strcpy(eproc.e_wmesg, kl[0].l_wmesg);
			}
		}
		(void)kvm_read(kd, (u_long)proc.p_vmspace, &eproc.e_vm,
		    sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		memcpy(&bp->kp_proc, &proc, sizeof(proc));
		memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt,
	    maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

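/*
 * kvm_getproc2() is the kinfo_proc2 flavour of the process-table walk.
 * With a sysctl-only handle it asks the kernel directly via KERN_PROC2;
 * otherwise it builds the kinfo_proc2 records from kvm_getprocs() and
 * kvm_getlwps() output.  Illustrative call (hypothetical variable names,
 * error handling omitted):
 *
 *	int cnt;
 *	struct kinfo_proc2 *kp2 = kvm_getproc2(kd, KERN_PROC_UID,
 *	    (int)getuid(), sizeof(*kp2), &cnt);
 */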
struct kinfo_proc2 *
kvm_getproc2(kd, op, arg, esize, cnt)
	kvm_t *kd;
	int op, arg;
	size_t esize;
	int *cnt;
{
	size_t size;
	int mib[6], st, nprocs;
	struct pstats pstats;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = (int)esize;
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = (int) (size / esize);
		KVM_ALLOC(kd, procbase2, size);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = (int) (size / esize);
	} else {
		char *kp2c;
		struct kinfo_proc *kp;
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_lwp *kl;
		int i, nlwps;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		size = nprocs * esize;
		KVM_ALLOC(kd, procbase2, size);
		kp2c = (char *)(void *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
			    (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
			    sizeof(struct kinfo_lwp), &nlwps);
			/* We use kl[0] as the "representative" LWP */
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_forw = kl[0].l_forw;
			kp2p->p_back = kl[0].l_back;
			kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = kl[0].l_addr;
			kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
			kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
			kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
			kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
			kp2p->p_ru = PTRTOUINT64(kp->kp_proc.p_ru);

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
			kp2p->p_sid = kp->kp_eproc.e_sid;
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1 /* XXX NO_PGID! */;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
			kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;

			/*CONSTCOND*/
			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_sec = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_usec = kp->kp_proc.p_estcpu;
			kp2p->p_cpticks = kp->kp_proc.p_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kl[0].l_swtime;
			kp2p->p_slptime = kl[0].l_slptime;
#if 0 /* XXX thorpej */
			kp2p->p_schedflags = kp->kp_proc.p_schedflags;
#else
			kp2p->p_schedflags = 0;
#endif

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = kl[0].l_holdcnt;

			memcpy(&kp2p->p_siglist,
			    &kp->kp_proc.p_sigctx.ps_siglist,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigmask,
			    &kp->kp_proc.p_sigctx.ps_sigmask,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigignore,
			    &kp->kp_proc.p_sigctx.ps_sigignore,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigcatch,
			    &kp->kp_proc.p_sigctx.ps_sigcatch,
			    sizeof(ki_sigset_t));

			kp2p->p_stat = kp->kp_proc.p_stat;
			kp2p->p_priority = kl[0].l_priority;
			kp2p->p_usrpri = kl[0].l_usrpri;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			/*CONSTCOND*/
			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = kl[0].l_wchan;
			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;

			kp2p->p_realflag = kp->kp_proc.p_flag;
			kp2p->p_nlwps = kp->kp_proc.p_nlwps;
			kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
			kp2p->p_realstat = kp->kp_proc.p_stat;

			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_stats == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = (u_int32_t)
				    pstats.p_start.tv_sec;
				kp2p->p_ustart_usec = (u_int32_t)
				    pstats.p_start.tv_usec;

				kp2p->p_uutime_sec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_sec +
				    pstats.p_cru.ru_stime.tv_sec);
				kp2p->p_uctime_usec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_usec +
				    pstats.p_cru.ru_stime.tv_usec);
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}
	}
	*cnt = nprocs;
	return (kd->procbase2);
}

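/*
 * Return an array of kinfo_lwp structures describing the LWPs of the
 * process with the given pid (paddr is the kernel address of its struct
 * proc, used when groveling a dead kernel).  The array lives in storage
 * owned by the kvm descriptor and is reused by the next call; *cnt is
 * set to the number of entries.
 */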
struct kinfo_lwp *
kvm_getlwps(kd, pid, paddr, esize, cnt)
	kvm_t *kd;
	int pid;
	u_long paddr;
	size_t esize;
	int *cnt;
{
	size_t size;
	int mib[5], nlwps;
	ssize_t st;
	struct kinfo_lwp *kl;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_LWP;
		mib[2] = pid;
		mib[3] = (int)esize;
		mib[4] = 0;
		st = sysctl(mib, 5, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getlwps");
			return (NULL);
		}

		mib[4] = (int) (size / esize);
		KVM_ALLOC(kd, lwpbase, size);
		st = sysctl(mib, 5, kd->lwpbase, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getlwps");
			return (NULL);
		}
		nlwps = (int) (size / esize);
	} else {
		/* grovel through the memory image */
		struct proc p;
		struct lwp l;
		u_long laddr;
		int i;

		st = kvm_read(kd, paddr, &p, sizeof(p));
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getlwps");
			return (NULL);
		}

		nlwps = p.p_nlwps;
		size = nlwps * sizeof(*kd->lwpbase);
		KVM_ALLOC(kd, lwpbase, size);
		laddr = (u_long)PTRTOUINT64(p.p_lwps.lh_first);
		for (i = 0; (i < nlwps) && (laddr != 0); i++) {
			st = kvm_read(kd, laddr, &l, sizeof(l));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl = &kd->lwpbase[i];
			kl->l_laddr = laddr;
			kl->l_forw = PTRTOUINT64(l.l_forw);
			kl->l_back = PTRTOUINT64(l.l_back);
			kl->l_addr = PTRTOUINT64(l.l_addr);
			kl->l_lid = l.l_lid;
			kl->l_flag = l.l_flag;
			kl->l_swtime = l.l_swtime;
			kl->l_slptime = l.l_slptime;
			kl->l_schedflags = 0;	/* XXX */
			kl->l_holdcnt = l.l_holdcnt;
			kl->l_priority = l.l_priority;
			kl->l_usrpri = l.l_usrpri;
			kl->l_stat = l.l_stat;
			kl->l_wchan = PTRTOUINT64(l.l_wchan);
			if (l.l_wmesg)
				(void)kvm_read(kd, (u_long)l.l_wmesg,
				    kl->l_wmesg, (size_t)WMESGLEN);
			kl->l_cpuid = KI_NOCPU;
			laddr = (u_long)PTRTOUINT64(l.l_sibling.le_next);
		}
	}

	*cnt = nlwps;
	return (kd->lwpbase);
}

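/*
 * Return the old-style kinfo_proc array.  On a live kernel (an open
 * /dev/mem descriptor) this is a thin wrapper around the KERN_PROC
 * sysctl; a KVM_NO_FILES handle is rejected; otherwise allproc and
 * zombproc in the crash dump are walked via kvm_deadprocs().  Sketch of
 * a typical call (illustrative names, error handling omitted):
 *
 *	int cnt;
 *	struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt);
 */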
struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (ISKMEM(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		KVM_ALLOC(kd, procbase, size);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%lu total, %lu chunks)",
			    (u_long)size, (u_long)sizeof(struct kinfo_proc));
			return (NULL);
		}
		nprocs = (int) (size / sizeof(struct kinfo_proc));
	} else if (ISSYSCTL(kd)) {
		_kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
		    "can't use kvm_getprocs");
		return (NULL);
	} else {
		struct nlist nl[4], *p;

		(void)memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				continue;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (NULL);
		}
		size = nprocs * sizeof(*kd->procbase);
		KVM_ALLOC(kd, procbase, size);
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
		if (nprocs < 0)
			return (NULL);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

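/*
 * realloc(3) wrapper that reports allocation failure through _kvm_err().
 */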
void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = realloc(p, n);

	if (np == NULL)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	const struct miniproc *p;
	u_long addr;
	int narg;
	int maxcnt;
{
	char *np, *cp, *ep, *ap;
	u_long oaddr = (u_long)~0L;
	u_long len;
	size_t cc;
	char **argv;

	/*
	 * Check that there isn't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
		return (NULL);

	if (kd->argv == NULL) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	}
	if (kd->argspc == NULL) {
		kd->argspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argspc == NULL)
			return (NULL);
		kd->argspc_len = kd->nbpg;
	}
	if (kd->argbuf == NULL) {
		kd->argbuf = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argbuf == NULL)
			return (NULL);
	}
	cc = sizeof(char *) * narg;
	if (kvm_ureadm(kd, p, addr, (void *)kd->argv, cc) != cc)
		return (NULL);
	ap = np = kd->argspc;
	argv = kd->argv;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (argv < kd->argv + narg && *argv != NULL) {
		addr = (u_long)*argv & ~(kd->nbpg - 1);
		if (addr != oaddr) {
			if (kvm_ureadm(kd, p, addr, kd->argbuf,
			    (size_t)kd->nbpg) != kd->nbpg)
				return (NULL);
			oaddr = addr;
		}
		addr = (u_long)*argv & (kd->nbpg - 1);
		cp = kd->argbuf + (size_t)addr;
		cc = kd->nbpg - (size_t)addr;
		if (maxcnt > 0 && cc > (size_t)(maxcnt - len))
			cc = (size_t)(maxcnt - len);
		ep = memchr(cp, '\0', cc);
		if (ep != NULL)
			cc = ep - cp + 1;
		if (len + cc > kd->argspc_len) {
			ptrdiff_t off;
			char **pp;
			char *op = kd->argspc;

			kd->argspc_len *= 2;
			kd->argspc = _kvm_realloc(kd, kd->argspc,
			    kd->argspc_len);
			if (kd->argspc == NULL)
				return (NULL);
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; pp++)
				*pp += off;
			ap += off;
			np += off;
		}
		memcpy(np, cp, cc);
		np += cc;
		len += cc;
		if (ep != NULL) {
			*argv++ = ap;
			ap = np;
		} else
			*argv += cc;
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * current string.
			 */
			if (ep == NULL) {
				*np = '\0';
				*argv++ = ap;
			}
			break;
		}
	}
	/* Make sure argv is terminated. */
	*argv = NULL;
	return (kd->argv);
}

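/*
 * ps_str_a() and ps_str_e() pull the argv and environment vector address
 * and string count, respectively, out of a ps_strings structure; they are
 * handed to kvm_doargv() as its "info" callback.
 */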
static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{

	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{

	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct miniproc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, &kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

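/*
 * Common helper for kvm_getargv() and kvm_getenvv(): read the ps_strings
 * structure from the top of the target's user stack, let the info
 * callback pick out the relevant vector, and hand it to kvm_argv().  For
 * live kernels the result is discarded if the process has gone away in
 * the meantime.
 */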
static char **
kvm_doargv(kd, p, nchr, info)
	kvm_t *kd;
	const struct miniproc *p;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB)
		return (NULL);
	cnt = (int)kvm_ureadm(kd, p, kd->usrstack - sizeof(arginfo),
	    (void *)&arginfo, sizeof(arginfo));
	if (cnt != sizeof(arginfo))
		return (NULL);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (NULL);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != NULL && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)p->p_paddr, p))
		ap = NULL;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
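/*
 * Illustrative call (hypothetical, error handling omitted): after
 * fetching the table with kvm_getprocs(), a ps-like program would do
 *
 *	char **argv = kvm_getargv(kd, &kp[i], 0);
 *	for (; argv != NULL && *argv != NULL; argv++)
 *		printf("%s ", *argv);
 *
 * where passing 0 for nchr requests the strings without a length cap.
 */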
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	struct miniproc p;

	KPTOMINI(kp, &p);
	return (kvm_doargv(kd, &p, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	struct miniproc p;

	KPTOMINI(kp, &p);
	return (kvm_doargv(kd, &p, nchr, ps_str_e));
}

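/*
 * Sysctl-based counterpart of kvm_doargv(): fetch the argument or
 * environment strings of the given pid through KERN_PROC_ARGS instead of
 * reading the target's address space, then build the vector in place over
 * the returned string buffer.
 */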
static char **
kvm_doargv2(kd, pid, type, nchr)
	kvm_t *kd;
	pid_t pid;
	int type;
	int nchr;
{
	size_t bufs;
	int narg, mib[4];
	size_t newargspc_len;
	char **ap, *bp, *endp;

	/*
	 * Check that there isn't an unreasonable number of arguments.
	 */
	if (nchr > ARG_MAX)
		return (NULL);

	if (nchr == 0)
		nchr = ARG_MAX;

	/* Get number of strings in argv */
	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC_ARGS;
	mib[2] = pid;
	mib[3] = type == KERN_PROC_ARGV ? KERN_PROC_NARGV : KERN_PROC_NENV;
	bufs = sizeof(narg);
	if (sysctl(mib, 4, &narg, &bufs, NULL, (size_t)0) == -1)
		return (NULL);

	if (kd->argv == NULL) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	}

	newargspc_len = MIN(nchr, ARG_MAX);
	KVM_ALLOC(kd, argspc, newargspc_len);
	memset(kd->argspc, 0, (size_t)kd->argspc_len);	/* XXX necessary? */

	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC_ARGS;
	mib[2] = pid;
	mib[3] = type;
	bufs = kd->argspc_len;
	if (sysctl(mib, 4, kd->argspc, &bufs, NULL, (size_t)0) == -1)
		return (NULL);

	bp = kd->argspc;
	bp[kd->argspc_len-1] = '\0';	/* make sure the string ends with nul */
	ap = kd->argv;
	endp = bp + MIN(nchr, bufs);

	while (bp < endp) {
		*ap++ = bp;
		/*
		 * XXX: don't need following anymore, or stick check
		 * for max argc in above while loop?
		 */
		if (ap >= kd->argv + kd->argc) {
			kd->argc *= 2;
			kd->argv = _kvm_realloc(kd, kd->argv,
			    kd->argc * sizeof(*kd->argv));
			ap = kd->argv;
		}
		bp += strlen(bp) + 1;
	}
	*ap = NULL;

	return (kd->argv);
}

char **
kvm_getargv2(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc2 *kp;
	int nchr;
{

	return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ARGV, nchr));
}

char **
kvm_getenvv2(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc2 *kp;
	int nchr;
{

	return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ENV, nchr));
}

/*
 * Read from user space.  The user context is given by p.
 */
static ssize_t
kvm_ureadm(kd, p, uva, buf, len)
	kvm_t *kd;
	const struct miniproc *p;
	u_long uva;
	char *buf;
	size_t len;
{
	char *cp;

	cp = buf;
	while (len > 0) {
		size_t cc;
		char *dp;
		u_long cnt;

		dp = _kvm_ureadm(kd, p, uva, &cnt);
		if (dp == NULL) {
			_kvm_err(kd, 0, "invalid address (%lx)", uva);
			return (0);
		}
		cc = (size_t)MIN(cnt, len);
		memcpy(cp, dp, cc);
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}

ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	const struct proc *p;
	u_long uva;
	char *buf;
	size_t len;
{
	struct miniproc mp;

	PTOMINI(p, &mp);
	return (kvm_ureadm(kd, &mp, uva, buf, len));
}