kvm.c revision 1.8

1 1.1 cgd /*-
2 1.1 cgd * Copyright (c) 1989 The Regents of the University of California.
3 1.1 cgd * All rights reserved.
4 1.1 cgd *
5 1.1 cgd * Redistribution and use in source and binary forms, with or without
6 1.1 cgd * modification, are permitted provided that the following conditions
7 1.1 cgd * are met:
8 1.1 cgd * 1. Redistributions of source code must retain the above copyright
9 1.1 cgd * notice, this list of conditions and the following disclaimer.
10 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
11 1.1 cgd * notice, this list of conditions and the following disclaimer in the
12 1.1 cgd * documentation and/or other materials provided with the distribution.
13 1.1 cgd * 3. All advertising materials mentioning features or use of this software
14 1.1 cgd * must display the following acknowledgement:
15 1.1 cgd * This product includes software developed by the University of
16 1.1 cgd * California, Berkeley and its contributors.
17 1.1 cgd * 4. Neither the name of the University nor the names of its contributors
18 1.1 cgd * may be used to endorse or promote products derived from this software
19 1.1 cgd * without specific prior written permission.
20 1.1 cgd *
21 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 1.1 cgd * SUCH DAMAGE.
32 1.1 cgd */
33 1.1 cgd
34 1.1 cgd #if defined(LIBC_SCCS) && !defined(lint)
35 1.6 cgd /* from: static char sccsid[] = "@(#)kvm.c 5.18 (Berkeley) 5/7/91"; */
36 1.8 cgd static char rcsid[] = "$Id: kvm.c,v 1.8 1993/06/01 01:35:01 cgd Exp $";
37 1.1 cgd #endif /* LIBC_SCCS and not lint */
38 1.1 cgd
39 1.1 cgd #include <sys/param.h>
40 1.1 cgd #include <sys/user.h>
41 1.1 cgd #include <sys/proc.h>
42 1.1 cgd #include <sys/ioctl.h>
43 1.1 cgd #include <sys/kinfo.h>
44 1.1 cgd #include <sys/tty.h>
45 1.8 cgd #include <sys/exec.h>
46 1.1 cgd #include <machine/vmparam.h>
47 1.1 cgd #include <fcntl.h>
48 1.1 cgd #include <nlist.h>
49 1.1 cgd #include <kvm.h>
50 1.1 cgd #include <ndbm.h>
51 1.1 cgd #include <limits.h>
52 1.1 cgd #include <paths.h>
53 1.1 cgd #include <stdio.h>
54 1.1 cgd #include <string.h>
55 1.1 cgd
56 1.1 cgd #ifdef SPPWAIT
57 1.1 cgd #define NEWVM
58 1.1 cgd #endif
59 1.1 cgd
60 1.1 cgd #ifdef NEWVM
61 1.1 cgd #define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
62 1.1 cgd #define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
63 1.1 cgd #include <vm/vm.h> /* ??? kinfo_proc currently includes this*/
64 1.3 cgd #include <vm/vm_page.h>
65 1.3 cgd #include <vm/swap_pager.h>
66 1.1 cgd #include <sys/kinfo_proc.h>
67 1.1 cgd #ifdef hp300
68 1.1 cgd #include <hp300/hp300/pte.h>
69 1.1 cgd #endif
70 1.1 cgd #else /* NEWVM */
71 1.1 cgd #include <machine/pte.h>
72 1.1 cgd #include <sys/vmmac.h>
73 1.1 cgd #include <sys/text.h>
74 1.1 cgd #endif /* NEWVM */
75 1.1 cgd
76 1.1 cgd /*
77 1.1 cgd * files
78 1.1 cgd */
79 1.1 cgd static const char *unixf, *memf, *kmemf, *swapf;
80 1.1 cgd static int unixx, mem, kmem, swap;
81 1.1 cgd static DBM *db;
82 1.1 cgd /*
83 1.1 cgd * flags
84 1.1 cgd */
85 1.1 cgd static int deadkernel;
86 1.1 cgd static int kvminit = 0;
87 1.1 cgd static int kvmfilesopen = 0;
88 1.1 cgd /*
89 1.1 cgd * state
90 1.1 cgd */
91 1.1 cgd static struct kinfo_proc *kvmprocbase, *kvmprocptr;
92 1.1 cgd static int kvmnprocs;
93 1.1 cgd /*
94 1.1 cgd * u. buffer
95 1.1 cgd */
96 1.1 cgd static union {
97 1.1 cgd struct user user;
98 1.1 cgd char upages[UPAGES][NBPG];
99 1.1 cgd } user;
100 1.3 cgd
101 1.3 cgd #ifdef NEWVM
102 1.3 cgd struct swapblk {
103 1.3 cgd long offset; /* offset in swap device */
104 1.3 cgd long size; /* remaining size of block in swap device */
105 1.3 cgd };
106 1.3 cgd #endif
107 1.1 cgd /*
108 1.1 cgd * random other stuff
109 1.1 cgd */
110 1.1 cgd #ifndef NEWVM
111 1.1 cgd static struct pte *Usrptmap, *usrpt;
112 1.1 cgd static struct pte *Sysmap;
113 1.1 cgd static int Syssize;
114 1.1 cgd #endif
115 1.1 cgd static int dmmin, dmmax;
116 1.1 cgd static int pcbpf;
117 1.1 cgd static int nswap;
118 1.1 cgd static char *tmp;
119 1.1 cgd #if defined(hp300)
120 1.1 cgd static int lowram;
121 1.1 cgd static struct ste *Sysseg;
122 1.1 cgd #endif
123 1.1 cgd #if defined(i386)
124 1.1 cgd static struct pde *PTD;
125 1.1 cgd #endif
126 1.1 cgd
127 1.1 cgd #define basename(cp) ((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
128 1.1 cgd #define MAXSYMSIZE 256
129 1.1 cgd
130 1.1 cgd #if defined(hp300)
131 1.1 cgd #define pftoc(f) ((f) - lowram)
132 1.1 cgd #define iskva(v) (1)
133 1.1 cgd #endif
134 1.1 cgd
135 1.1 cgd #ifndef pftoc
136 1.1 cgd #define pftoc(f) (f)
137 1.1 cgd #endif
138 1.1 cgd #ifndef iskva
139 1.1 cgd #define iskva(v) ((u_long)(v) & KERNBASE)
140 1.1 cgd #endif
141 1.1 cgd
142 1.1 cgd static struct nlist nl[] = {
143 1.1 cgd { "_Usrptmap" },
144 1.1 cgd #define X_USRPTMAP 0
145 1.1 cgd { "_usrpt" },
146 1.1 cgd #define X_USRPT 1
147 1.1 cgd { "_nswap" },
148 1.1 cgd #define X_NSWAP 2
149 1.1 cgd { "_dmmin" },
150 1.1 cgd #define X_DMMIN 3
151 1.1 cgd { "_dmmax" },
152 1.1 cgd #define X_DMMAX 4
153 1.3 cgd { "_vm_page_buckets" },
154 1.3 cgd #define X_VM_PAGE_BUCKETS 5
155 1.3 cgd { "_vm_page_hash_mask" },
156 1.3 cgd #define X_VM_PAGE_HASH_MASK 6
157 1.3 cgd { "_page_shift" },
158 1.3 cgd #define X_PAGE_SHIFT 7
159 1.1 cgd /*
160 1.1 cgd * everything here and down, only if a dead kernel
161 1.1 cgd */
162 1.1 cgd { "_Sysmap" },
163 1.3 cgd #define X_SYSMAP 8
164 1.1 cgd #define X_DEADKERNEL X_SYSMAP
165 1.1 cgd { "_Syssize" },
166 1.3 cgd #define X_SYSSIZE 9
167 1.1 cgd { "_allproc" },
168 1.3 cgd #define X_ALLPROC 10
169 1.1 cgd { "_zombproc" },
170 1.3 cgd #define X_ZOMBPROC 11
171 1.1 cgd { "_nproc" },
172 1.3 cgd #define X_NPROC 12
173 1.3 cgd #define X_LAST 12
174 1.1 cgd #if defined(hp300)
175 1.1 cgd { "_Sysseg" },
176 1.1 cgd #define X_SYSSEG (X_LAST+1)
177 1.1 cgd { "_lowram" },
178 1.1 cgd #define X_LOWRAM (X_LAST+2)
179 1.1 cgd #endif
180 1.1 cgd #if defined(i386)
181 1.1 cgd { "_IdlePTD" },
182 1.1 cgd #define X_IdlePTD (X_LAST+1)
183 1.1 cgd #endif
184 1.1 cgd { "" },
185 1.1 cgd };
186 1.1 cgd
187 1.1 cgd static off_t Vtophys();
188 1.1 cgd static void klseek(), seterr(), setsyserr(), vstodb();
189 1.1 cgd static int getkvars(), kvm_doprocs(), kvm_init();
190 1.3 cgd #ifdef NEWVM
191 1.3 cgd static int vatosw();
192 1.3 cgd static int findpage();
193 1.3 cgd #endif
194 1.1 cgd
195 1.1 cgd /*
196 1.1 cgd * returns 0 if files were opened now,
197 1.1 cgd * 1 if files were already opened,
198 1.1 cgd * -1 if files could not be opened.
199 1.1 cgd */
200 1.1 cgd kvm_openfiles(uf, mf, sf)
201 1.1 cgd const char *uf, *mf, *sf;
202 1.1 cgd {
203 1.1 cgd if (kvmfilesopen)
204 1.1 cgd return (1);
205 1.1 cgd unixx = mem = kmem = swap = -1;
206 1.1 cgd unixf = (uf == NULL) ? _PATH_UNIX : uf;
207 1.1 cgd memf = (mf == NULL) ? _PATH_MEM : mf;
208 1.1 cgd
209 1.1 cgd if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
210 1.1 cgd setsyserr("can't open %s", unixf);
211 1.1 cgd goto failed;
212 1.1 cgd }
213 1.1 cgd if ((mem = open(memf, O_RDONLY, 0)) == -1) {
214 1.1 cgd setsyserr("can't open %s", memf);
215 1.1 cgd goto failed;
216 1.1 cgd }
217 1.1 cgd if (sf != NULL)
218 1.1 cgd swapf = sf;
219 1.1 cgd if (mf != NULL) {
220 1.1 cgd deadkernel++;
221 1.1 cgd kmemf = mf;
222 1.1 cgd kmem = mem;
223 1.1 cgd swap = -1;
224 1.1 cgd } else {
225 1.1 cgd kmemf = _PATH_KMEM;
226 1.1 cgd if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
227 1.1 cgd setsyserr("can't open %s", kmemf);
228 1.1 cgd goto failed;
229 1.1 cgd }
230 1.1 cgd swapf = (sf == NULL) ? _PATH_DRUM : sf;
231 1.1 cgd /*
232 1.1 cgd * live kernel - avoid looking up nlist entries
233 1.1 cgd * past X_DEADKERNEL.
234 1.1 cgd */
235 1.1 cgd nl[X_DEADKERNEL].n_name = "";
236 1.1 cgd }
237 1.1 cgd if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
238 1.1 cgd seterr("can't open %s", swapf);
239 1.1 cgd goto failed;
240 1.1 cgd }
241 1.1 cgd kvmfilesopen++;
242 1.1 cgd if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
243 1.1 cgd return (-1);
244 1.1 cgd return (0);
245 1.1 cgd failed:
246 1.1 cgd kvm_close();
247 1.1 cgd return (-1);
248 1.1 cgd }
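
/*
 * Example (a sketch only, not part of the library): a typical consumer
 * opens the kernel files, builds the process list, and prints each
 * command line.  KINFO_PROC_ALL is assumed to come from <sys/kinfo.h>;
 * error handling is abbreviated.
 *
 *	struct proc *p;
 *
 *	if (kvm_openfiles(NULL, NULL, NULL) == -1 ||
 *	    kvm_getprocs(KINFO_PROC_ALL, 0) == -1) {
 *		fprintf(stderr, "%s\n", kvm_geterr());
 *		exit(1);
 *	}
 *	while ((p = kvm_nextproc()) != NULL)
 *		printf("%d\t%s\n", p->p_pid, kvm_getargs(p, kvm_getu(p)));
 *	kvm_close();
 */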
249 1.1 cgd
250 1.1 cgd static
251 1.1 cgd kvm_init(uf, mf, sf)
252 1.1 cgd char *uf, *mf, *sf;
253 1.1 cgd {
254 1.1 cgd if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
255 1.1 cgd return (-1);
256 1.1 cgd if (getkvars() == -1)
257 1.1 cgd return (-1);
258 1.1 cgd kvminit = 1;
259 1.1 cgd
260 1.1 cgd return (0);
261 1.1 cgd }
262 1.1 cgd
263 1.1 cgd kvm_close()
264 1.1 cgd {
265 1.1 cgd if (unixx != -1) {
266 1.1 cgd close(unixx);
267 1.1 cgd unixx = -1;
268 1.1 cgd }
269 1.1 cgd if (kmem != -1) {
270 1.1 cgd if (kmem != mem)
271 1.1 cgd close(kmem);
272 1.1 cgd /* otherwise kmem is a copy of mem, and will be closed below */
273 1.1 cgd kmem = -1;
274 1.1 cgd }
275 1.1 cgd if (mem != -1) {
276 1.1 cgd close(mem);
277 1.1 cgd mem = -1;
278 1.1 cgd }
279 1.1 cgd if (swap != -1) {
280 1.1 cgd close(swap);
281 1.1 cgd swap = -1;
282 1.1 cgd }
283 1.1 cgd if (db != NULL) {
284 1.1 cgd dbm_close(db);
285 1.1 cgd db = NULL;
286 1.1 cgd }
287 1.1 cgd kvminit = 0;
288 1.1 cgd kvmfilesopen = 0;
289 1.1 cgd deadkernel = 0;
290 1.1 cgd #ifndef NEWVM
291 1.1 cgd if (Sysmap) {
292 1.1 cgd free(Sysmap);
293 1.1 cgd Sysmap = NULL;
294 1.1 cgd }
295 1.1 cgd #endif
296 1.1 cgd }
297 1.1 cgd
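/*
 * Look up the given symbols.  For a live kernel, first try the kvm_<kernel>
 * dbm database under _PATH_VARRUN, using its VERSION record to make sure it
 * matches the running kernel's version string; fall back to nlist(3) on the
 * kernel image if the database is missing or stale.  Returns the number of
 * symbols that were not found, or -1 on error.
 */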
298 1.1 cgd kvm_nlist(nl)
299 1.1 cgd struct nlist *nl;
300 1.1 cgd {
301 1.1 cgd datum key, data;
302 1.1 cgd char dbname[MAXPATHLEN];
303 1.1 cgd char dbversion[_POSIX2_LINE_MAX];
304 1.1 cgd char kversion[_POSIX2_LINE_MAX];
305 1.1 cgd int dbversionlen;
306 1.1 cgd char symbuf[MAXSYMSIZE];
307 1.1 cgd struct nlist nbuf, *n;
308 1.1 cgd int num, did;
309 1.1 cgd
310 1.1 cgd if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
311 1.1 cgd return (-1);
312 1.1 cgd if (deadkernel)
313 1.1 cgd goto hard2;
314 1.1 cgd /*
315 1.1 cgd * initialize key datum
316 1.1 cgd */
317 1.1 cgd key.dptr = symbuf;
318 1.1 cgd
319 1.1 cgd if (db != NULL)
320 1.1 cgd goto win; /* off to the races */
321 1.1 cgd /*
322 1.1 cgd * open database
323 1.1 cgd */
324 1.1 cgd sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
325 1.1 cgd if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
326 1.1 cgd goto hard2;
327 1.1 cgd /*
328 1.1 cgd * read version out of database
329 1.1 cgd */
330 1.1 cgd bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
331 1.2 cgd key.dsize = (sizeof ("VERSION") - 1);
332 1.1 cgd data = dbm_fetch(db, key);
333 1.1 cgd if (data.dptr == NULL)
334 1.1 cgd goto hard1;
335 1.1 cgd bcopy(data.dptr, dbversion, data.dsize);
336 1.1 cgd dbversionlen = data.dsize;
337 1.1 cgd /*
338 1.1 cgd * read version string from kernel memory
339 1.1 cgd */
340 1.1 cgd bcopy("_version", symbuf, sizeof ("_version")-1);
341 1.2 cgd key.dsize = (sizeof ("_version")-1);
342 1.1 cgd data = dbm_fetch(db, key);
343 1.1 cgd if (data.dptr == NULL)
344 1.1 cgd goto hard1;
345 1.1 cgd if (data.dsize != sizeof (struct nlist))
346 1.1 cgd goto hard1;
347 1.1 cgd bcopy(data.dptr, &nbuf, sizeof (struct nlist));
348 1.1 cgd lseek(kmem, nbuf.n_value, 0);
349 1.1 cgd if (read(kmem, kversion, dbversionlen) != dbversionlen)
350 1.1 cgd goto hard1;
351 1.1 cgd /*
352 1.1 cgd * if they match, we win - otherwise do it the hard way
353 1.1 cgd */
354 1.1 cgd if (bcmp(dbversion, kversion, dbversionlen) != 0)
355 1.1 cgd goto hard1;
356 1.1 cgd /*
357 1.1 cgd * getem from the database.
358 1.1 cgd */
359 1.1 cgd win:
360 1.1 cgd num = did = 0;
361 1.1 cgd for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
362 1.1 cgd int len;
363 1.1 cgd /*
364 1.1 cgd * clear out fields from users buffer
365 1.1 cgd */
366 1.1 cgd n->n_type = 0;
367 1.1 cgd n->n_other = 0;
368 1.1 cgd n->n_desc = 0;
369 1.1 cgd n->n_value = 0;
370 1.1 cgd /*
371 1.1 cgd * query db
372 1.1 cgd */
373 1.1 cgd 		if ((len = strlen(n->n_name)) >= MAXSYMSIZE) {
374 1.1 cgd seterr("symbol too large");
375 1.1 cgd return (-1);
376 1.1 cgd }
377 1.1 cgd (void)strcpy(symbuf, n->n_name);
378 1.2 cgd key.dsize = len;
379 1.1 cgd data = dbm_fetch(db, key);
380 1.1 cgd if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
381 1.1 cgd continue;
382 1.1 cgd bcopy(data.dptr, &nbuf, sizeof (struct nlist));
383 1.1 cgd n->n_value = nbuf.n_value;
384 1.1 cgd n->n_type = nbuf.n_type;
385 1.1 cgd n->n_desc = nbuf.n_desc;
386 1.1 cgd n->n_other = nbuf.n_other;
387 1.1 cgd did++;
388 1.1 cgd }
389 1.1 cgd return (num - did);
390 1.1 cgd hard1:
391 1.1 cgd dbm_close(db);
392 1.1 cgd db = NULL;
393 1.1 cgd hard2:
394 1.1 cgd num = nlist(unixf, nl);
395 1.1 cgd if (num == -1)
396 1.1 cgd seterr("nlist (hard way) failed");
397 1.1 cgd return (num);
398 1.1 cgd }
399 1.1 cgd
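/*
 * Build the process list.  On a live kernel this is a getkerninfo(2) call
 * into a (re)allocated kinfo_proc array; on a dead kernel the allproc and
 * zombproc chains are walked via kvm_doprocs().  Returns the number of
 * processes found, or -1 on error, and resets the kvm_nextproc() cursor.
 */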
400 1.1 cgd kvm_getprocs(what, arg)
401 1.1 cgd int what, arg;
402 1.1 cgd {
403 1.3 cgd static int ocopysize = -1;
404 1.3 cgd
405 1.1 cgd if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
406 1.1 cgd return (NULL);
407 1.1 cgd if (!deadkernel) {
408 1.1 cgd int ret, copysize;
409 1.1 cgd
410 1.1 cgd if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
411 1.1 cgd setsyserr("can't get estimate for kerninfo");
412 1.1 cgd return (-1);
413 1.1 cgd }
414 1.1 cgd copysize = ret;
415 1.5 cgd if (copysize > ocopysize || !kvmprocbase) {
416 1.5 cgd if (ocopysize == -1 || !kvmprocbase)
417 1.4 mycroft kvmprocbase =
418 1.4 mycroft (struct kinfo_proc *)malloc(copysize);
419 1.4 mycroft else
420 1.4 mycroft kvmprocbase =
421 1.4 mycroft (struct kinfo_proc *)realloc(kvmprocbase,
422 1.4 mycroft copysize);
423 1.4 mycroft if (!kvmprocbase) {
424 1.4 mycroft seterr("out of memory");
425 1.4 mycroft return (-1);
426 1.4 mycroft }
427 1.1 cgd }
428 1.3 cgd ocopysize = copysize;
429 1.1 cgd 		if ((ret = getkerninfo(what, kvmprocbase, &copysize,
430 1.1 cgd arg)) == -1) {
431 1.1 cgd setsyserr("can't get proc list");
432 1.1 cgd return (-1);
433 1.1 cgd }
434 1.1 cgd if (copysize % sizeof (struct kinfo_proc)) {
435 1.1 cgd seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
436 1.1 cgd copysize, sizeof (struct kinfo_proc));
437 1.1 cgd return (-1);
438 1.1 cgd }
439 1.1 cgd kvmnprocs = copysize / sizeof (struct kinfo_proc);
440 1.1 cgd } else {
441 1.1 cgd int nproc;
442 1.1 cgd
443 1.1 cgd if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
444 1.1 cgd sizeof (int)) != sizeof (int)) {
445 1.1 cgd seterr("can't read nproc");
446 1.1 cgd return (-1);
447 1.1 cgd }
448 1.1 cgd if ((kvmprocbase = (struct kinfo_proc *)
449 1.1 cgd malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
450 1.1 cgd seterr("out of memory (addr: %x nproc = %d)",
451 1.1 cgd nl[X_NPROC].n_value, nproc);
452 1.1 cgd return (-1);
453 1.1 cgd }
454 1.1 cgd kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
455 1.1 cgd realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
456 1.1 cgd }
457 1.1 cgd kvmprocptr = kvmprocbase;
458 1.1 cgd
459 1.1 cgd return (kvmnprocs);
460 1.1 cgd }
461 1.1 cgd
462 1.1 cgd /*
463 1.1 cgd * XXX - should NOT give up so easily - especially since the kernel
464 1.1 cgd * may be corrupt (it died). Should gather as much information as possible.
465 1.1 cgd * Follows proc ptrs instead of reading table since table may go
466 1.1 cgd * away soon.
467 1.1 cgd */
468 1.1 cgd static
469 1.1 cgd kvm_doprocs(what, arg, buff)
470 1.1 cgd int what, arg;
471 1.1 cgd char *buff;
472 1.1 cgd {
473 1.1 cgd struct proc *p, proc;
474 1.1 cgd register char *bp = buff;
475 1.1 cgd int i = 0;
476 1.1 cgd int doingzomb = 0;
477 1.1 cgd struct eproc eproc;
478 1.1 cgd struct pgrp pgrp;
479 1.1 cgd struct session sess;
480 1.1 cgd struct tty tty;
481 1.1 cgd #ifndef NEWVM
482 1.1 cgd struct text text;
483 1.1 cgd #endif
484 1.1 cgd
485 1.1 cgd /* allproc */
486 1.1 cgd if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
487 1.1 cgd sizeof (struct proc *)) != sizeof (struct proc *)) {
488 1.1 cgd seterr("can't read allproc");
489 1.1 cgd return (-1);
490 1.1 cgd }
491 1.1 cgd
492 1.1 cgd again:
493 1.1 cgd for (; p; p = proc.p_nxt) {
494 1.1 cgd if (kvm_read(p, &proc, sizeof (struct proc)) !=
495 1.1 cgd sizeof (struct proc)) {
496 1.1 cgd seterr("can't read proc at %x", p);
497 1.1 cgd return (-1);
498 1.1 cgd }
499 1.1 cgd #ifdef NEWVM
500 1.1 cgd if (kvm_read(proc.p_cred, &eproc.e_pcred,
501 1.1 cgd sizeof (struct pcred)) == sizeof (struct pcred))
502 1.1 cgd (void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
503 1.1 cgd sizeof (struct ucred));
504 1.1 cgd switch(ki_op(what)) {
505 1.1 cgd
506 1.1 cgd case KINFO_PROC_PID:
507 1.1 cgd if (proc.p_pid != (pid_t)arg)
508 1.1 cgd continue;
509 1.1 cgd break;
510 1.1 cgd
511 1.1 cgd
512 1.1 cgd case KINFO_PROC_UID:
513 1.1 cgd if (eproc.e_ucred.cr_uid != (uid_t)arg)
514 1.1 cgd continue;
515 1.1 cgd break;
516 1.1 cgd
517 1.1 cgd case KINFO_PROC_RUID:
518 1.1 cgd if (eproc.e_pcred.p_ruid != (uid_t)arg)
519 1.1 cgd continue;
520 1.1 cgd break;
521 1.1 cgd }
522 1.1 cgd #else
523 1.1 cgd switch(ki_op(what)) {
524 1.1 cgd
525 1.1 cgd case KINFO_PROC_PID:
526 1.1 cgd if (proc.p_pid != (pid_t)arg)
527 1.1 cgd continue;
528 1.1 cgd break;
529 1.1 cgd
530 1.1 cgd
531 1.1 cgd case KINFO_PROC_UID:
532 1.1 cgd if (proc.p_uid != (uid_t)arg)
533 1.1 cgd continue;
534 1.1 cgd break;
535 1.1 cgd
536 1.1 cgd case KINFO_PROC_RUID:
537 1.1 cgd if (proc.p_ruid != (uid_t)arg)
538 1.1 cgd continue;
539 1.1 cgd break;
540 1.1 cgd }
541 1.1 cgd #endif
542 1.1 cgd /*
543 1.1 cgd * gather eproc
544 1.1 cgd */
545 1.1 cgd eproc.e_paddr = p;
546 1.1 cgd if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
547 1.1 cgd sizeof (struct pgrp)) {
548 1.1 cgd seterr("can't read pgrp at %x", proc.p_pgrp);
549 1.1 cgd return (-1);
550 1.1 cgd }
551 1.1 cgd eproc.e_sess = pgrp.pg_session;
552 1.1 cgd eproc.e_pgid = pgrp.pg_id;
553 1.1 cgd eproc.e_jobc = pgrp.pg_jobc;
554 1.1 cgd if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
555 1.1 cgd != sizeof (struct session)) {
556 1.1 cgd seterr("can't read session at %x", pgrp.pg_session);
557 1.1 cgd return (-1);
558 1.1 cgd }
559 1.1 cgd if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
560 1.1 cgd if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
561 1.1 cgd != sizeof (struct tty)) {
562 1.1 cgd seterr("can't read tty at %x", sess.s_ttyp);
563 1.1 cgd return (-1);
564 1.1 cgd }
565 1.1 cgd eproc.e_tdev = tty.t_dev;
566 1.1 cgd eproc.e_tsess = tty.t_session;
567 1.1 cgd if (tty.t_pgrp != NULL) {
568 1.1 cgd if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
569 1.1 cgd pgrp)) != sizeof (struct pgrp)) {
570 1.1 cgd 					seterr("can't read tpgrp at %x",
571 1.1 cgd tty.t_pgrp);
572 1.1 cgd return (-1);
573 1.1 cgd }
574 1.1 cgd eproc.e_tpgid = pgrp.pg_id;
575 1.1 cgd } else
576 1.1 cgd eproc.e_tpgid = -1;
577 1.1 cgd } else
578 1.1 cgd eproc.e_tdev = NODEV;
579 1.1 cgd if (proc.p_wmesg)
580 1.1 cgd kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
581 1.1 cgd #ifdef NEWVM
582 1.1 cgd (void) kvm_read(proc.p_vmspace, &eproc.e_vm,
583 1.1 cgd sizeof (struct vmspace));
584 1.1 cgd eproc.e_xsize = eproc.e_xrssize =
585 1.1 cgd eproc.e_xccount = eproc.e_xswrss = 0;
586 1.1 cgd #else
587 1.1 cgd if (proc.p_textp) {
588 1.1 cgd kvm_read(proc.p_textp, &text, sizeof (text));
589 1.1 cgd eproc.e_xsize = text.x_size;
590 1.1 cgd eproc.e_xrssize = text.x_rssize;
591 1.1 cgd eproc.e_xccount = text.x_ccount;
592 1.1 cgd eproc.e_xswrss = text.x_swrss;
593 1.1 cgd } else {
594 1.1 cgd eproc.e_xsize = eproc.e_xrssize =
595 1.1 cgd eproc.e_xccount = eproc.e_xswrss = 0;
596 1.1 cgd }
597 1.1 cgd #endif
598 1.1 cgd
599 1.1 cgd switch(ki_op(what)) {
600 1.1 cgd
601 1.1 cgd case KINFO_PROC_PGRP:
602 1.1 cgd if (eproc.e_pgid != (pid_t)arg)
603 1.1 cgd continue;
604 1.1 cgd break;
605 1.1 cgd
606 1.1 cgd case KINFO_PROC_TTY:
607 1.1 cgd if ((proc.p_flag&SCTTY) == 0 ||
608 1.1 cgd eproc.e_tdev != (dev_t)arg)
609 1.1 cgd continue;
610 1.1 cgd break;
611 1.1 cgd }
612 1.1 cgd
613 1.1 cgd i++;
614 1.1 cgd bcopy(&proc, bp, sizeof (struct proc));
615 1.1 cgd bp += sizeof (struct proc);
616 1.1 cgd bcopy(&eproc, bp, sizeof (struct eproc));
617 1.1 cgd bp+= sizeof (struct eproc);
618 1.1 cgd }
619 1.1 cgd if (!doingzomb) {
620 1.1 cgd /* zombproc */
621 1.1 cgd if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
622 1.1 cgd sizeof (struct proc *)) != sizeof (struct proc *)) {
623 1.1 cgd seterr("can't read zombproc");
624 1.1 cgd return (-1);
625 1.1 cgd }
626 1.1 cgd doingzomb = 1;
627 1.1 cgd goto again;
628 1.1 cgd }
629 1.1 cgd
630 1.1 cgd return (i);
631 1.1 cgd }
632 1.1 cgd
633 1.1 cgd struct proc *
634 1.1 cgd kvm_nextproc()
635 1.1 cgd {
636 1.1 cgd
637 1.1 cgd if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
638 1.1 cgd return (NULL);
639 1.1 cgd if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
640 1.1 cgd seterr("end of proc list");
641 1.1 cgd return (NULL);
642 1.1 cgd }
643 1.1 cgd return((struct proc *)(kvmprocptr++));
644 1.1 cgd }
645 1.1 cgd
646 1.1 cgd struct eproc *
647 1.1 cgd kvm_geteproc(p)
648 1.1 cgd const struct proc *p;
649 1.1 cgd {
650 1.1 cgd return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
651 1.1 cgd }
652 1.1 cgd
653 1.1 cgd kvm_setproc()
654 1.1 cgd {
655 1.1 cgd kvmprocptr = kvmprocbase;
656 1.1 cgd }
657 1.1 cgd
658 1.1 cgd kvm_freeprocs()
659 1.1 cgd {
660 1.1 cgd
661 1.1 cgd if (kvmprocbase) {
662 1.1 cgd free(kvmprocbase);
663 1.1 cgd kvmprocbase = NULL;
664 1.1 cgd }
665 1.1 cgd }
666 1.1 cgd
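/*
 * Read the u-area of the given process into a static buffer and return a
 * pointer to it.  A resident process is read page by page through the
 * kernel/physical memory files; a swapped-out process (SLOAD clear) is
 * read from the swap device, under NEWVM by first locating it with
 * vatosw().  Returns NULL for zombies or on any read error.
 */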
667 1.1 cgd #ifdef NEWVM
668 1.1 cgd struct user *
669 1.1 cgd kvm_getu(p)
670 1.1 cgd const struct proc *p;
671 1.1 cgd {
672 1.1 cgd register struct kinfo_proc *kp = (struct kinfo_proc *)p;
673 1.1 cgd register int i;
674 1.1 cgd register char *up;
675 1.3 cgd u_int vaddr;
676 1.3 cgd struct swapblk swb;
677 1.1 cgd
678 1.1 cgd if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
679 1.1 cgd return (NULL);
680 1.1 cgd if (p->p_stat == SZOMB) {
681 1.1 cgd seterr("zombie process");
682 1.1 cgd return (NULL);
683 1.1 cgd }
684 1.3 cgd
685 1.3 cgd if ((p->p_flag & SLOAD) == 0) {
686 1.3 cgd vm_offset_t maddr;
687 1.3 cgd
688 1.3 cgd if (swap < 0) {
689 1.3 cgd seterr("no swap");
690 1.3 cgd return (NULL);
691 1.3 cgd }
692 1.3 cgd /*
693 1.3 cgd * Costly operation, better set enable_swap to zero
694 1.3 cgd * in vm/vm_glue.c, since paging of user pages isn't
695 1.3 cgd * done yet anyway.
696 1.3 cgd */
697 1.3 cgd if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
698 1.3 cgd return NULL;
699 1.3 cgd
700 1.3 cgd if (maddr == 0 && swb.size < UPAGES * NBPG)
701 1.3 cgd return NULL;
702 1.3 cgd
703 1.3 cgd for (i = 0; i < UPAGES; i++) {
704 1.3 cgd if (maddr) {
705 1.3 cgd (void) lseek(mem, maddr + i * NBPG, 0);
706 1.3 cgd if (read(mem,
707 1.3 cgd (char *)user.upages[i], NBPG) != NBPG) {
708 1.3 cgd seterr(
709 1.3 cgd "can't read u for pid %d from %s",
710 1.3 cgd 					    p->p_pid, memf);
711 1.3 cgd return NULL;
712 1.3 cgd }
713 1.3 cgd } else {
714 1.3 cgd (void) lseek(swap, swb.offset + i * NBPG, 0);
715 1.3 cgd if (read(swap,
716 1.3 cgd (char *)user.upages[i], NBPG) != NBPG) {
717 1.3 cgd seterr(
718 1.3 cgd "can't read u for pid %d from %s",
719 1.3 cgd p->p_pid, swapf);
720 1.3 cgd return NULL;
721 1.3 cgd }
722 1.3 cgd }
723 1.3 cgd }
724 1.3 cgd return(&user.user);
725 1.3 cgd }
726 1.1 cgd /*
727 1.1 cgd * Read u-area one page at a time for the benefit of post-mortems
728 1.1 cgd */
729 1.1 cgd up = (char *) p->p_addr;
730 1.1 cgd for (i = 0; i < UPAGES; i++) {
731 1.1 cgd klseek(kmem, (long)up, 0);
732 1.1 cgd if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
733 1.1 cgd 			seterr("can't read page %x of u of pid %d from %s",
734 1.1 cgd up, p->p_pid, kmemf);
735 1.1 cgd return(NULL);
736 1.1 cgd }
737 1.1 cgd up += CLBYTES;
738 1.1 cgd }
739 1.1 cgd pcbpf = (int) btop(p->p_addr); /* what should this be really? */
740 1.1 cgd
741 1.1 cgd kp->kp_eproc.e_vm.vm_rssize =
742 1.1 cgd kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
743 1.1 cgd return(&user.user);
744 1.1 cgd }
745 1.1 cgd #else
746 1.1 cgd struct user *
747 1.1 cgd kvm_getu(p)
748 1.1 cgd const struct proc *p;
749 1.1 cgd {
750 1.1 cgd struct pte *pteaddr, apte;
751 1.1 cgd struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
752 1.1 cgd register int i;
753 1.1 cgd int ncl;
754 1.1 cgd
755 1.1 cgd if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
756 1.1 cgd return (NULL);
757 1.1 cgd if (p->p_stat == SZOMB) {
758 1.1 cgd seterr("zombie process");
759 1.1 cgd return (NULL);
760 1.1 cgd }
761 1.1 cgd if ((p->p_flag & SLOAD) == 0) {
762 1.1 cgd if (swap < 0) {
763 1.1 cgd seterr("no swap");
764 1.1 cgd return (NULL);
765 1.1 cgd }
766 1.1 cgd (void) lseek(swap, (long)dtob(p->p_swaddr), 0);
767 1.1 cgd if (read(swap, (char *)&user.user, sizeof (struct user)) !=
768 1.1 cgd sizeof (struct user)) {
769 1.1 cgd seterr("can't read u for pid %d from %s",
770 1.1 cgd p->p_pid, swapf);
771 1.1 cgd return (NULL);
772 1.1 cgd }
773 1.1 cgd pcbpf = 0;
774 1.1 cgd argaddr0 = 0;
775 1.1 cgd argaddr1 = 0;
776 1.1 cgd return (&user.user);
777 1.1 cgd }
778 1.1 cgd pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
779 1.1 cgd klseek(kmem, (long)pteaddr, 0);
780 1.1 cgd if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
781 1.1 cgd seterr("can't read indir pte to get u for pid %d from %s",
782 1.1 cgd p->p_pid, kmemf);
783 1.1 cgd return (NULL);
784 1.1 cgd }
785 1.1 cgd lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
786 1.1 cgd if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
787 1.1 cgd seterr("can't read page table for u of pid %d from %s",
788 1.1 cgd p->p_pid, memf);
789 1.1 cgd return (NULL);
790 1.1 cgd }
791 1.1 cgd if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
792 1.1 cgd argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
793 1.1 cgd else
794 1.1 cgd argaddr0 = 0;
795 1.1 cgd if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
796 1.1 cgd argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
797 1.1 cgd else
798 1.1 cgd argaddr1 = 0;
799 1.1 cgd pcbpf = arguutl[CLSIZE*2].pg_pfnum;
800 1.1 cgd ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
801 1.1 cgd while (--ncl >= 0) {
802 1.1 cgd i = ncl * CLSIZE;
803 1.1 cgd lseek(mem,
804 1.1 cgd (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
805 1.1 cgd if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
806 1.1 cgd seterr("can't read page %d of u of pid %d from %s",
807 1.1 cgd arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
808 1.1 cgd return(NULL);
809 1.1 cgd }
810 1.1 cgd }
811 1.1 cgd return (&user.user);
812 1.1 cgd }
813 1.1 cgd #endif
814 1.1 cgd
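/*
 * Read up to len bytes from the given process's address space at addr into
 * buf, stopping at the end of the current cluster.  The address is
 * translated through the process page tables where possible, otherwise
 * through vatosw(); the data then comes from physical memory or from swap.
 * Returns the number of bytes actually read (0 on failure).
 */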
815 1.8 cgd int
816 1.8 cgd kvm_procread(p, addr, buf, len)
817 1.8 cgd const struct proc *p;
818 1.8 cgd const unsigned addr, buf, len;
819 1.8 cgd {
820 1.8 cgd register struct kinfo_proc *kp = (struct kinfo_proc *) p;
821 1.8 cgd struct swapblk swb;
822 1.8 cgd vm_offset_t swaddr = 0, memaddr = 0;
823 1.8 cgd unsigned real_len;
824 1.8 cgd
825 1.8 cgd real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));
826 1.8 cgd
827 1.8 cgd #if defined(hp300)
828 1.8 cgd /*
829 1.8 cgd * XXX DANGER WILL ROBINSON -- i have *no* idea to what extent this
830 1.8 cgd * works... -- cgd
831 1.8 cgd */
832 1.8 cgd BREAK HERE!!!
833 1.8 cgd #endif
834 1.8 cgd #if defined(i386)
835 1.8 cgd if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
836 1.8 cgd struct pde pde;
837 1.8 cgd
838 1.8 cgd klseek(kmem,
839 1.8 cgd (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);
840 1.8 cgd
841 1.8 cgd if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
842 1.8 cgd && pde.pd_v) {
843 1.8 cgd
844 1.8 cgd struct pte pte;
845 1.8 cgd
846 1.8 cgd if (lseek(mem, (long)ctob(pde.pd_pfnum) +
847 1.8 cgd (ptei(addr) * sizeof pte), 0) == -1)
848 1.8 cgd seterr("kvm_procread: lseek");
849 1.8 cgd if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
850 1.8 cgd if (pte.pg_v) {
851 1.8 cgd memaddr = (long)ctob(pte.pg_pfnum) +
852 1.8 cgd (addr % (1 << PGSHIFT));
853 1.8 cgd }
854 1.8 cgd } else {
855 1.8 cgd seterr("kvm_procread: read");
856 1.8 cgd }
857 1.8 cgd }
858 1.8 cgd }
859 1.8 cgd #endif /* i386 */
860 1.8 cgd
861 1.8 cgd swb.size = 0; /* XXX */
862 1.8 cgd if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
863 1.8 cgd if (memaddr != 0) {
864 1.8 cgd memaddr += addr & CLOFSET;
865 1.8 cgd } else {
866 1.8 cgd 			swb.offset += addr & CLOFSET;
867 1.8 cgd 			swb.size -= addr & CLOFSET;
868 1.8 cgd 			if (swb.size >= real_len)
869 1.8 cgd 				swaddr = swb.offset;
870 1.8 cgd }
871 1.8 cgd }
872 1.8 cgd
873 1.8 cgd if (memaddr) {
874 1.8 cgd if (lseek(mem, memaddr, 0) == -1)
875 1.8 cgd 			seterr("kvm_procread: lseek");
876 1.8 cgd real_len = read(mem, (char *)buf, real_len);
877 1.8 cgd if (real_len == -1) {
878 1.8 cgd real_len = 0;
879 1.8 cgd seterr("kvm_procread: read");
880 1.8 cgd }
881 1.8 cgd } else if (swaddr) {
882 1.8 cgd if (lseek(swap, swaddr, 0) == -1)
883 1.8 cgd 			seterr("kvm_procread: lseek");
884 1.8 cgd real_len = read(swap, (char *)buf, real_len);
885 1.8 cgd if (real_len == -1) {
886 1.8 cgd real_len = 0;
887 1.8 cgd seterr("kvm_procread: read");
888 1.8 cgd }
889 1.8 cgd } else
890 1.8 cgd real_len = 0;
891 1.8 cgd
892 1.8 cgd return real_len;
893 1.8 cgd }
894 1.8 cgd
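/*
 * Read a NUL-terminated string from the given process's address space at
 * addr into buf, one byte at a time via kvm_procread(), copying at most
 * len bytes.  Returns the string length, not counting the NUL; if no NUL
 * is seen within len bytes, len is returned and the result is not
 * terminated.
 */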
895 1.8 cgd int
896 1.8 cgd kvm_procreadstr(p, addr, buf, len)
897 1.8 cgd const struct proc *p;
898 1.8 cgd const unsigned addr, buf;
899 1.8 cgd unsigned len;
900 1.8 cgd {
901 1.8 cgd int done;
902 1.8 cgd char a, *bp = (char *) buf;
903 1.8 cgd
904 1.8 cgd /* XXX -- should be optimized */
905 1.8 cgd
906 1.8 cgd done = 0;
907 1.8 cgd while (len && kvm_procread(p, addr+done, &a, 1) == 1) {
908 1.8 cgd *bp++ = a;
909 1.8 cgd if (a == '\0')
910 1.8 cgd return done;
911 1.8 cgd done++;
912 1.8 cgd len--;
913 1.8 cgd }
914 1.8 cgd return done;
915 1.8 cgd }
916 1.8 cgd
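/*
 * Reconstruct the command line of the given process by reading its
 * ps_strings structure at PS_STRINGS and then each argument string with
 * kvm_procreadstr(), separating them with spaces.  If the arguments are
 * unavailable or look bogus, the p_comm name is appended or returned in
 * parentheses instead.
 */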
917 1.1 cgd char *
918 1.1 cgd kvm_getargs(p, up)
919 1.1 cgd const struct proc *p;
920 1.1 cgd const struct user *up;
921 1.1 cgd {
922 1.8 cgd static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
923 1.8 cgd register char *cp, *acp;
924 1.8 cgd int left, rv;
925 1.8 cgd struct ps_strings arginfo;
926 1.1 cgd
927 1.1 cgd if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
928 1.1 cgd goto retucomm;
929 1.8 cgd
930 1.8 cgd if (kvm_procread(p, PS_STRINGS, &arginfo, sizeof(arginfo)) !=
931 1.8 cgd sizeof(arginfo))
932 1.8 cgd goto bad;
933 1.8 cgd
934 1.8 cgd cp = cmdbuf;
935 1.8 cgd acp = arginfo.ps_argvstr;
936 1.8 cgd left = ARG_MAX + 1;
937 1.8 cgd while (arginfo.ps_nargvstr--) {
938 1.8 cgd if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
939 1.8 cgd acp += rv + 1;
940 1.8 cgd left -= rv + 1;
941 1.8 cgd cp += rv;
942 1.8 cgd *cp++ = ' ';
943 1.8 cgd *cp = '\0';
944 1.1 cgd } else
945 1.1 cgd goto bad;
946 1.1 cgd }
947 1.8 cgd cp-- ; *cp = '\0';
948 1.2 cgd
949 1.8 cgd if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
950 1.1 cgd (void) strcat(cmdbuf, " (");
951 1.1 cgd (void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
952 1.1 cgd (void) strcat(cmdbuf, ")");
953 1.1 cgd }
954 1.1 cgd return (cmdbuf);
955 1.1 cgd
956 1.1 cgd bad:
957 1.8 cgd seterr("error locating command name for pid %d", p->p_pid);
958 1.1 cgd retucomm:
959 1.8 cgd (void) strcpy(cmdbuf, "(");
960 1.1 cgd (void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
961 1.1 cgd (void) strcat(cmdbuf, ")");
962 1.1 cgd return (cmdbuf);
963 1.1 cgd }
964 1.1 cgd
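/*
 * Like kvm_getargs(), but reconstruct the environment strings from the
 * ps_envstr array in the process's ps_strings structure.  Returns an
 * empty string if the environment cannot be read.
 */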
965 1.8 cgd char *
966 1.8 cgd kvm_getenv(p, up)
967 1.8 cgd const struct proc *p;
968 1.8 cgd const struct user *up;
969 1.8 cgd {
970 1.8 cgd static char envbuf[ARG_MAX + 1];
971 1.8 cgd register char *cp, *acp;
972 1.8 cgd int left, rv;
973 1.8 cgd struct ps_strings arginfo;
974 1.8 cgd
975 1.8 cgd if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
976 1.8 cgd goto retemptyenv;
977 1.8 cgd
978 1.8 cgd if (kvm_procread(p, PS_STRINGS, &arginfo, sizeof(arginfo)) !=
979 1.8 cgd sizeof(arginfo))
980 1.8 cgd goto bad;
981 1.8 cgd
982 1.8 cgd cp = envbuf;
983 1.8 cgd acp = arginfo.ps_envstr;
984 1.8 cgd left = ARG_MAX + 1;
985 1.8 cgd while (arginfo.ps_nenvstr--) {
986 1.8 cgd if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
987 1.8 cgd acp += rv + 1;
988 1.8 cgd left -= rv + 1;
989 1.8 cgd cp += rv;
990 1.8 cgd *cp++ = ' ';
991 1.8 cgd *cp = '\0';
992 1.8 cgd } else
993 1.8 cgd goto bad;
994 1.8 cgd }
995 1.8 cgd cp-- ; *cp = '\0';
996 1.8 cgd return (envbuf);
997 1.8 cgd
998 1.8 cgd bad:
999 1.8 cgd seterr("error locating environment for pid %d", p->p_pid);
1000 1.8 cgd retemptyenv:
1001 1.8 cgd envbuf[0] = '\0';
1002 1.8 cgd return (envbuf);
1003 1.8 cgd }
1004 1.1 cgd
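/*
 * Fetch the kernel variables this library needs: run kvm_nlist() on the
 * symbol table, read nswap/dmmin/dmmax, and for a dead kernel also read
 * the mapping information (Sysmap, Sysseg/lowram or IdlePTD) that
 * klseek()/Vtophys() use to translate kernel virtual addresses.
 */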
1005 1.1 cgd static
1006 1.1 cgd getkvars()
1007 1.1 cgd {
1008 1.1 cgd if (kvm_nlist(nl) == -1)
1009 1.1 cgd return (-1);
1010 1.1 cgd if (deadkernel) {
1011 1.1 cgd /* We must do the sys map first because klseek uses it */
1012 1.1 cgd long addr;
1013 1.1 cgd
1014 1.1 cgd #ifndef NEWVM
1015 1.1 cgd Syssize = nl[X_SYSSIZE].n_value;
1016 1.1 cgd Sysmap = (struct pte *)
1017 1.1 cgd calloc((unsigned) Syssize, sizeof (struct pte));
1018 1.1 cgd if (Sysmap == NULL) {
1019 1.1 cgd seterr("out of space for Sysmap");
1020 1.1 cgd return (-1);
1021 1.1 cgd }
1022 1.1 cgd addr = (long) nl[X_SYSMAP].n_value;
1023 1.1 cgd addr &= ~KERNBASE;
1024 1.1 cgd (void) lseek(kmem, addr, 0);
1025 1.1 cgd if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1026 1.1 cgd != Syssize * sizeof (struct pte)) {
1027 1.1 cgd seterr("can't read Sysmap");
1028 1.1 cgd return (-1);
1029 1.1 cgd }
1030 1.1 cgd #endif
1031 1.1 cgd #if defined(hp300)
1032 1.1 cgd addr = (long) nl[X_LOWRAM].n_value;
1033 1.1 cgd (void) lseek(kmem, addr, 0);
1034 1.1 cgd if (read(kmem, (char *) &lowram, sizeof (lowram))
1035 1.1 cgd != sizeof (lowram)) {
1036 1.1 cgd seterr("can't read lowram");
1037 1.1 cgd return (-1);
1038 1.1 cgd }
1039 1.1 cgd lowram = btop(lowram);
1040 1.1 cgd Sysseg = (struct ste *) malloc(NBPG);
1041 1.1 cgd if (Sysseg == NULL) {
1042 1.1 cgd seterr("out of space for Sysseg");
1043 1.1 cgd return (-1);
1044 1.1 cgd }
1045 1.1 cgd addr = (long) nl[X_SYSSEG].n_value;
1046 1.1 cgd (void) lseek(kmem, addr, 0);
1047 1.1 cgd read(kmem, (char *)&addr, sizeof(addr));
1048 1.1 cgd (void) lseek(kmem, (long)addr, 0);
1049 1.1 cgd if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1050 1.1 cgd seterr("can't read Sysseg");
1051 1.1 cgd return (-1);
1052 1.1 cgd }
1053 1.1 cgd #endif
1054 1.1 cgd #if defined(i386)
1055 1.1 cgd PTD = (struct pde *) malloc(NBPG);
1056 1.1 cgd if (PTD == NULL) {
1057 1.1 cgd seterr("out of space for PTD");
1058 1.1 cgd return (-1);
1059 1.1 cgd }
1060 1.1 cgd addr = (long) nl[X_IdlePTD].n_value;
1061 1.1 cgd (void) lseek(kmem, addr, 0);
1062 1.1 cgd read(kmem, (char *)&addr, sizeof(addr));
1063 1.1 cgd (void) lseek(kmem, (long)addr, 0);
1064 1.1 cgd if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1065 1.1 cgd seterr("can't read PTD");
1066 1.1 cgd return (-1);
1067 1.1 cgd }
1068 1.1 cgd #endif
1069 1.1 cgd }
1070 1.1 cgd #ifndef NEWVM
1071 1.1 cgd usrpt = (struct pte *)nl[X_USRPT].n_value;
1072 1.1 cgd Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1073 1.1 cgd #endif
1074 1.1 cgd if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1075 1.1 cgd sizeof (long)) {
1076 1.1 cgd seterr("can't read nswap");
1077 1.1 cgd return (-1);
1078 1.1 cgd }
1079 1.1 cgd if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1080 1.1 cgd sizeof (long)) {
1081 1.1 cgd seterr("can't read dmmin");
1082 1.1 cgd return (-1);
1083 1.1 cgd }
1084 1.1 cgd if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1085 1.1 cgd sizeof (long)) {
1086 1.1 cgd seterr("can't read dmmax");
1087 1.1 cgd return (-1);
1088 1.1 cgd }
1089 1.1 cgd return (0);
1090 1.1 cgd }
1091 1.1 cgd
1092 1.1 cgd kvm_read(loc, buf, len)
1093 1.1 cgd void *loc;
1094 1.1 cgd void *buf;
1095 1.1 cgd {
1096 1.1 cgd if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1097 1.1 cgd return (-1);
1098 1.1 cgd if (iskva(loc)) {
1099 1.1 cgd klseek(kmem, (off_t) loc, 0);
1100 1.1 cgd if (read(kmem, buf, len) != len) {
1101 1.1 cgd seterr("error reading kmem at %x", loc);
1102 1.1 cgd return (-1);
1103 1.1 cgd }
1104 1.1 cgd } else {
1105 1.1 cgd lseek(mem, (off_t) loc, 0);
1106 1.1 cgd if (read(mem, buf, len) != len) {
1107 1.1 cgd seterr("error reading mem at %x", loc);
1108 1.1 cgd return (-1);
1109 1.1 cgd }
1110 1.1 cgd }
1111 1.1 cgd return (len);
1112 1.1 cgd }
1113 1.1 cgd
1114 1.1 cgd static void
1115 1.1 cgd klseek(fd, loc, off)
1116 1.1 cgd int fd;
1117 1.1 cgd off_t loc;
1118 1.1 cgd int off;
1119 1.1 cgd {
1120 1.1 cgd
1121 1.1 cgd if (deadkernel) {
1122 1.1 cgd if ((loc = Vtophys(loc)) == -1)
1123 1.1 cgd return;
1124 1.1 cgd }
1125 1.1 cgd (void) lseek(fd, (off_t)loc, off);
1126 1.1 cgd }
1127 1.1 cgd
1128 1.1 cgd #ifndef NEWVM
1129 1.1 cgd /*
1130 1.1 cgd * Given a base/size pair in virtual swap area,
1131 1.1 cgd * return a physical base/size pair which is the
1132 1.1 cgd * (largest) initial, physically contiguous block.
1133 1.1 cgd */
1134 1.1 cgd static void
1135 1.1 cgd vstodb(vsbase, vssize, dmp, dbp, rev)
1136 1.1 cgd register int vsbase;
1137 1.1 cgd int vssize;
1138 1.1 cgd struct dmap *dmp;
1139 1.1 cgd register struct dblock *dbp;
1140 1.1 cgd {
1141 1.1 cgd register int blk = dmmin;
1142 1.1 cgd register swblk_t *ip = dmp->dm_map;
1143 1.1 cgd
1144 1.1 cgd vsbase = ctod(vsbase);
1145 1.1 cgd vssize = ctod(vssize);
1146 1.1 cgd if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
1147 1.1 cgd /*panic("vstodb")*/;
1148 1.1 cgd while (vsbase >= blk) {
1149 1.1 cgd vsbase -= blk;
1150 1.1 cgd if (blk < dmmax)
1151 1.1 cgd blk *= 2;
1152 1.1 cgd ip++;
1153 1.1 cgd }
1154 1.1 cgd if (*ip <= 0 || *ip + blk > nswap)
1155 1.1 cgd /*panic("vstodb")*/;
1156 1.1 cgd dbp->db_size = MIN(vssize, blk - vsbase);
1157 1.1 cgd dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
1158 1.1 cgd }
1159 1.1 cgd #endif
1160 1.1 cgd
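/*
 * Translate a kernel virtual address into an offset suitable for reading
 * the core file, using the Sysseg/PTD (or Sysmap) copies set up by
 * getkvars().  Used by klseek() when working on a dead kernel; returns -1
 * if the address cannot be translated.
 */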
1161 1.1 cgd #ifdef NEWVM
1162 1.1 cgd static off_t
1163 1.1 cgd Vtophys(loc)
1164 1.1 cgd u_long loc;
1165 1.1 cgd {
1166 1.1 cgd off_t newloc = (off_t) -1;
1167 1.1 cgd #ifdef hp300
1168 1.1 cgd int p, ste, pte;
1169 1.1 cgd
1170 1.1 cgd ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
1171 1.1 cgd if ((ste & SG_V) == 0) {
1172 1.1 cgd seterr("vtophys: segment not valid");
1173 1.1 cgd return((off_t) -1);
1174 1.1 cgd }
1175 1.1 cgd p = btop(loc & SG_PMASK);
1176 1.1 cgd newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
1177 1.1 cgd (void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
1178 1.1 cgd if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1179 1.1 cgd seterr("vtophys: cannot locate pte");
1180 1.1 cgd return((off_t) -1);
1181 1.1 cgd }
1182 1.1 cgd newloc = pte & PG_FRAME;
1183 1.1 cgd if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
1184 1.1 cgd seterr("vtophys: page not valid");
1185 1.1 cgd return((off_t) -1);
1186 1.1 cgd }
1187 1.1 cgd newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
1188 1.1 cgd #endif
1189 1.1 cgd #ifdef i386
1190 1.1 cgd struct pde pde;
1191 1.1 cgd struct pte pte;
1192 1.1 cgd int p;
1193 1.1 cgd
1194 1.1 cgd pde = PTD[loc >> PD_SHIFT];
1195 1.1 cgd if (pde.pd_v == 0) {
1196 1.1 cgd seterr("vtophys: page directory entry not valid");
1197 1.1 cgd return((off_t) -1);
1198 1.1 cgd }
1199 1.1 cgd p = btop(loc & PT_MASK);
1200 1.1 cgd newloc = pde.pd_pfnum + (p * sizeof(struct pte));
1201 1.1 cgd (void) lseek(kmem, (long)newloc, 0);
1202 1.1 cgd if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
1203 1.1 cgd seterr("vtophys: cannot obtain desired pte");
1204 1.1 cgd return((off_t) -1);
1205 1.1 cgd }
1206 1.1 cgd newloc = pte.pg_pfnum;
1207 1.1 cgd if (pte.pg_v == 0) {
1208 1.1 cgd seterr("vtophys: page table entry not valid");
1209 1.1 cgd return((off_t) -1);
1210 1.1 cgd }
1211 1.1 cgd newloc += (loc & PGOFSET);
1212 1.1 cgd #endif
1213 1.1 cgd return((off_t) newloc);
1214 1.1 cgd }
1215 1.1 cgd #else
1216 1.1 cgd static off_t
1217 1.1 cgd vtophys(loc)
1218 1.1 cgd long loc;
1219 1.1 cgd {
1220 1.1 cgd int p;
1221 1.1 cgd off_t newloc;
1222 1.1 cgd register struct pte *pte;
1223 1.1 cgd
1224 1.1 cgd newloc = loc & ~KERNBASE;
1225 1.1 cgd p = btop(newloc);
1226 1.1 cgd #if defined(vax) || defined(tahoe)
1227 1.1 cgd if ((loc & KERNBASE) == 0) {
1228 1.1 cgd seterr("vtophys: translating non-kernel address");
1229 1.1 cgd return((off_t) -1);
1230 1.1 cgd }
1231 1.1 cgd #endif
1232 1.1 cgd if (p >= Syssize) {
1233 1.1 cgd seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
1234 1.1 cgd return((off_t) -1);
1235 1.1 cgd }
1236 1.1 cgd pte = &Sysmap[p];
1237 1.1 cgd if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
1238 1.1 cgd seterr("vtophys: page not valid");
1239 1.1 cgd return((off_t) -1);
1240 1.1 cgd }
1241 1.1 cgd #if defined(hp300)
1242 1.1 cgd if (pte->pg_pfnum < lowram) {
1243 1.1 cgd seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
1244 1.1 cgd return((off_t) -1);
1245 1.1 cgd }
1246 1.1 cgd #endif
1247 1.1 cgd loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
1248 1.1 cgd return(loc);
1249 1.1 cgd }
1250 1.1 cgd #endif
1251 1.1 cgd
1252 1.3 cgd
1253 1.3 cgd #ifdef NEWVM
1254 1.3 cgd /*
1255 1.3 cgd * locate address of unwired or swapped page
1256 1.3 cgd */
1257 1.3 cgd
1258 1.3 cgd #define DEBUG 0
1259 1.3 cgd
1260 1.3 cgd #define KREAD(off, addr, len) \
1261 1.3 cgd (kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1262 1.3 cgd
1263 1.3 cgd
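/*
 * Translate a virtual address in the given process into either a physical
 * memory address (*maddr, if the page is resident) or a swap block (*swb):
 * walk the process's vm_map entries to the backing object, follow the
 * shadow chain looking each page up with findpage(), and otherwise consult
 * the swap pager's block array.  Returns 1 on success, 0 on failure.
 */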
1264 1.3 cgd static int
1265 1.3 cgd vatosw(p, vaddr, maddr, swb)
1266 1.3 cgd struct proc *p ;
1267 1.3 cgd vm_offset_t vaddr;
1268 1.3 cgd vm_offset_t *maddr;
1269 1.3 cgd struct swapblk *swb;
1270 1.3 cgd {
1271 1.3 cgd register struct kinfo_proc *kp = (struct kinfo_proc *)p;
1272 1.3 cgd vm_map_t mp = &kp->kp_eproc.e_vm.vm_map;
1273 1.3 cgd struct vm_object vm_object;
1274 1.3 cgd struct vm_map_entry vm_entry;
1275 1.3 cgd struct pager_struct pager;
1276 1.3 cgd struct swpager swpager;
1277 1.3 cgd struct swblock swblock;
1278 1.3 cgd long addr, off;
1279 1.3 cgd int i;
1280 1.3 cgd
1281 1.3 cgd if (p->p_pid == 0 || p->p_pid == 2)
1282 1.3 cgd return 0;
1283 1.3 cgd
1284 1.3 cgd addr = (long)mp->header.next;
1285 1.3 cgd for (i = 0; i < mp->nentries; i++) {
1286 1.3 cgd /* Weed through map entries until vaddr in range */
1287 1.3 cgd if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
1288 1.3 cgd setsyserr("vatosw: read vm_map_entry");
1289 1.3 cgd return 0;
1290 1.3 cgd }
1291 1.3 cgd if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
1292 1.3 cgd (vm_entry.object.vm_object != 0))
1293 1.3 cgd break;
1294 1.3 cgd
1295 1.3 cgd addr = (long)vm_entry.next;
1296 1.3 cgd }
1297 1.3 cgd if (i == mp->nentries) {
1298 1.3 cgd seterr("%u: map not found\n", p->p_pid);
1299 1.3 cgd return 0;
1300 1.3 cgd }
1301 1.3 cgd
1302 1.3 cgd if (vm_entry.is_a_map || vm_entry.is_sub_map) {
1303 1.3 cgd seterr("%u: Is a map\n", p->p_pid);
1304 1.3 cgd return 0;
1305 1.3 cgd }
1306 1.3 cgd
1307 1.3 cgd /* Locate memory object */
1308 1.3 cgd off = (vaddr - vm_entry.start) + vm_entry.offset;
1309 1.3 cgd addr = (long)vm_entry.object.vm_object;
1310 1.3 cgd while (1) {
1311 1.3 cgd if (!KREAD(addr, &vm_object, sizeof vm_object)) {
1312 1.3 cgd setsyserr("vatosw: read vm_object");
1313 1.3 cgd return 0;
1314 1.3 cgd }
1315 1.3 cgd
1316 1.3 cgd #if DEBUG
1317 1.3 cgd fprintf(stderr, "%u: find page: object %#x offset %x\n",
1318 1.3 cgd p->p_pid, addr, off);
1319 1.3 cgd #endif
1320 1.3 cgd
1321 1.3 cgd /* Lookup in page queue */
1322 1.3 cgd if (findpage(addr, off, maddr))
1323 1.3 cgd return 1;
1324 1.3 cgd
1325 1.3 cgd if (vm_object.shadow == 0)
1326 1.3 cgd break;
1327 1.3 cgd
1328 1.3 cgd #if DEBUG
1329 1.3 cgd fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
1330 1.3 cgd p->p_pid, addr, off, vm_object.shadow_offset);
1331 1.3 cgd #endif
1332 1.3 cgd
1333 1.3 cgd addr = (long)vm_object.shadow;
1334 1.3 cgd off += vm_object.shadow_offset;
1335 1.3 cgd }
1336 1.3 cgd
1337 1.3 cgd if (!vm_object.pager) {
1338 1.3 cgd seterr("%u: no pager\n", p->p_pid);
1339 1.3 cgd return 0;
1340 1.3 cgd }
1341 1.3 cgd
1342 1.3 cgd /* Find address in swap space */
1343 1.3 cgd if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
1344 1.3 cgd setsyserr("vatosw: read pager");
1345 1.3 cgd return 0;
1346 1.3 cgd }
1347 1.3 cgd if (pager.pg_type != PG_SWAP) {
1348 1.3 cgd seterr("%u: weird pager\n", p->p_pid);
1349 1.3 cgd return 0;
1350 1.3 cgd }
1351 1.3 cgd
1352 1.3 cgd /* Get swap pager data */
1353 1.3 cgd if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
1354 1.3 cgd setsyserr("vatosw: read swpager");
1355 1.3 cgd return 0;
1356 1.3 cgd }
1357 1.3 cgd
1358 1.3 cgd off += vm_object.paging_offset;
1359 1.3 cgd
1360 1.3 cgd /* Read swap block array */
1361 1.3 cgd if (!KREAD((long)swpager.sw_blocks +
1362 1.3 cgd (off/dbtob(swpager.sw_bsize)) * sizeof swblock,
1363 1.3 cgd &swblock, sizeof swblock)) {
1364 1.3 cgd setsyserr("vatosw: read swblock");
1365 1.3 cgd return 0;
1366 1.3 cgd }
1367 1.3 cgd swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
1368 1.3 cgd swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
1369 1.3 cgd return 1;
1370 1.3 cgd }
1371 1.3 cgd
1372 1.3 cgd
1373 1.3 cgd #define atop(x) (((unsigned)(x)) >> page_shift)
1374 1.3 cgd #define vm_page_hash(object, offset) \
1375 1.3 cgd (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1376 1.3 cgd
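/*
 * Look up a resident page by (object, offset) in the kernel's vm_page
 * hash table: read vm_page_buckets, vm_page_hash_mask and page_shift on
 * first use, hash into the bucket array and walk the hash chain.  On a
 * hit, store the page's physical address in *maddr and return 1; return
 * 0 if the page is not resident or on a read error.
 */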
1377 1.3 cgd static int
1378 1.3 cgd findpage(object, offset, maddr)
1379 1.3 cgd long object;
1380 1.3 cgd long offset;
1381 1.3 cgd vm_offset_t *maddr;
1382 1.3 cgd {
1383 1.3 cgd static long vm_page_hash_mask;
1384 1.3 cgd static long vm_page_buckets;
1385 1.3 cgd static long page_shift;
1386 1.3 cgd queue_head_t bucket;
1387 1.3 cgd struct vm_page mem;
1388 1.3 cgd long addr, baddr;
1389 1.3 cgd
1390 1.3 cgd if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
1391 1.3 cgd &vm_page_hash_mask, sizeof (long))) {
1392 1.3 cgd seterr("can't read vm_page_hash_mask");
1393 1.3 cgd return 0;
1394 1.3 cgd }
1395 1.3 cgd if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
1396 1.3 cgd &page_shift, sizeof (long))) {
1397 1.3 cgd seterr("can't read page_shift");
1398 1.3 cgd return 0;
1399 1.3 cgd }
1400 1.3 cgd if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
1401 1.3 cgd &vm_page_buckets, sizeof (long))) {
1402 1.3 cgd seterr("can't read vm_page_buckets");
1403 1.3 cgd return 0;
1404 1.3 cgd }
1405 1.3 cgd
1406 1.3 cgd baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
1407 1.3 cgd if (!KREAD(baddr, &bucket, sizeof (bucket))) {
1408 1.3 cgd seterr("can't read vm_page_bucket");
1409 1.3 cgd return 0;
1410 1.3 cgd }
1411 1.3 cgd
1412 1.3 cgd addr = (long)bucket.next;
1413 1.3 cgd while (addr != baddr) {
1414 1.3 cgd if (!KREAD(addr, &mem, sizeof (mem))) {
1415 1.3 cgd seterr("can't read vm_page");
1416 1.3 cgd return 0;
1417 1.3 cgd }
1418 1.3 cgd if ((long)mem.object == object && mem.offset == offset) {
1419 1.3 cgd *maddr = (long)mem.phys_addr;
1420 1.3 cgd return 1;
1421 1.3 cgd }
1422 1.3 cgd addr = (long)mem.hashq.next;
1423 1.3 cgd }
1424 1.3 cgd return 0;
1425 1.3 cgd }
1426 1.3 cgd #endif /* NEWVM */
1427 1.3 cgd
1428 1.1 cgd #include <varargs.h>
1429 1.1 cgd static char errbuf[_POSIX2_LINE_MAX];
1430 1.1 cgd
1431 1.1 cgd static void
1432 1.1 cgd seterr(va_alist)
1433 1.1 cgd va_dcl
1434 1.1 cgd {
1435 1.1 cgd char *fmt;
1436 1.1 cgd va_list ap;
1437 1.1 cgd
1438 1.1 cgd va_start(ap);
1439 1.1 cgd fmt = va_arg(ap, char *);
1440 1.1 cgd (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1441 1.3 cgd #if DEBUG
1442 1.3 cgd (void) vfprintf(stderr, fmt, ap);
1443 1.3 cgd #endif
1444 1.1 cgd va_end(ap);
1445 1.1 cgd }
1446 1.1 cgd
1447 1.1 cgd static void
1448 1.1 cgd setsyserr(va_alist)
1449 1.1 cgd va_dcl
1450 1.1 cgd {
1451 1.1 cgd char *fmt, *cp;
1452 1.1 cgd va_list ap;
1453 1.1 cgd extern int errno;
1454 1.1 cgd
1455 1.1 cgd va_start(ap);
1456 1.1 cgd fmt = va_arg(ap, char *);
1457 1.1 cgd (void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1458 1.1 cgd for (cp=errbuf; *cp; cp++)
1459 1.1 cgd ;
1460 1.1 cgd snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1461 1.1 cgd va_end(ap);
1462 1.1 cgd }
1463 1.1 cgd
1464 1.1 cgd char *
1465 1.1 cgd kvm_geterr()
1466 1.1 cgd {
1467 1.1 cgd return (errbuf);
1468 1.1 cgd }