/*	$NetBSD: procfs_mem.c,v 1.17 1998/02/05 08:00:14 mrg Exp $	*/

/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#define	ISSET(t, f)	((t) & (f))

#if !defined(UVM)
static int	procfs_rwmem __P((struct proc *, struct uio *));

static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

		uva = (vm_offset_t) uio->uio_offset;
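		/*
		 * Stop once the offset runs past the end of the user
		 * address space; breaking out with error == 0 makes the
		 * transfer look like an ordinary end-of-file.
		 */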
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
			writing ? VM_PROT_WRITE : VM_PROT_READ,
			&out_entry, &object, &off, &out_prot,
			&wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in if necessary.  If we are writing and
		 * the object is backed by another (copy-on-write), force
		 * a write fault so that we end up with our own private
		 * copy of the page rather than the backing object's.
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
					VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error) {
			kva = VM_MIN_KERNEL_ADDRESS;
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);
		}

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Wire the page into the kernel map (the final
			 * argument of 0 means "not pageable") so that it
			 * stays resident for the copy below.
			 */
			error = vm_map_pageable(kernel_map, kva,
				kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
					len, uio);

			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
#endif

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove directly
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;		/* tracer */
	struct proc *p;			/* traced */
	struct pfsnode *pfs;
	struct uio *uio;
{
	int error;

	if (uio->uio_resid == 0)
		return (0);

	if ((error = procfs_checkioperm(curp, p)) != 0)
		return (error);

#if defined(UVM)
	/* XXXCDC: how should locking work here? */
	if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
		return (EFAULT);
	PHOLD(p);
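	/*
	 * Take an extra reference on the vmspace so that it cannot be
	 * torn down while uvm_io() is working on its map; the reference
	 * is dropped again by uvmspace_free() below.
	 */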
	p->p_vmspace->vm_refcnt++;	/* XXX */
	error = uvm_io(&p->p_vmspace->vm_map, uio);
	PRELE(p);
	uvmspace_free(p->p_vmspace);
#else
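	/*
	 * Hold the process in memory (PHOLD/PRELE) so that it cannot be
	 * swapped out while we work on its address space.
	 */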
	PHOLD(p);
	error = procfs_rwmem(p, uio);
	PRELE(p);
#endif
	return (error);
}
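
/*
 * A minimal sketch of how this routine is typically reached (assuming
 * the usual procfs read/write path): procfs_rw() dispatches I/O on the
 * Pmem node here, roughly
 *
 *	case Pmem:
 *		return (procfs_domem(curp, p, pfs, uio));
 *
 * so a read(2) or write(2) on /proc/<pid>/mem arrives with uio
 * describing the caller's buffer and file offset.
 */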

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system, however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{

	return (p->p_textvp);
}

/*
 * Ensure that a process has permission to perform I/O on another.
 * Arguments:
 *	p	The process wishing to do the I/O (the tracer).
 *	t	The process whose memory/registers will be read/written.
 */
int
procfs_checkioperm(p, t)
	struct proc *p, *t;
{
	int error;

	/*
	 * You cannot attach to a process's mem/regs if:
	 *
	 *	(1) it's not owned by you, or is set-id on exec
	 *	    (unless you're root), or...
	 */
	if ((t->p_cred->p_ruid != p->p_cred->p_ruid ||
	    ISSET(t->p_flag, P_SUGID)) &&
	    (error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	/*
	 *	(2) ...it's init, which controls the security level
	 *	    of the entire system, and the system was not
	 *	    compiled with permanently insecure mode turned on.
	 */
	if (t == initproc && securelevel > -1)
		return (EPERM);

	return (0);
}

#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			VM_PROT_READ,
			&out_entry, &object, &off, &out_prot,
			&wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %p\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %p\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n",
				    pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = %p\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */