/*	$NetBSD: procfs_mem.c,v 1.17 1998/02/05 08:00:14 mrg Exp $	*/

/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#define	ISSET(t, f)	((t) & (f))

#if !defined(UVM)
static int procfs_rwmem __P((struct proc *, struct uio *));

static int
procfs_rwmem(p, uio)
        struct proc *p;
        struct uio *uio;
{
        int error;
        int writing;

        writing = uio->uio_rw == UIO_WRITE;

        /*
         * Only map in one page at a time.  We don't have to, but it
         * makes things easier.  This way is trivial - right?
         */
        do {
                vm_map_t map, tmap;
                vm_object_t object;
                vm_offset_t kva;
                vm_offset_t uva;
                int page_offset;                /* offset into page */
                vm_offset_t pageno;             /* page number */
                vm_map_entry_t out_entry;
                vm_prot_t out_prot;
                vm_page_t m;
                boolean_t wired, single_use;
                vm_offset_t off;
                u_int len;
                int fix_prot;

                uva = (vm_offset_t) uio->uio_offset;
                /*
                 * Stop at the end of the user address space; a
                 * transfer starting at or beyond it reads as EOF.
                 * (VM_MAXUSER_ADDRESS itself is the first address
                 * past user space, hence >= rather than >.)
                 */
                if (uva >= VM_MAXUSER_ADDRESS) {
                        error = 0;
                        break;
                }

                /*
                 * Get the page number of this segment.
                 */
                pageno = trunc_page(uva);
                page_offset = uva - pageno;

                /*
                 * How many bytes to copy
                 */
                len = min(PAGE_SIZE - page_offset, uio->uio_resid);

                /*
                 * The map we want...
                 */
                map = &p->p_vmspace->vm_map;

                /*
                 * Check the permissions for the area we're interested
                 * in.
                 */
                fix_prot = 0;
                if (writing)
                        fix_prot = !vm_map_check_protection(map, pageno,
                            pageno + PAGE_SIZE, VM_PROT_WRITE);

                if (fix_prot) {
                        /*
                         * If the page is not writable, we make it so.
                         * XXX It is possible that a page may *not* be
                         * read/executable, if a process changes that!
                         * We will assume, for now, that a page is either
                         * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
                         */
                        error = vm_map_protect(map, pageno,
                            pageno + PAGE_SIZE, VM_PROT_ALL, 0);
                        if (error)
                                break;
                }

                /*
                 * Now we need to get the page.  out_entry, out_prot, wired,
                 * and single_use aren't used.  One would think the vm code
                 * would be a *bit* nicer...  We use tmap because
                 * vm_map_lookup() can change the map argument.
                 */
                tmap = map;
                error = vm_map_lookup(&tmap, pageno,
                    writing ? VM_PROT_WRITE : VM_PROT_READ,
                    &out_entry, &object, &off, &out_prot,
                    &wired, &single_use);
                /*
                 * We're done with tmap now.
                 */
                if (!error)
                        vm_map_lookup_done(tmap, out_entry);

                /*
                 * Fault the page in...
                 */
                if (!error && writing && object->shadow) {
                        m = vm_page_lookup(object, off);
                        if (m == 0 || (m->flags & PG_COPYONWRITE))
                                error = vm_fault(map, pageno,
                                    VM_PROT_WRITE, FALSE);
                }

                /* Find space in kernel_map for the page we're interested in */
                if (!error) {
                        kva = VM_MIN_KERNEL_ADDRESS;
                        error = vm_map_find(kernel_map, object, off, &kva,
                            PAGE_SIZE, 1);
                }

                if (!error) {
                        /*
                         * Neither vm_map_lookup() nor vm_map_find() appear
                         * to add a reference count to the object, so we do
                         * that here and now.
                         */
                        vm_object_reference(object);

                        /*
                         * Wire down the page we just mapped (the FALSE
                         * "new_pageable" argument means wired); this also
                         * faults it in and keeps it resident for the copy.
                         */
                        error = vm_map_pageable(kernel_map, kva,
                            kva + PAGE_SIZE, 0);

                        /*
                         * Now do the I/O move.
                         */
                        if (!error)
                                error = uiomove((caddr_t) (kva + page_offset),
                                    len, uio);

                        vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
                }
                if (fix_prot)
                        vm_map_protect(map, pageno, pageno + PAGE_SIZE,
                            VM_PROT_READ|VM_PROT_EXECUTE, 0);
        } while (error == 0 && uio->uio_resid > 0);

        return (error);
}
#endif
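
/*
 * A minimal standalone sketch (kept under "#if 0", not kernel code) of
 * the page-at-a-time chunking performed by the loop above: a transfer of
 * uio_resid bytes starting at user address uva is split on page
 * boundaries, so each iteration touches at most one page.  The names
 * DEMO_PAGE_SIZE, demo_trunc_page and demo_chunk are invented for this
 * example; in the kernel the arithmetic uses PAGE_SIZE and trunc_page(),
 * and uiomove() advances uio_offset/uio_resid rather than the locals here.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE          4096UL
#define demo_trunc_page(va)     ((va) & ~(DEMO_PAGE_SIZE - 1))

static void
demo_chunk(unsigned long uva, size_t resid)
{
        while (resid > 0) {
                unsigned long pageno = demo_trunc_page(uva);
                size_t page_offset = uva - pageno;      /* offset into page */
                size_t len = DEMO_PAGE_SIZE - page_offset;

                if (len > resid)                        /* min() by hand */
                        len = resid;
                printf("page 0x%lx, offset 0x%zx, len 0x%zx\n",
                    pageno, page_offset, len);
                uva += len;             /* what uiomove's bookkeeping does */
                resid -= len;
        }
}
#endif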

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove direct
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
        struct proc *curp;              /* tracer */
        struct proc *p;                 /* traced */
        struct pfsnode *pfs;
        struct uio *uio;
{
        int error;

        if (uio->uio_resid == 0)
                return (0);

        if ((error = procfs_checkioperm(curp, p)) != 0)
                return (error);

#if defined(UVM)
        /* XXXCDC: how should locking work here? */
        if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
                return (EFAULT);
        PHOLD(p);
        p->p_vmspace->vm_refcnt++;      /* XXX */
        error = uvm_io(&p->p_vmspace->vm_map, uio);
        PRELE(p);
        uvmspace_free(p->p_vmspace);
#else
        PHOLD(p);
        error = procfs_rwmem(p, uio);
        PRELE(p);
#endif
        return (error);
}
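
/*
 * A minimal usage sketch (kept under "#if 0") of how an in-kernel caller
 * might drive procfs_domem() to read "len" bytes at target address "off"
 * from the traced process "p" into a kernel buffer "buf" - a pattern
 * similar to other in-kernel uio users.  The variables buf, off, len and
 * error are assumed to be declared by the caller; this is an
 * illustration, not a drop-in implementation.
 */
#if 0
        struct iovec iov;
        struct uio uio;

        iov.iov_base = (caddr_t) buf;   /* destination, in kernel space */
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = (off_t) off;   /* address in the target process */
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;  /* buf is a kernel address */
        uio.uio_rw = UIO_READ;
        uio.uio_procp = curp;           /* the tracer */
        error = procfs_domem(curp, p, NULL, &uio);
#endif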

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system, however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * proc structure, which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
        struct proc *p;
{

        return (p->p_textvp);
}

/*
 * Ensure that a process has permission to perform I/O on another.
 * Arguments:
 *	p	The process wishing to do the I/O (the tracer).
 *	t	The process whose memory/registers will be read/written.
 */
int
procfs_checkioperm(p, t)
        struct proc *p, *t;
{
        int error;

        /*
         * You cannot attach to a process's mem/regs if:
         *
         *      (1) it's not owned by you, or is set-id on exec
         *          (unless you're root), or...
         */
        if ((t->p_cred->p_ruid != p->p_cred->p_ruid ||
            ISSET(t->p_flag, P_SUGID)) &&
            (error = suser(p->p_ucred, &p->p_acflag)) != 0)
                return (error);

        /*
         *      (2) ...it's init, which controls the security level
         *          of the entire system, and the system was not
         *          compiled with permanently insecure mode turned on.
         */
        if (t == initproc && securelevel > -1)
                return (EPERM);

        return (0);
}
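
/*
 * A standalone restatement (kept under "#if 0", not kernel code) of the
 * two rules procfs_checkioperm() enforces, written as a pure predicate
 * so the policy can be read at a glance.  All of the names here
 * (demo_may_trace and its parameters) are invented for this example.
 */
#if 0
static int
demo_may_trace(int tracer_ruid, int tracer_is_root,
    int target_ruid, int target_sugid,
    int target_is_init, int securelevel)
{
        /* (1) different real uid, or set-id target: tracer must be root */
        if ((tracer_ruid != target_ruid || target_sugid) && !tracer_is_root)
                return (0);

        /* (2) init may only be traced on a permanently insecure system */
        if (target_is_init && securelevel > -1)
                return (0);

        return (1);
}
#endif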

#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr, in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
        struct proc *p;
{
        int error;
        vm_object_t object;
        vm_offset_t pageno;             /* page number */

        /* find a vnode pager for the user address space */

        for (pageno = VM_MIN_ADDRESS;
            pageno < VM_MAXUSER_ADDRESS;
            pageno += PAGE_SIZE) {
                vm_map_t map;
                vm_map_entry_t out_entry;
                vm_prot_t out_prot;
                boolean_t wired, single_use;
                vm_offset_t off;

                map = &p->p_vmspace->vm_map;
                error = vm_map_lookup(&map, pageno,
                    VM_PROT_READ,
                    &out_entry, &object, &off, &out_prot,
                    &wired, &single_use);

                if (!error) {
                        vm_pager_t pager;

                        printf("procfs: found vm object\n");
                        vm_map_lookup_done(map, out_entry);
                        printf("procfs: vm object = %p\n", object);

                        /*
                         * At this point, assuming no errors, object
                         * is the VM object mapping UVA (pageno).
                         * Ensure it has a vnode pager, then grab
                         * the vnode from that pager's handle.
                         */

                        pager = object->pager;
                        printf("procfs: pager = %p\n", pager);
                        if (pager)
                                printf("procfs: found pager, type = %d\n",
                                    pager->pg_type);
                        if (pager && pager->pg_type == PG_VNODE) {
                                struct vnode *vp;

                                vp = (struct vnode *) pager->pg_handle;
                                printf("procfs: vp = %p\n", vp);
                                return (vp);
                        }
                }
        }

        printf("procfs: text object not found\n");
        return (0);
}
#endif /* probably_never */