/*	$NetBSD: procfs_mem.c,v 1.15 1997/09/10 13:44:26 christos Exp $	*/

/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#define	ISSET(t, f)	((t) & (f))

static int	procfs_rwmem __P((struct proc *, struct uio *));

static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

		uva = (vm_offset_t) uio->uio_offset;
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);
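
		/*
		 * Worked example (assuming 4096-byte pages): a transfer
		 * starting at uva 0x2ffd with uio_resid 16 yields
		 * pageno 0x2000, page_offset 0xffd and
		 * len = min(0x1000 - 0xffd, 16) = 3; the remaining 13
		 * bytes are handled by later iterations of this loop.
		 */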

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}
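
		/*
		 * Illustration: the usual reason the protection needs
		 * fixing is a tracer writing through procfs (e.g.
		 * planting a breakpoint) into a read-only text page; the
		 * mapping is opened up to VM_PROT_ALL above and restored
		 * to VM_PROT_READ|VM_PROT_EXECUTE at the bottom of the
		 * loop.
		 */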

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
			writing ? VM_PROT_WRITE : VM_PROT_READ,
			&out_entry, &object, &off, &out_prot,
			&wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in...
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
					VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error) {
			kva = VM_MIN_KERNEL_ADDRESS;
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);
		}

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Wire the page we just mapped into the kernel
			 * (the 0 here means "not pageable"), which also
			 * forces it to be faulted in and keeps it
			 * resident for the copy.
			 */
			error = vm_map_pageable(kernel_map, kva,
					kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
						len, uio);

			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove directly
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;		/* tracer */
	struct proc *p;			/* traced */
	struct pfsnode *pfs;
	struct uio *uio;
{
	int error;

	if (uio->uio_resid == 0)
		return (0);

	if ((error = procfs_checkioperm(curp, p)) != 0)
		return (error);

	PHOLD(p);
	error = procfs_rwmem(p, uio);
	PRELE(p);
	return (error);
}
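
/*
 * Usage sketch (userland view; a hypothetical illustration, not part of
 * the kernel): the per-process "mem" file is driven with ordinary file
 * I/O, and those reads and writes are dispatched to procfs_domem() by
 * the procfs read/write vnode operations.  The pid and address below
 * are made up.
 *
 *	char buf[64];
 *	int fd = open("/proc/123/mem", O_RDONLY);
 *
 *	if (fd != -1) {
 *		if (lseek(fd, (off_t)0x1000, SEEK_SET) != -1)
 *			(void)read(fd, buf, sizeof(buf));
 *		(void)close(fd);
 *	}
 */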

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system, however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{

	return (p->p_textvp);
}

/*
 * Ensure that a process has permission to perform I/O on another.
 * Arguments:
 *	p	The process wishing to do the I/O (the tracer).
 *	t	The process whose memory/registers will be read/written.
 */
int
procfs_checkioperm(p, t)
	struct proc *p, *t;
{
	int error;

	/*
	 * You cannot attach to a process's mem/regs if:
	 *
	 *	(1) it's not owned by you, or is set-id on exec
	 *	    (unless you're root), or...
	 */
	if ((t->p_cred->p_ruid != p->p_cred->p_ruid ||
	    ISSET(t->p_flag, P_SUGID)) &&
	    (error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	/*
	 *	(2) ...it's init, which controls the security level
	 *	    of the entire system, and the system was not
	 *	    compiled with permanently insecure mode turned on.
	 */
	if (t == initproc && securelevel > -1)
		return (EPERM);

	return (0);
}
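
/*
 * For example (uids made up for illustration): a tracer with real uid 100
 * may operate on a target whose real uid is also 100, provided the target
 * has not done a set-id exec (P_SUGID).  A target owned by uid 200, or a
 * set-id target, additionally requires the tracer to pass the suser()
 * check.  init is refused outright unless the system is running
 * permanently insecure (securelevel == -1).
 */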

#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
	     pageno < VM_MAXUSER_ADDRESS;
	     pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			VM_PROT_READ,
			&out_entry, &object, &off, &out_prot,
			&wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %p\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %p\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n",
				    pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = %p\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */