exec_subr.c revision 1.15.2.1 1 /* $NetBSD: exec_subr.c,v 1.15.2.1 1998/11/09 06:06:31 chs Exp $ */
2
3 /*
4 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Christopher G. Demetriou.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include "opt_uvm.h"
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/proc.h>
38 #include <sys/malloc.h>
39 #include <sys/vnode.h>
40 #include <sys/filedesc.h>
41 #include <sys/exec.h>
42 #include <sys/mman.h>
43
44 #include <vm/vm.h>
45
46 #if defined(UVM)
47 #include <uvm/uvm.h>
48 #endif
49
50 /*
51 * XXX cgd 960926: this module should collect simple statistics
52 * (calls, extends, kills).
53 */
54
#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 *	If not debugging, this is a macro, so it's expanded inline.
 *	(This function must therefore stay semantically identical to
 *	the non-DEBUG macro version.)
 */

void
new_vmcmd(evsp, proc, len, addr, vp, offset, prot)
	struct exec_vmcmd_set *evsp;	/* set to append the new command to */
	int (*proc) __P((struct proc * p, struct exec_vmcmd *));
					/* handler that will run the command */
	u_long len;			/* length of the region */
	u_long addr;			/* user virtual address of the region */
	struct vnode *vp;		/* backing vnode, or NULL if none */
	u_long offset;			/* byte offset within vp */
	u_int prot;			/* VM protection for the region */
{
	struct exec_vmcmd *vcp;

	/* grow the command array if it is full */
	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	/*
	 * Hold a reference on the backing vnode so it cannot be
	 * recycled before the command runs; kill_vmcmds() drops it.
	 */
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
}
#endif /* DEBUG */
89
90 void
91 vmcmdset_extend(evsp)
92 struct exec_vmcmd_set *evsp;
93 {
94 struct exec_vmcmd *nvcp;
95 u_int ocnt;
96
97 #ifdef DIAGNOSTIC
98 if (evsp->evs_used < evsp->evs_cnt)
99 panic("vmcmdset_extend: not necessary");
100 #endif
101
102 /* figure out number of entries in new set */
103 ocnt = evsp->evs_cnt;
104 evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;
105
106 /* allocate it */
107 MALLOC(nvcp, struct exec_vmcmd *,
108 (evsp->evs_cnt * sizeof(struct exec_vmcmd)), M_EXEC, M_WAITOK);
109
110 /* free the old struct, if there was one, and record the new one */
111 if (ocnt) {
112 memcpy(nvcp, evsp->evs_cmds, (ocnt * sizeof(struct exec_vmcmd)));
113 FREE(evsp->evs_cmds, M_EXEC);
114 }
115 evsp->evs_cmds = nvcp;
116 }
117
118 void
119 kill_vmcmds(evsp)
120 struct exec_vmcmd_set *evsp;
121 {
122 struct exec_vmcmd *vcp;
123 int i;
124
125 if (evsp->evs_cnt == 0)
126 return;
127
128 for (i = 0; i < evsp->evs_used; i++) {
129 vcp = &evsp->evs_cmds[i];
130 if (vcp->ev_vp != NULLVP)
131 vrele(vcp->ev_vp);
132 }
133 evsp->evs_used = evsp->evs_cnt = 0;
134 FREE(evsp->evs_cmds, M_EXEC);
135 }
136
/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 *
 *	Returns 0 on success, or an errno value on failure.
 */

int
vmcmd_map_pagedvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	/*
	 * note that if you're going to map part of an process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
#if defined(UVM)
	struct uvm_object *uobj;
	int retval;

	/*
	 * map the vnode in using uvm_map.
	 */

	/* checks imported from uvm_mmap, needed? */
	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);		/* file offset must be page-aligned */
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);		/* user address must be page-aligned */

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach((void *) cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return(ENOMEM);
	/* XXX for now, attaching doesn't gain a ref */
	VREF(cmd->ev_vp);

	/*
	 * do the map: fixed address, copy-on-write, copy inheritance
	 * across fork.
	 */

	retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset,
		UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

	/*
	 * check for error
	 */

	if (retval == KERN_SUCCESS)
		return(0);

	/*
	 * error: detach from object.
	 *
	 * NOTE(review): the extra vnode reference taken via VREF() above
	 * is presumably dropped inside pgo_detach() (since attach did not
	 * gain a ref, per the XXX comment) — confirm against uvn_detach().
	 * Also, the specific uvm_map() failure code is discarded and
	 * collapsed to EINVAL.
	 */

	uobj->pgops->pgo_detach(uobj);
	return(EINVAL);

#else
	/* pre-UVM Mach VM: vm_mmap() handles the whole job */
	return vm_mmap(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    cmd->ev_prot, VM_PROT_ALL, MAP_FIXED|MAP_COPY, (caddr_t)cmd->ev_vp,
	    cmd->ev_offset);
#endif
}
209
/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 *
 *	Strategy: allocate the region with full protection so vn_rdwr()
 *	can write into it, copy the file data in, then tighten the
 *	protection to what the caller actually asked for.
 *
 *	NOTE(review): the UVM branch returns KERN_* codes from the map
 *	calls but errno values from vn_rdwr() — callers appear to treat
 *	any nonzero value as failure; confirm before relying on specific
 *	error codes.
 */
int
vmcmd_map_readvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

#if defined(UVM)
	if (cmd->ev_len == 0)
		return(KERN_SUCCESS); /* XXXCDC: should it happen? */

	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	/* fixed-address, zero-fill (overlay) mapping, writable for the read */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

#else
	error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    cmd->ev_len, 0);
#endif
	if (error)
		return error;

	/*
	 * copy the segment from the vnode into the freshly mapped
	 * (user-space) region.  NULL residual pointer: short reads are
	 * handled inside vn_rdwr() — presumably reported as an error;
	 * verify against this kernel's vn_rdwr().
	 */
	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return error;

#if defined(UVM)
	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.   however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		return(uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				cmd->ev_prot, FALSE));
	} else {
		return(KERN_SUCCESS);
	}
#else
	/* old VM: unconditionally set the final protection */
	return vm_map_protect(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
	    round_page(cmd->ev_addr + cmd->ev_len), cmd->ev_prot, FALSE);
#endif
}
267
/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 *	Used for bss-style segments.
 */

int
vmcmd_map_zero(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

#if defined(UVM)
	if (cmd->ev_len == 0)
		return(KERN_SUCCESS); /* XXXCDC: should it happen? */

	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	/*
	 * Fixed-address, zero-fill mapping created directly with the
	 * requested protection — no separate protect step needed here,
	 * unlike vmcmd_map_readvn().
	 */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
			UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

#else
	error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    cmd->ev_len, 0);
#endif
	if (error)
		return error;

#if !defined(UVM)
	/*
	 * old VM allocates with the default protection; tighten it only
	 * if the caller asked for something different.
	 */
	if (cmd->ev_prot != VM_PROT_DEFAULT)
		return vm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    cmd->ev_prot, FALSE);
#endif
	return(KERN_SUCCESS);
}
308