/*	$NetBSD: exec_subr.c,v 1.14 1998/07/28 18:11:39 thorpej Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>

#include <vm/vm.h>

#if defined(UVM)
#include <uvm/uvm.h>
#endif

/*
 * XXX cgd 960926: this module should collect simple statistics
 * (calls, extends, kills).
 */

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 *	If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(evsp, proc, len, addr, vp, offset, prot)
	struct exec_vmcmd_set *evsp;
	int (*proc) __P((struct proc *p, struct exec_vmcmd *));
	u_long len;
	u_long addr;
	struct vnode *vp;
	u_long offset;
	u_int prot;
{
	struct exec_vmcmd *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
}
#endif /* DEBUG */
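
/*
 * Usage sketch (hedged; modeled on a.out-style exec packages such as
 * exec_aout.c): callers normally go through the NEW_VMCMD() macro from
 * <sys/exec.h>, which calls new_vmcmd() when DEBUG is defined and
 * expands inline otherwise.  A demand-paged (ZMAGIC-style) text
 * segment might be queued as:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text,
 *	    epp->ep_taddr, epp->ep_vp, 0, VM_PROT_READ|VM_PROT_EXECUTE);
 *
 * where 'epp' is the struct exec_package being filled in by the exec
 * package's makecmds routine.
 */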
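/*
 * vmcmdset_extend():
 *	grow a vmcmd set: allocate the initial
 *	EXEC_DEFAULT_VMCMD_SETSIZE entries, or double the current
 *	capacity, copying any existing commands into the new array.
 */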
void
vmcmdset_extend(evsp)
	struct exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	MALLOC(nvcp, struct exec_vmcmd *,
	    (evsp->evs_cnt * sizeof(struct exec_vmcmd)), M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		bcopy(evsp->evs_cmds, nvcp, (ocnt * sizeof(struct exec_vmcmd)));
		FREE(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}

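/*
 * kill_vmcmds():
 *	release a vmcmd set: vrele() any vnodes held by queued
 *	commands, reset the counts, and free the command array.
 */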
void
kill_vmcmds(evsp)
	struct exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *vcp;
	int i;

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	FREE(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
#if defined(UVM)
	struct uvm_object *uobj;
	int retval;

	/*
	 * map the vnode in using uvm_map.
	 */

	/* checks imported from uvm_mmap, needed? */
	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach((void *) cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return(ENOMEM);

	/*
	 * do the map
	 */

	retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset,
	    UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

	/*
	 * check for error
	 */

	if (retval == KERN_SUCCESS)
		return(0);

	/*
	 * error: detach from object
	 */

	uobj->pgops->pgo_detach(uobj);
	return(EINVAL);

#else
	return vm_mmap(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    cmd->ev_prot, VM_PROT_ALL, MAP_FIXED|MAP_COPY, (caddr_t)cmd->ev_vp,
	    cmd->ev_offset);
#endif
}
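
/*
 * Design note (editor's reading of the flags above): both flavors map
 * the vnode copy-on-write (UVM_FLAG_COPYONW under UVM, MAP_COPY under
 * the old Mach VM), so stores by the process never reach the backing
 * file, and both use fixed placement (UVM_FLAG_FIXED / MAP_FIXED)
 * because the exec package has already chosen the segment's address.
 */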

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

#if defined(UVM)
	if (cmd->ev_len == 0)
		return(KERN_SUCCESS); /* XXXCDC: should it happen? */

	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

#else
	error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    cmd->ev_len, 0);
#endif
	if (error)
		return error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return error;

#if defined(UVM)
	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.  however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		return(uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    cmd->ev_prot, FALSE));
	} else {
		return(KERN_SUCCESS);
	}
#else
	return vm_map_protect(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
	    round_page(cmd->ev_addr + cmd->ev_len), cmd->ev_prot, FALSE);
#endif
}
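
/*
 * Usage sketch (hedged; offsets simplified, see exec_aout.c for the
 * real thing): an NMAGIC/OMAGIC data segment, whose file offset is
 * generally not page aligned and so cannot be demand paged, would be
 * queued for this routine roughly as:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_data,
 *	    epp->ep_daddr, epp->ep_vp,
 *	    data_foffset,	(hypothetical: file offset of the data)
 *	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 */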

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 *	The address range must first be allocated, then protected
 *	appropriately.
 */

int
vmcmd_map_zero(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

#if defined(UVM)
	if (cmd->ev_len == 0)
		return(KERN_SUCCESS); /* XXXCDC: should it happen? */

	cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

#else
	error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    cmd->ev_len, 0);
#endif
	if (error)
		return error;

#if !defined(UVM)
	if (cmd->ev_prot != VM_PROT_DEFAULT)
		return vm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    cmd->ev_prot, FALSE);
#endif
	return(KERN_SUCCESS);
}
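
/*
 * Usage sketch (hedged; modeled on exec_aout.c): a bss segment is
 * queued as a zero-fill vmcmd with no vnode behind it, placed just
 * past the initialized data:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss,
 *	    epp->ep_daddr + execp->a_data, NULLVP, 0,
 *	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 */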