/*	$NetBSD: exec_subr.c,v 1.51 2007/02/22 06:34:42 thorpej Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.51 2007/02/22 06:34:42 thorpej Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <uvm/uvm.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);
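
/*
 * Note (editorial, illustrative): these are ordinary EVCNT_TYPE_MISC
 * event counters, so they can be inspected from userland, e.g. with
 * "vmstat -e", grouped under the name "vmcmd".
 */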

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp *l, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	VMCMD_EVCNT_INCR(calls);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
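
/*
 * Example (editorial sketch, not part of this file): an exec format
 * handler queues vmcmds while parsing the on-disk image, and the kernel
 * runs them later, in order.  "epp", "vp" and "ph" below are
 * hypothetical locals of such a handler; the NEW_VMCMD() convenience
 * macro from <sys/exec.h> is used the same way by exec_setup_stack()
 * at the bottom of this file.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, ph->p_filesz,
 *	    ph->p_vaddr, vp, ph->p_offset,
 *	    VM_PROT_READ|VM_PROT_EXECUTE);
 */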

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = malloc(evsp->evs_cnt * sizeof(struct exec_vmcmd),
	    M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		free(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	free(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_flag & VTEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return(EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return(ENOMEM);
	VREF(vp);

	if ((vp->v_flag & VMAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		simple_lock(&vp->v_interlock);
		vp->v_flag |= VMAPPED;
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
	}

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * do the map
	 */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset, 0,
	    UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;		/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.  however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    maxprot, true);
		if (error)
			return (error);
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    prot, false);
		if (error)
			return (error);
	}

	return 0;
}
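
/*
 * Example (editorial sketch): vmcmd_map_readvn is what a format handler
 * falls back to when a segment cannot be demand-paged, e.g. when its
 * file offset is not page-aligned; "epp", "vp" and "ph" are
 * hypothetical caller locals as in the earlier example.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, ph->p_filesz,
 *	    ph->p_vaddr, vp, ph->p_offset,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 */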

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;		/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	return error;
}
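
/*
 * Example (editorial sketch): the classic user of vmcmd_map_zero is the
 * bss portion of a data segment, i.e. the part of p_memsz beyond
 * p_filesz; exec_setup_stack() below also uses it for both halves of
 * the stack region.  "epp" and "ph" are hypothetical caller locals.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
 *	    ph->p_memsz - ph->p_filesz, ph->p_vaddr + ph->p_filesz,
 *	    NULL, 0, VM_PROT_READ|VM_PROT_WRITE);
 */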

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    0, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
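
/*
 * Example (editorial sketch): a typical caller uses exec_read_from()
 * to pull the executable's header into a kernel buffer, treating a
 * short read as "not this format"; "epp" and "ehdr" are hypothetical.
 *
 *	if ((error = exec_read_from(l, epp->ep_vp, 0, &ehdr,
 *	    sizeof(ehdr))) != 0)
 *		return error;
 */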

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	u_long max_stack_size;
	u_long access_linear_min, access_size;
	u_long noaccess_linear_min, noaccess_size;

#ifndef USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}
	epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr,
	    max_stack_size);
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);
	if (noaccess_size > 0) {
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE);
	}
	KASSERT(access_size > 0);
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE);

	return 0;
}
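
/*
 * Illustrative layout (editorial) for a downward-growing stack;
 * addresses fall from top to bottom, and on an upward-growing stack
 * STACK_GROW()/STACK_ALLOC() flip the picture:
 *
 *	ep_minsaddr (USRSTACK)	-> +------------------------------+
 *				   | accessible stack, RW,        |
 *				   | ep_ssize bytes (RLIMIT_STACK)|
 *				   +------------------------------+
 *				   | VM_PROT_NONE region filling  |
 *				   | the rest of MAXSSIZ          |
 *	ep_maxsaddr		-> +------------------------------+
 */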