/*	$NetBSD: exec_subr.c,v 1.90 2024/12/06 16:19:41 riastradh Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.90 2024/12/06 16:19:41 riastradh Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/types.h>

#include <sys/device.h>
#include <sys/exec.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/pax.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

#ifdef DEBUG_STACK
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif
unsigned int user_stack_guard_size = 1024 * 1024;
unsigned int user_thread_stack_guard_size = 64 * 1024;

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    vsize_t len, vaddr_t addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	VMCMD_EVCNT_INCR(calls);
	KASSERT(proc != vmcmd_map_pagedvn || (vp->v_iflag & VI_TEXT));
	KASSERT(vp == NULL || vrefcnt(vp) > 0);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
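
/*
 * Callers normally do not invoke new_vmcmd() directly but go through
 * the NEW_VMCMD()/NEW_VMCMD2() macros from <sys/exec.h>, as
 * exec_setup_stack() does below.  An illustrative sketch (the address
 * and length here are made up):
 *
 *	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, PAGE_SIZE,
 *	    epp->ep_minsaddr - PAGE_SIZE, NULL, 0,
 *	    VM_PROT_READ | VM_PROT_WRITE, VMCMD_STACK);
 *
 * The queued commands are run in order during execve() and torn down
 * with kill_vmcmds() on failure.
 */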

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}
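
/*
 * Note that the set starts at EXEC_DEFAULT_VMCMD_SETSIZE entries and
 * doubles each time it fills, so a sequence of n new_vmcmd() calls
 * costs O(n) amortized copying rather than O(n^2).
 */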

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

static int
vmcmd_get_prot(struct lwp *l, const struct exec_vmcmd *cmd, vm_prot_t *prot,
    vm_prot_t *maxprot)
{
	vm_prot_t extraprot = PROT_MPROTECT_EXTRACT(cmd->ev_prot);

	*prot = cmd->ev_prot & UVM_PROT_ALL;
	*maxprot = PAX_MPROTECT_MAXPROTECT(l, *prot, extraprot, UVM_PROT_ALL);

	if ((*prot & *maxprot) != *prot)
		return SET_ERROR(EACCES);
	return PAX_MPROTECT_VALIDATE(l, *prot);
}
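
/*
 * The ev_prot field carries both the initial protection (in the low
 * bits) and any additional protections the mapping may later be
 * mprotect()ed to, encoded with PROT_MPROTECT() from <sys/mman.h>.
 * exec_setup_stack() below uses this for the not-yet-accessible part
 * of the stack:
 *
 *	VM_PROT_NONE | PROT_MPROTECT(VM_PROT_READ | VM_PROT_WRITE)
 *
 * PaX MPROTECT may further restrict the resulting maxprot.
 */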

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return SET_ERROR(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return SET_ERROR(EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return SET_ERROR(EINVAL);

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset, 0,
	    UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
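
/*
 * The vref() above takes the reference that the new map entry will
 * own; on success uvm_map() consumes it, so the error path has to
 * drop it again through the object's pgo_detach op (which, for a
 * vnode, comes down to vrele()).
 */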

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;		/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.  however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    maxprot, true);
		if (error)
			return error;
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    prot, false);
		if (error)
			return error;
	}

	return 0;
}
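
/*
 * The two uvm_map_protect() calls happen in this order because the
 * first (set_max = true) clamps the maximum protection of the range,
 * which also strips any current permissions outside the new maxprot;
 * the second then lowers the current protection to the caller's
 * requested prot within that maximum.
 */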

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;		/* required by uvm_map */
	cmd->ev_len += diff;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}
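
/*
 * A NULL uvm_object with UVM_FLAG_COPYONW yields anonymous zero-fill
 * memory.  Regions flagged VMCMD_STACK additionally grow vm_issize,
 * which appears to track the page count of the initial stack set up
 * at exec time.
 */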

/*
 * exec_read():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read(struct lwp *l, struct vnode *vp, u_long off, void *bf, size_t size,
    int ioflg)
{
	int error;
	size_t resid;

	KASSERT((ioflg & IO_NODELOCKED) == 0 || VOP_ISLOCKED(vp) != LK_NONE);

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    ioflg, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return SET_ERROR(ENOEXEC);
	return 0;
}
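
/*
 * Typical use by an exec format parser, sketched (hdr stands in for
 * whatever header structure the format defines; the vnode must
 * already be locked when IO_NODELOCKED is passed):
 *
 *	error = exec_read(l, epp->ep_vp, 0, &hdr, sizeof(hdr),
 *	    IO_NODELOCKED);
 *
 * A short read is treated as ENOEXEC rather than success, so callers
 * may assume the whole buffer is valid whenever 0 is returned.
 */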

/*
 * exec_setup_stack(): Set up the stack segment for an elf
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	vsize_t max_stack_size;
	vaddr_t access_linear_min;
	vsize_t access_size;
	vaddr_t noaccess_linear_min;
	vsize_t noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif
#ifndef MAXSSIZ32
#define MAXSSIZ32	(MAXSSIZ >> 2)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ32;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}

	DPRINTF(("ep_minsaddr=%#jx max_stack_size=%#jx\n",
	    (uintmax_t)epp->ep_minsaddr, (uintmax_t)max_stack_size));

	pax_aslr_stack(epp, &max_stack_size);

	DPRINTF(("[RLIMIT_STACK].lim_cur=%#jx max_stack_size=%#jx\n",
	    (uintmax_t)l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    (uintmax_t)max_stack_size));
	epp->ep_ssize = MIN(l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    max_stack_size);

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (vaddr_t)STACK_GROW(epp->ep_minsaddr,
	    max_stack_size);

	DPRINTF(("ep_ssize=%#jx ep_minsaddr=%#jx ep_maxsaddr=%#jx\n",
	    (uintmax_t)epp->ep_ssize, (uintmax_t)epp->ep_minsaddr,
	    (uintmax_t)epp->ep_maxsaddr));

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (vaddr_t)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);

	DPRINTF(("access_size=%#jx, access_linear_min=%#jx, "
	    "noaccess_size=%#jx, noaccess_linear_min=%#jx\n",
	    (uintmax_t)access_size, (uintmax_t)access_linear_min,
	    (uintmax_t)noaccess_size, (uintmax_t)noaccess_linear_min));

	if (user_stack_guard_size > 0) {
#ifdef __MACHINE_STACK_GROWS_UP
		vsize_t guard_size = MIN(VM_MAXUSER_ADDRESS - epp->ep_maxsaddr,
		    user_stack_guard_size);
		if (guard_size > 0)
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, guard_size,
			    epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE);
#else
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
		    user_stack_guard_size,
		    epp->ep_maxsaddr - user_stack_guard_size, NULL, 0,
		    VM_PROT_NONE);
#endif
	}
	if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0,
		    VM_PROT_NONE | PROT_MPROTECT(VM_PROT_READ | VM_PROT_WRITE),
		    VMCMD_STACK);
	}
	KASSERT(access_size > 0);
	KASSERT(access_size <= MAXSSIZ);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}
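
/*
 * Resulting layout on a machine whose stack grows down, from high
 * addresses to low:
 *
 *	ep_minsaddr ->	+-------------------------------+
 *			| accessible stack (ep_ssize,	|
 *			| RW, VMCMD_STACK)		|
 *			+-------------------------------+
 *			| noaccess region, PROT_NONE	|
 *			| but mprotect()able to RW	|
 *	ep_maxsaddr ->	+-------------------------------+
 *			| guard, PROT_NONE		|
 *			+-------------------------------+
 *
 * (On __MACHINE_STACK_GROWS_UP machines the picture is mirrored and
 * the guard sits above ep_maxsaddr instead.)
 */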