/*	$NetBSD: uvm_mmap.c,v 1.186 2025/02/24 21:32:26 andvar Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993 The Regents of the University of California.
 * Copyright (c) 1988 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *	@(#)vm_mmap.c	8.5 (Berkeley) 5/19/94
 * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
 */

/*
 * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
 * function.
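 *
 * System calls implemented here: sys_mincore, sys_mmap, sys___msync13,
 * sys_munmap, sys_mprotect, sys_minherit, sys_madvise, sys_mlock,
 * sys_munlock, sys_mlockall and sys_munlockall, along with the
 * kernel-internal helpers uvm_mmap_dev, uvm_mmap_anon and
 * uvm_default_mapaddr.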
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.186 2025/02/24 21:32:26 andvar Exp $");

#include "opt_compat_netbsd.h"
#include "opt_pax.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/pax.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

static int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t,
    int, int, struct uvm_object *, voff_t, vsize_t);

static int
range_test(const struct vm_map *map, vaddr_t addr, vsize_t size, bool ismmap)
{
	vaddr_t vm_min_address = vm_map_min(map);
	vaddr_t vm_max_address = vm_map_max(map);
	vaddr_t eaddr = addr + size;
	int res = 0;

	if (addr < vm_min_address)
		return EINVAL;
	if (eaddr > vm_max_address)
		return ismmap ? EFBIG : EINVAL;
	if (addr > eaddr) /* no wrapping! */
		return ismmap ? EOVERFLOW : EINVAL;

#ifdef MD_MMAP_RANGE_TEST
	res = MD_MMAP_RANGE_TEST(addr, eaddr);
#endif

	return res;
}

/*
 * align the address to a page boundary, and adjust the size accordingly
 */
static int
round_and_check(const struct vm_map *map, vaddr_t *addr, vsize_t *size)
{
	const vsize_t pageoff = (vsize_t)(*addr & PAGE_MASK);

	*addr -= pageoff;

	if (*size != 0) {
		*size += pageoff;
		*size = (vsize_t)round_page(*size);
	} else if (*addr + *size < *addr) {
		return ENOMEM;
	}

	return range_test(map, *addr, *size, false);
}

/*
 * sys_mincore: determine if pages are in core or not.
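 *
 * A minimal userland sketch of the interface (illustration only, not
 * part of this file; assumes a POSIX libc):
 *
 *	char vec[4];
 *	if (mincore(addr, 4 * sysconf(_SC_PAGESIZE), vec) == 0) {
 *		// vec[i] != 0 means page i of the range is resident
 *	}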
 */

/* ARGSUSED */
int
sys_mincore(struct lwp *l, const struct sys_mincore_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(char *) vec;
	} */
	struct proc *p = l->l_proc;
	struct vm_page *pg;
	char *vec, pgi;
	struct uvm_object *uobj;
	struct vm_amap *amap;
	struct vm_anon *anon;
	struct vm_map_entry *entry;
	vaddr_t start, end, lim;
	struct vm_map *map;
	vsize_t len;
	int error = 0;
	size_t npgs;

	map = &p->p_vmspace->vm_map;

	start = (vaddr_t)SCARG(uap, addr);
	len = SCARG(uap, len);
	vec = SCARG(uap, vec);

	if (start & PAGE_MASK)
		return EINVAL;
	len = round_page(len);
	end = start + len;
	if (end <= start)
		return EINVAL;

	/*
	 * Lock down vec, so our returned status isn't outdated by
	 * storing the status byte for a page.
	 */

	npgs = len >> PAGE_SHIFT;
	error = uvm_vslock(p->p_vmspace, vec, npgs, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	vm_map_lock_read(map);

	if (uvm_map_lookup_entry(map, start, &entry) == false) {
		error = ENOMEM;
		goto out;
	}

	for (/* nothing */;
	     entry != &map->header && entry->start < end;
	     entry = entry->next) {
		KASSERT(!UVM_ET_ISSUBMAP(entry));
		KASSERT(start >= entry->start);

		/* Make sure there are no holes. */
		if (entry->end < end &&
		    (entry->next == &map->header ||
		     entry->next->start > entry->end)) {
			error = ENOMEM;
			goto out;
		}

		lim = end < entry->end ? end : entry->end;

		/*
		 * Special case for objects with no "real" pages.  Those
		 * are always considered resident (mapped devices).
		 */

		if (UVM_ET_ISOBJ(entry)) {
			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
			if (UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
				for (/* nothing */; start < lim;
				     start += PAGE_SIZE, vec++)
					ustore_char(vec, 1);
				continue;
			}
		}

		amap = entry->aref.ar_amap;	/* upper layer */
		uobj = entry->object.uvm_obj;	/* lower layer */

		if (amap != NULL)
			amap_lock(amap, RW_READER);
		if (uobj != NULL)
			rw_enter(uobj->vmobjlock, RW_READER);

		for (/* nothing */; start < lim; start += PAGE_SIZE, vec++) {
			pgi = 0;
			if (amap != NULL) {
				/* Check the upper layer first.
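				 * A UVM mapping can have two layers: an
				 * amap of anonymous pages stacked on top
				 * of a backing uvm_object.  An anon found
				 * here shadows the object's page, so it
				 * decides residency for this offset.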
				 */
				anon = amap_lookup(&entry->aref,
				    start - entry->start);
				/* Don't need to lock anon here. */
				if (anon != NULL && anon->an_page != NULL) {

					/*
					 * Anon has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			if (uobj != NULL && pgi == 0) {
				/* Check the lower layer. */
				pg = uvm_pagelookup(uobj,
				    entry->offset + (start - entry->start));
				if (pg != NULL) {

					/*
					 * Object has the page for this entry
					 * offset.
					 */

					pgi = 1;
				}
			}
			(void) ustore_char(vec, pgi);
		}
		if (uobj != NULL)
			rw_exit(uobj->vmobjlock);
		if (amap != NULL)
			amap_unlock(amap);
	}

 out:
	vm_map_unlock_read(map);
	uvm_vsunlock(p->p_vmspace, SCARG(uap, vec), npgs);
	return error;
}

/*
 * sys_mmap: mmap system call.
 *
 * => file offset and address may not be page aligned
 *    - if MAP_FIXED, offset and address must have the same remainder
 *      mod PAGE_SIZE
 *    - if address isn't page aligned the mapping starts at trunc_page(addr)
 *      and the return value is adjusted up by the page offset.
 */

int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot, extraprot;
	int flags, fd, advice;
	vaddr_t defaddr = 0;	/* XXXGCC */
	bool addrhint = false;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
	vaddr_t orig_addr;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	extraprot = PROT_MPROTECT_EXTRACT(SCARG(uap, prot));
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	orig_addr = addr;

	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	if (size == 0 && (flags & MAP_ANON) == 0)
		return EINVAL;

	/*
	 * Align file position and save offset into page.  Adjust size
	 * so that it is an integral multiple of the page size.
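	 *
	 * E.g. with PAGE_SIZE 4096, pos = 0x12345 yields pageoff = 0x345
	 * and pos = 0x12000; a len of 0x1000 then becomes
	 * round_page(0x1345) = 0x2000, covering both pages touched by the
	 * original [pos, pos + len) range.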
	 */
	pageoff = pos & PAGE_MASK;
	pos -= pageoff;
	KASSERT(PAGE_MASK <= __type_max(vsize_t));
	KASSERT((__type_max(vsize_t) - PAGE_SIZE + 1) % PAGE_SIZE == 0);
	if (size > __type_max(vsize_t) - PAGE_SIZE + 1 - pageoff)
		return ENOMEM;
	/*
	 * size + pageoff <= VSIZE_MAX + 1 - PAGE_SIZE, and the
	 * right-hand side is an integral multiple of the page size, so
	 * round_page(size + pageoff) <= VSIZE_MAX + 1 - PAGE_SIZE.
	 */
	size = round_page(size + pageoff);

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);

		/*
		 * If addr is nonzero and not the default, then the
		 * address is a hint.
		 */
		addrhint = (addr != 0 && addr != defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		KASSERT(size != 0);

		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:		/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	maxprot = PAX_MPROTECT_MAXPROTECT(l, prot, extraprot, maxprot);
	if (((prot | extraprot) & maxprot) != (prot | extraprot)) {
		error = EACCES;
		goto out;
	}
	if ((error = PAX_MPROTECT_VALIDATE(l, prot)))
		goto out;

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * Now let kernel internal function uvm_mmap do the work.
	 *
	 * If the user provided a hint, take a reference to uobj in
	 * case the first attempt to satisfy the hint fails, so we can
	 * try again with the default address.
	 */
	if (addrhint) {
		if (uobj)
			(*uobj->pgops->pgo_reference)(uobj);
	}
	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	if (addrhint) {
		if (error) {
			addr = defaddr;
			pax_aslr_mmap(l, &addr, orig_addr, flags);
			error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size,
			    prot, maxprot, flags, advice, uobj, pos,
			    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
		} else if (uobj) {
			/* Release the extra reference we took.
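			 * The first uvm_mmap() succeeded and consumed
			 * one reference, so the spare reference taken
			 * above for the retry is no longer needed.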
			 */
			(*uobj->pgops->pgo_detach)(uobj);
		}
	}

	/* remember to add offset */
	*retval = (register_t)(addr + pageoff);

 out:
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}

/*
 * sys___msync13: the msync system call (a front-end for flush)
 */

int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	int error, flags, uvmflags;
	bool rv;

	/*
	 * extract syscall args from the uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return EINVAL;
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * get map
	 */
	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return ENOMEM;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
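	 *
	 * E.g. msync(addr, 0, MS_SYNC) issued after this mapping has been
	 * coalesced with a neighbor will flush the whole combined entry,
	 * which may be more than the caller's original mapping.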
	 */

	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return EINVAL;
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */

	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr+size, uvmflags);
	return error;
}

/*
 * sys_munmap: unmap a user's memory
 */

int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	map = &p->p_vmspace->vm_map;

	if (round_and_check(map, &addr, &size))
		return EINVAL;

	if (size == 0)
		return 0;

	vm_map_lock(map);
#if 0
	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return EINVAL;
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return 0;
}

/*
 * sys_mprotect: the mprotect system call
 */

int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_protect_user(l, addr, addr + size, prot);
	return error;
}

/*
 * sys_minherit: the minherit system call
 */

int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}

/*
 * sys_madvise: give advice about memory usage.
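 *
 * MADV_NORMAL, MADV_RANDOM and MADV_SEQUENTIAL adjust the map entry's
 * advice; MADV_WILLNEED pre-activates pages, MADV_DONTNEED deactivates
 * them without tossing their contents, and MADV_FREE discards contents
 * and any backing swap; MADV_SPACEAVAIL is not implemented and fails
 * with EINVAL.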
 */

/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return EINVAL;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:

		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map,
		    addr, addr + size);
		break;

	case MADV_DONTNEED:

		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:

		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */

		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:

		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */

		return EINVAL;

	default:
		return EINVAL;
	}

	return error;
}

/*
 * sys_mlock: memory lock
 */

int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return EAGAIN;

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return EAGAIN;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
	    0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}

/*
 * sys_munlock: unlock wired pages
 */

int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	if (round_and_check(&p->p_vmspace->vm_map, &addr, &size))
		return ENOMEM;

	if (uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true, 0))
		return ENOMEM;

	return 0;
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 */

int
sys_mlockall(struct lwp *l, const struct sys_mlockall_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 || (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return EINVAL;

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 */

int
sys_munlockall(struct lwp *l, const void *v, register_t *retval)
{
	struct proc *p = l->l_proc;

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return 0;
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap and various framebuffers
 * - uobj is a struct uvm_object pointer or NULL for MAP_ANON
 * - caller must page-align the file offset
 *
 * XXX This appears to leak the uobj in various error branches?  Need
 * to clean up the contract around uobj reference.
 */

static int
uvm_mmap(struct vm_map *map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, int advice, struct uvm_object *uobj,
    voff_t foff, vsize_t locklimit)
{
	vaddr_t align = 0;
	int error;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return 0;
	if (foff & PAGE_MASK)
		return EINVAL;
	if ((prot & maxprot) != prot)
		return EINVAL;

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr & PAGE_MASK)
			return EINVAL;
		uvmflag |= UVM_FLAG_FIXED | UVM_FLAG_UNMAP;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and the
	 * alignment is at least for a page sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied address
	 * adheres to the requested alignment.
	 */
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1UL << align;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(map))
			return ENOMEM;
		if (flags & MAP_FIXED) {
			if ((*addr & (align-1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check resource limits
	 */

	if (!VM_MAP_IS_KERNEL(map) &&
	    (((rlim_t)curproc->p_vmspace->vm_map.size + (rlim_t)size) >
	     curproc->p_rlimit[RLIMIT_AS].rlim_cur))
		return ENOMEM;

	/*
	 * handle anon vs. non-anon mappings.  for non-anon mappings attach
	 * to underlying vm object.
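	 *
	 * E.g. a MAP_PRIVATE file mapping keeps uobj as the lower layer
	 * and sets UVM_FLAG_COPYONW so writes are redirected to anon
	 * pages, while MAP_ANON|MAP_SHARED creates its amap up front via
	 * UVM_FLAG_OVERLAY.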
	 */

	if (flags & MAP_ANON) {
		KASSERT(uobj == NULL);
		foff = UVM_UNKNOWN_OFFSET;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
		KASSERT(uobj != NULL);
		if ((flags & MAP_SHARED) == 0) {
			uvmflag |= UVM_FLAG_COPYONW;
		}
	}

	uvmflag = UVM_MAPFLAG(prot, maxprot,
	    (flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY, advice,
	    uvmflag);
	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
	if (error) {
		if (uobj)
			uobj->pgops->pgo_detach(uobj);
		return error;
	}

	/*
	 * POSIX 1003.1b -- if our address space was configured
	 * to lock all future mappings, wire the one we just made.
	 *
	 * Also handle the MAP_WIRED flag here.
	 */

	if (prot == VM_PROT_NONE) {

		/*
		 * No more work to do in this case.
		 */

		return 0;
	}
	if ((flags & MAP_WIRED) != 0 || (map->flags & VM_MAP_WIREFUTURE) != 0) {
		vm_map_lock(map);
		if (atop(size) + uvmexp.wired > uvmexp.wiredmax ||
		    (locklimit != 0 &&
		     size + ptoa(pmap_wired_count(vm_map_pmap(map))) >
		     locklimit)) {
			vm_map_unlock(map);
			uvm_unmap(map, *addr, *addr + size);
			return ENOMEM;
		}

		/*
		 * uvm_map_pageable() always returns the map unlocked.
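		 * It is called with the map already locked (UVM_LK_ENTER,
		 * from the vm_map_lock() above), so neither path below
		 * needs an explicit vm_map_unlock().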
		 */

		error = uvm_map_pageable(map, *addr, *addr + size,
		    false, UVM_LK_ENTER);
		if (error) {
			uvm_unmap(map, *addr, *addr + size);
			return error;
		}
		return 0;
	}
	return 0;
}

vaddr_t
uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz, int topdown)
{

	if (topdown)
		return VM_DEFAULT_ADDRESS_TOPDOWN(base, sz);
	else
		return VM_DEFAULT_ADDRESS_BOTTOMUP(base, sz);
}

int
uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
    off_t off)
{
	struct uvm_object *uobj;
	int error, flags, prot;

	KASSERT(len > 0);

	flags = MAP_SHARED;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	uobj = udv_attach(dev, prot, off, len);
	if (uobj == NULL)
		return EINVAL;

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_RANDOM, uobj, off,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}

int
uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
{
	int error, flags, prot;

	flags = MAP_PRIVATE | MAP_ANON;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (*addrp)
		flags |= MAP_FIXED;
	else
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, prot, prot, flags, UVM_ADV_NORMAL, NULL, 0,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}
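
/*
 * Hypothetical usage sketch for the helpers above (illustration only,
 * not compiled here): a kernel subsystem that wants an anonymous,
 * user-visible buffer in process `p' could do
 *
 *	void *va = NULL;
 *	int error = uvm_mmap_anon(p, &va, round_page(len));
 *
 * leaving *addrp NULL so a default address is chosen via
 * e_vm_default_addr, and getting back a private, zero-fill mapping
 * in p's vm_map.
 */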