/*	$NetBSD: uvm_mremap.c,v 1.7 2007/08/08 11:08:18 drochner Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mremap.c,v 1.7 2007/08/08 11:08:18 drochner Exp $");

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

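/*
 * uvm_mapent_extend: grow the mapping that ends at "endva" into the
 * reserved entry following it.  The entry at [endva, endva + size) is
 * expected to be a bare reservation (no object, no amap, as created by
 * uvm_map_reserve()); it inherits the previous entry's attributes and
 * is then merged with it if possible.
 */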
static int
uvm_mapent_extend(struct vm_map *map, vaddr_t endva, vsize_t size)
{
	struct vm_map_entry *entry;
	struct vm_map_entry *reserved_entry;
	struct uvm_object *uobj;
	int error = 0;

	vm_map_lock(map);
	if (!uvm_map_lookup_entry(map, endva, &reserved_entry)) {
		error = ENOENT;
		goto done;
	}
	if (reserved_entry->start != endva ||
	    reserved_entry->end != endva + size ||
	    reserved_entry->object.uvm_obj != NULL ||
	    reserved_entry->aref.ar_amap != NULL) {
		error = EINVAL;
		goto done;
	}
	entry = reserved_entry->prev;
	KASSERT(&map->header != entry);
	if (entry->end != endva) {
		error = EINVAL;
		goto done;
	}

	/*
	 * now, make reserved_entry compatible with entry, and then
	 * try to merge.
	 */

	uobj = entry->object.uvm_obj;
	if (uobj) {
		voff_t offset = entry->offset;
		voff_t newoffset;

		newoffset = offset + entry->end - entry->start;
		if (newoffset <= offset) {
			error = E2BIG; /* XXX */
			goto done;
		}
		simple_lock(&uobj->vmobjlock);
		KASSERT(uobj->uo_refs > 0);
		uobj->uo_refs++;
		simple_unlock(&uobj->vmobjlock);
		reserved_entry->object.uvm_obj = uobj;
		reserved_entry->offset = newoffset;
	}
	reserved_entry->etype = entry->etype;
	if (UVM_ET_ISCOPYONWRITE(entry)) {
		reserved_entry->etype |= UVM_ET_NEEDSCOPY;
	}
	reserved_entry->flags &= ~UVM_MAP_NOMERGE;
	reserved_entry->protection = entry->protection;
	reserved_entry->max_protection = entry->max_protection;
	reserved_entry->inheritance = entry->inheritance;
	reserved_entry->advice = entry->advice;
	reserved_entry->wired_count = 0; /* XXX should inherit? */
	uvm_mapent_trymerge(map, reserved_entry, 0);
done:
	vm_map_unlock(map);

	return error;
}

/*
 * uvm_mremap: move and/or resize existing mappings.
 */
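/*
 * oldmap/oldva/oldsize name the existing mapping; newmap/newvap/newsize
 * describe the destination.  With MAP_FIXED in "flags", *newvap is the
 * exact target address; otherwise it is ignored on input and the kernel
 * picks an address.  On success the chosen address is returned via
 * "newvap".  "newproc" is only used to pick a default address in the
 * non-fixed case and must own "newmap".
 */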

int
uvm_mremap(struct vm_map *oldmap, vaddr_t oldva, vsize_t oldsize,
    struct vm_map *newmap, vaddr_t *newvap, vsize_t newsize,
    struct proc *newproc, int flags)
{
	vaddr_t dstva;
	vsize_t movesize;
	vaddr_t newva;
	vaddr_t align = 0;
	int error = 0;
	const bool fixed = (flags & MAP_FIXED) != 0;

	if (fixed) {
		newva = *newvap;
	} else {
		newva = 0;
	}
	if ((oldva & PAGE_MASK) != 0 ||
	    (newva & PAGE_MASK) != 0 ||
	    (oldsize & PAGE_MASK) != 0 ||
	    (newsize & PAGE_MASK) != 0) {
		return EINVAL;
	}
	/* XXX zero-size should be allowed? */
	if (oldva + oldsize <= oldva || newva + newsize <= newva) {
		return EINVAL;
	}

	/*
	 * Try to see if any requested alignment can even be attempted.
	 * Make sure we can express the alignment (asking for a >= 4GB
	 * alignment on an ILP32 architecture makes no sense) and that
	 * the alignment is at least a page-sized quantity.  If the
	 * request was for a fixed mapping, make sure the supplied
	 * address adheres to the requested alignment.
	 */
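	/*
	 * For example (assuming NetBSD's MAP_ALIGNED(n) macro, which
	 * stores the log2 of the alignment in MAP_ALIGNMENT_MASK): a
	 * MAP_ALIGNED(16) request decodes to align == 16 below, i.e.
	 * the mapping must start on a 1L << 16 == 64KB boundary.
	 */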
	align = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
	if (align) {
		if (align >= sizeof(vaddr_t) * NBBY)
			return EINVAL;
		align = 1L << align;
		if (align < PAGE_SIZE)
			return EINVAL;
		if (align >= vm_map_max(oldmap))
			return ENOMEM;
		if (flags & MAP_FIXED) {
			if ((*newvap & (align - 1)) != 0)
				return EINVAL;
			align = 0;
		}
	}

	/*
	 * check the easy cases first.
	 */
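	/*
	 * "Easy" means no map entries need to move: the mapping stays
	 * at oldva in the same map.  Equal sizes are a no-op; shrinking
	 * just unmaps the tail; growing first tries to reserve the
	 * space immediately after the old mapping and extend in place.
	 */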

	if ((!fixed || newva == oldva) && newmap == oldmap &&
	    (align == 0 || (oldva & (align - 1)) == 0)) {
		vaddr_t va;

		if (newsize == oldsize) {
			newva = oldva;
			goto done;
		}
		if (newsize < oldsize) {
			uvm_unmap(oldmap, oldva + newsize, oldva + oldsize);
			newva = oldva;
			goto done;
		}
		va = oldva + oldsize;
		if (uvm_map_reserve(oldmap, newsize - oldsize, 0, 0, &va,
		    UVM_FLAG_FIXED)) {
			newva = oldva;
			goto extend;
		}
		if (fixed) {
			return ENOMEM;
		}
	}

	/*
	 * we need to move mappings.
	 */
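	/*
	 * The move is: reserve newsize bytes in newmap (at newva if
	 * fixed), use uvm_map_extract() to copy the old entries into
	 * the reserved range, extend the tail if growing, and only
	 * then unmap the old range, so failures can be undone without
	 * touching the original mapping.
	 */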

	if (!fixed) {
		KASSERT(&newproc->p_vmspace->vm_map == newmap);
		newva = newproc->p_emul->e_vm_default_addr(newproc,
		    (vaddr_t)newproc->p_vmspace->vm_daddr, newsize);
	}
	dstva = newva;
	if (!uvm_map_reserve(newmap, newsize, oldva, align, &dstva,
	    fixed ? UVM_FLAG_FIXED : 0)) {
		return ENOMEM;
	}
	KASSERT(!fixed || dstva == newva);
	newva = dstva;
	movesize = MIN(oldsize, newsize);
	error = uvm_map_extract(oldmap, oldva, movesize, newmap, &dstva,
	    UVM_EXTRACT_RESERVED);
	KASSERT(dstva == newva);
	if (error != 0) {
		/* undo uvm_map_reserve */
		uvm_unmap(newmap, newva, newva + newsize);
		return error;
	}
	if (newsize > oldsize) {
extend:
		error = uvm_mapent_extend(newmap, newva + oldsize,
		    newsize - oldsize);
		if (error != 0) {
			/* undo uvm_map_reserve and uvm_map_extract */
			uvm_unmap(newmap, newva, newva + newsize);
			return error;
		}
	}

	/*
	 * now we won't fail.  remove original entries.
	 */

	if (oldva != newva || oldmap != newmap) {
		uvm_unmap(oldmap, oldva, oldva + oldsize);
	}
done:
	*newvap = newva;
	return 0;
}

/*
 * sys_mremap: mremap system call.
 */
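/*
 * Illustrative userland usage (a hypothetical sketch, not taken from
 * this file): grow an anonymous mapping and let the kernel pick the
 * new address.  "pgsz" stands for the system page size, e.g. from
 * sysconf(_SC_PAGESIZE).
 *
 *	void *oldp = mmap(NULL, 4 * pgsz, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	void *newp = mremap(oldp, 4 * pgsz, NULL, 8 * pgsz, 0);
 *	if (newp == MAP_FAILED)
 *		err(1, "mremap");
 *
 * This assumes NetBSD's mremap(2) prototype (oldp, oldsize, newp,
 * newsize, flags); new_address is ignored here unless MAP_FIXED is set.
 */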

int
sys_mremap(struct lwp *l, void *v, register_t *retval)
{
	struct sys_mremap_args /* {
		syscallarg(void *) old_address;
		syscallarg(size_t) old_size;
		syscallarg(void *) new_address;
		syscallarg(size_t) new_size;
		syscallarg(int) flags;
	} */ *uap = v;

	struct proc *p;
	struct vm_map *map;
	vaddr_t oldva;
	vaddr_t newva;
	size_t oldsize;
	size_t newsize;
	int flags;
	int error;

	flags = SCARG(uap, flags);
	oldva = (vaddr_t)SCARG(uap, old_address);
	oldsize = (vsize_t)(SCARG(uap, old_size));
	newva = (vaddr_t)SCARG(uap, new_address);
	newsize = (vsize_t)(SCARG(uap, new_size));

	if ((flags & ~(MAP_FIXED | MAP_ALIGNMENT_MASK)) != 0) {
		error = EINVAL;
		goto done;
	}

	oldsize = round_page(oldsize);
	newsize = round_page(newsize);

	p = l->l_proc;
	map = &p->p_vmspace->vm_map;
	error = uvm_mremap(map, oldva, oldsize, map, &newva, newsize, p,
	    flags);

done:
	*retval = (error != 0) ? 0 : (register_t)newva;
	return error;
}