/*	$NetBSD: sysv_shm.c,v 1.125.2.2 2020/01/21 19:19:17 martin Exp $	*/

/*-
 * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles M.
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.125.2.2 2020/01/21 19:19:17 martin Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/shm.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>          /* XXX for <sys/syscallargs.h> */
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct shmmap_entry {
        SLIST_ENTRY(shmmap_entry) next;
        vaddr_t va;
        int shmid;
};

int shm_nused                   __cacheline_aligned;
struct shmid_ds *shmsegs        __read_mostly;

static kmutex_t shm_lock        __cacheline_aligned;
static kcondvar_t *shm_cv       __cacheline_aligned;
static int shm_last_free        __cacheline_aligned;
static size_t shm_committed     __cacheline_aligned;
static int shm_use_phys         __read_mostly;

static kcondvar_t shm_realloc_cv;
static bool shm_realloc_state;
static u_int shm_realloc_disable;
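
/*
 * Per-process shm map: each vmspace points to one of these via vm_shm.
 * The map is shared on fork (shmfork() just bumps nrefs) and is lazily
 * unshared on the first modifying operation, via shmmap_getprivate().
 */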
struct shmmap_state {
        unsigned int nitems;
        unsigned int nrefs;
        SLIST_HEAD(, shmmap_entry) entries;
};

#ifdef SHMDEBUG
#define SHMPRINTF(a) printf a
#else
#define SHMPRINTF(a)
#endif
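
/*
 * Locking summary (see the routines below for the details):
 *
 *  => shm_lock guards shmsegs[], shm_nused, shm_committed and the
 *     per-process shm maps hanging off vm_shm;
 *  => shm_cv[segnum] is broadcast when a segment slot changes state
 *     (see SHMSEG_WANTED);
 *  => shm_realloc_state, shm_realloc_disable and shm_realloc_cv
 *     serialize shmrealloc() against the syscall paths, which bump
 *     shm_realloc_disable while they operate with the lock dropped.
 */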

static int shmrealloc(int);

/*
 * Find the permissions of a shared memory segment by index.  Used only
 * by compat_linux to implement SHM_STAT.
 */
int
shm_find_segment_perm_by_index(int index, struct ipc_perm *perm)
{
        struct shmid_ds *shmseg;

        mutex_enter(&shm_lock);
        if (index < 0 || index >= shminfo.shmmni) {
                mutex_exit(&shm_lock);
                return EINVAL;
        }
        shmseg = &shmsegs[index];
        memcpy(perm, &shmseg->shm_perm, sizeof(*perm));
        mutex_exit(&shm_lock);
        return 0;
}

/*
 * Find the shared memory segment by the identifier.
 *  => must be called with shm_lock held;
 */
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        KASSERT(mutex_owned(&shm_lock));

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shminfo.shmmni)
                return NULL;
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
                return NULL;
        if ((shmseg->shm_perm.mode &
            (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
                return NULL;
        if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
                return NULL;

        return shmseg;
}
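
/*
 * Note: a shmid handed to userland packs the slot index together with
 * the slot's sequence number (shm_perm._seq), so a stale identifier
 * for a recycled slot fails the IPCID_TO_SEQ() check above.  The
 * packing is defined by the IPCID/IXSEQ macros from <sys/ipc.h>.
 */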

/*
 * Free memory segment.
 *  => must be called with shm_lock held;
 */
static void
shm_free_segment(int segnum)
{
        struct shmid_ds *shmseg;
        size_t size;
        bool wanted;

        KASSERT(mutex_owned(&shm_lock));

        shmseg = &shmsegs[segnum];
        SHMPRINTF(("shm freeing key 0x%lx seq 0x%x\n",
            shmseg->shm_perm._key, shmseg->shm_perm._seq));

        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        wanted = (shmseg->shm_perm.mode & SHMSEG_WANTED);

        shmseg->_shm_internal = NULL;
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
        shm_last_free = segnum;
        if (wanted == true)
                cv_broadcast(&shm_cv[segnum]);
}

/*
 * Delete entry from the shm map.
 *  => must be called with shm_lock held;
 */
static struct uvm_object *
shm_delete_mapping(struct shmmap_state *shmmap_s,
    struct shmmap_entry *shmmap_se)
{
        struct uvm_object *uobj = NULL;
        struct shmid_ds *shmseg;
        int segnum;

        KASSERT(mutex_owned(&shm_lock));

        segnum = IPCID_TO_IX(shmmap_se->shmid);
        shmseg = &shmsegs[segnum];
        SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
        shmmap_s->nitems--;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                uobj = shmseg->_shm_internal;
                shm_free_segment(segnum);
        }

        return uobj;
}

/*
 * Get a non-shared shm map for that vmspace.  Note that the memory
 * allocation might be performed with the lock held.
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
        struct shmmap_state *oshmmap_s, *shmmap_s;
        struct shmmap_entry *oshmmap_se, *shmmap_se;

        KASSERT(mutex_owned(&shm_lock));

        /* 1. A shm map with refcnt = 1, used only by us - return it */
        oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (oshmmap_s && oshmmap_s->nrefs == 1)
                return oshmmap_s;

        /* 2. No shm map present - create a fresh one */
        shmmap_s = kmem_zalloc(sizeof(struct shmmap_state), KM_SLEEP);
        shmmap_s->nrefs = 1;
        SLIST_INIT(&shmmap_s->entries);
        p->p_vmspace->vm_shm = (void *)shmmap_s;

        if (oshmmap_s == NULL)
                return shmmap_s;

        SHMPRINTF(("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
            p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs));

        /* 3. A shared shm map - copy it to a fresh one and adjust refcounts */
        SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
                shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
                shmmap_se->va = oshmmap_se->va;
                shmmap_se->shmid = oshmmap_se->shmid;
                SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        }
        shmmap_s->nitems = oshmmap_s->nitems;
        oshmmap_s->nrefs--;

        return shmmap_s;
}

/*
 * Lock/unlock the memory.
 *  => must be called with shm_lock held;
 *  => called from one place only, hence inline;
 */
static inline int
shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
{
        struct proc *p = l->l_proc;
        struct shmmap_entry *shmmap_se;
        struct shmmap_state *shmmap_s;
        size_t size;
        int error;

        KASSERT(mutex_owned(&shm_lock));
        shmmap_s = shmmap_getprivate(p);

        /* Find our shared memory address by shmid */
        SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
                if (shmmap_se->shmid != shmid)
                        continue;

                size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

                if (cmd == SHM_LOCK &&
                    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
                        /* Wire the object and map, then tag it */
                        error = uvm_obj_wirepages(shmseg->_shm_internal,
                            0, size, NULL);
                        if (error)
                                return EIO;
                        error = uvm_map_pageable(&p->p_vmspace->vm_map,
                            shmmap_se->va, shmmap_se->va + size, false, 0);
                        if (error) {
                                uvm_obj_unwirepages(shmseg->_shm_internal,
                                    0, size);
                                if (error == EFAULT)
                                        error = ENOMEM;
                                return error;
                        }
                        shmseg->shm_perm.mode |= SHMSEG_WIRED;

                } else if (cmd == SHM_UNLOCK &&
                    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
                        /* Unwire the object and map, then untag it */
                        uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
                        error = uvm_map_pageable(&p->p_vmspace->vm_map,
                            shmmap_se->va, shmmap_se->va + size, true, 0);
                        if (error)
                                return EIO;
                        shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
                }
        }

        return 0;
}

/*
 * Unmap shared memory.
 */
int
sys_shmdt(struct lwp *l, const struct sys_shmdt_args *uap, register_t *retval)
{
        /* {
                syscallarg(const void *) shmaddr;
        } */
        struct proc *p = l->l_proc;
        struct shmmap_state *shmmap_s1, *shmmap_s;
        struct shmmap_entry *shmmap_se;
        struct uvm_object *uobj;
        struct shmid_ds *shmseg;
        size_t size;

        mutex_enter(&shm_lock);
        /* In case of reallocation, we will wait for completion */
        while (__predict_false(shm_realloc_state))
                cv_wait(&shm_realloc_cv, &shm_lock);

        shmmap_s1 = (struct shmmap_state *)p->p_vmspace->vm_shm;
        if (shmmap_s1 == NULL) {
                mutex_exit(&shm_lock);
                return EINVAL;
        }

        /* Find the map entry */
        SLIST_FOREACH(shmmap_se, &shmmap_s1->entries, next)
                if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
                        break;
        if (shmmap_se == NULL) {
                mutex_exit(&shm_lock);
                return EINVAL;
        }

        shmmap_s = shmmap_getprivate(p);
        if (shmmap_s != shmmap_s1) {
                /* Map has been copied, look up the entry in the new map */
                SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
                        if (shmmap_se->va == (vaddr_t)SCARG(uap, shmaddr))
                                break;
                if (shmmap_se == NULL) {
                        mutex_exit(&shm_lock);
                        return EINVAL;
                }
        }

        SHMPRINTF(("shmdt: vm %p: remove %d @%lx\n",
            p->p_vmspace, shmmap_se->shmid, shmmap_se->va));

        /* Delete the entry from shm map */
        uobj = shm_delete_mapping(shmmap_s, shmmap_se);
        shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        mutex_exit(&shm_lock);

        uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
        if (uobj != NULL) {
                uao_detach(uobj);
        }
        kmem_free(shmmap_se, sizeof(struct shmmap_entry));

        return 0;
}

/*
 * Map shared memory.
 */
int
sys_shmat(struct lwp *l, const struct sys_shmat_args *uap, register_t *retval)
{
        /* {
                syscallarg(int) shmid;
                syscallarg(const void *) shmaddr;
                syscallarg(int) shmflg;
        } */
        int error, flags = 0;
        struct proc *p = l->l_proc;
        kauth_cred_t cred = l->l_cred;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;
        struct uvm_object *uobj;
        struct vmspace *vm;
        vaddr_t attach_va;
        vm_prot_t prot;
        vsize_t size;

        /* Allocate a new map entry and set it */
        shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
        shmmap_se->shmid = SCARG(uap, shmid);

        mutex_enter(&shm_lock);
        /* In case of reallocation, we will wait for completion */
        while (__predict_false(shm_realloc_state))
                cv_wait(&shm_realloc_cv, &shm_lock);

        shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
        if (shmseg == NULL) {
                error = EINVAL;
                goto err;
        }
        error = ipcperm(cred, &shmseg->shm_perm,
            (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto err;

        vm = p->p_vmspace;
        shmmap_s = (struct shmmap_state *)vm->vm_shm;
        if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg) {
                error = EMFILE;
                goto err;
        }

        size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
        prot = VM_PROT_READ;
        if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        if (SCARG(uap, shmaddr)) {
                flags |= UVM_FLAG_FIXED;
                if (SCARG(uap, shmflg) & SHM_RND)
                        attach_va =
                            (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
                else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
                        attach_va = (vaddr_t)SCARG(uap, shmaddr);
                else {
                        error = EINVAL;
                        goto err;
                }
        } else {
                /* This is just a hint to uvm_map() about where to put it. */
                attach_va = p->p_emul->e_vm_default_addr(p,
                    (vaddr_t)vm->vm_daddr, size);
        }

        /*
         * Create a map entry, add it to the list and increase the counters.
         * The lock will be dropped before the mapping, so disable
         * reallocation.
         */
        shmmap_s = shmmap_getprivate(p);
        SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
        shmmap_s->nitems++;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_nattch++;
        shm_realloc_disable++;
        mutex_exit(&shm_lock);

        /*
         * Add a reference to the memory object, map it to the
         * address space, and lock the memory, if needed.
         */
        uobj = shmseg->_shm_internal;
        uao_reference(uobj);
        error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
            UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
        if (error)
                goto err_detach;
        if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
                error = uvm_map_pageable(&vm->vm_map, attach_va,
                    attach_va + size, false, 0);
                if (error) {
                        if (error == EFAULT)
                                error = ENOMEM;
                        uvm_deallocate(&vm->vm_map, attach_va, size);
                        goto err_detach;
                }
        }

        /* Set the new address, and update the time */
        mutex_enter(&shm_lock);
        shmmap_se->va = attach_va;
        shmseg->shm_atime = time_second;
        shm_realloc_disable--;
        retval[0] = attach_va;
        SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
            p->p_vmspace, shmmap_se->shmid, attach_va));
err:
        cv_broadcast(&shm_realloc_cv);
        mutex_exit(&shm_lock);
        if (error && shmmap_se) {
                kmem_free(shmmap_se, sizeof(struct shmmap_entry));
        }
        return error;

err_detach:
        uao_detach(uobj);
        mutex_enter(&shm_lock);
        uobj = shm_delete_mapping(shmmap_s, shmmap_se);
        shm_realloc_disable--;
        cv_broadcast(&shm_realloc_cv);
        mutex_exit(&shm_lock);
        if (uobj != NULL) {
                uao_detach(uobj);
        }
        kmem_free(shmmap_se, sizeof(struct shmmap_entry));
        return error;
}
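
/*
 * Userland usage sketch (illustrative only, not kernel code) of the
 * sequence served by the syscalls in this file:
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      void *p = shmat(id, NULL, 0);           sys_shmat()
 *      ...use the memory...
 *      shmdt(p);                               sys_shmdt()
 *      shmctl(id, IPC_RMID, NULL);             sys___shmctl50()
 */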

/*
 * Shared memory control operations.
 */
int
sys___shmctl50(struct lwp *l, const struct sys___shmctl50_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) shmid;
                syscallarg(int) cmd;
                syscallarg(struct shmid_ds *) buf;
        } */
        struct shmid_ds shmbuf;
        int cmd, error;

        cmd = SCARG(uap, cmd);
        if (cmd == IPC_SET) {
                error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
                if (error)
                        return error;
        }

        error = shmctl1(l, SCARG(uap, shmid), cmd,
            (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

        if (error == 0 && cmd == IPC_STAT)
                error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

        return error;
}

int
shmctl1(struct lwp *l, int shmid, int cmd, struct shmid_ds *shmbuf)
{
        struct uvm_object *uobj = NULL;
        kauth_cred_t cred = l->l_cred;
        struct shmid_ds *shmseg;
        int error = 0;

        mutex_enter(&shm_lock);
        /* In case of reallocation, we will wait for completion */
        while (__predict_false(shm_realloc_state))
                cv_wait(&shm_realloc_cv, &shm_lock);

        shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                mutex_exit(&shm_lock);
                return EINVAL;
        }

        switch (cmd) {
        case IPC_STAT:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
                        break;
                memset(shmbuf, 0, sizeof *shmbuf);
                shmbuf->shm_perm = shmseg->shm_perm;
                shmbuf->shm_perm.mode &= 0777;
                shmbuf->shm_segsz = shmseg->shm_segsz;
                shmbuf->shm_lpid = shmseg->shm_lpid;
                shmbuf->shm_cpid = shmseg->shm_cpid;
                shmbuf->shm_nattch = shmseg->shm_nattch;
                shmbuf->shm_atime = shmseg->shm_atime;
                shmbuf->shm_dtime = shmseg->shm_dtime;
                shmbuf->shm_ctime = shmseg->shm_ctime;
                break;
        case IPC_SET:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
                        break;
                shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
                shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (shmbuf->shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
        case IPC_RMID:
                if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
                        break;
                shmseg->shm_perm._key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        uobj = shmseg->_shm_internal;
                        shm_free_segment(IPCID_TO_IX(shmid));
                }
                break;
        case SHM_LOCK:
        case SHM_UNLOCK:
                if ((error = kauth_authorize_system(cred,
                    KAUTH_SYSTEM_SYSVIPC,
                    (cmd == SHM_LOCK) ? KAUTH_REQ_SYSTEM_SYSVIPC_SHM_LOCK :
                    KAUTH_REQ_SYSTEM_SYSVIPC_SHM_UNLOCK, NULL, NULL, NULL)) != 0)
                        break;
                error = shm_memlock(l, shmseg, shmid, cmd);
                break;
        default:
                error = EINVAL;
        }

        mutex_exit(&shm_lock);
        if (uobj != NULL)
                uao_detach(uobj);
        return error;
}

/*
 * Try to take an already existing segment.
 *  => must be called with shm_lock held;
 *  => called from one place only, hence inline;
 */
static inline int
shmget_existing(struct lwp *l, const struct sys_shmget_args *uap, int mode,
    register_t *retval)
{
        struct shmid_ds *shmseg;
        kauth_cred_t cred = l->l_cred;
        int segnum, error;
again:
        KASSERT(mutex_owned(&shm_lock));

        /* Find segment by key */
        for (segnum = 0; segnum < shminfo.shmmni; segnum++)
                if ((shmsegs[segnum].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[segnum].shm_perm._key == SCARG(uap, key))
                        break;
        if (segnum == shminfo.shmmni) {
                /* Not found */
                return -1;
        }

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = cv_wait_sig(&shm_cv[segnum], &shm_lock);
                if (error)
                        return error;
                goto again;
        }

        /*
         * First check the flags, to generate a useful error when a
         * segment already exists.
         */
        if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
            (IPC_CREAT | IPC_EXCL))
                return EEXIST;

        /* Check the permission and segment size. */
        error = ipcperm(cred, &shmseg->shm_perm, mode);
        if (error)
                return error;
        if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
                return EINVAL;

        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return 0;
}

int
sys_shmget(struct lwp *l, const struct sys_shmget_args *uap, register_t *retval)
{
        /* {
                syscallarg(key_t) key;
                syscallarg(size_t) size;
                syscallarg(int) shmflg;
        } */
        struct shmid_ds *shmseg;
        kauth_cred_t cred = l->l_cred;
        key_t key = SCARG(uap, key);
        size_t size;
        int error, mode, segnum;
        bool lockmem;

        mode = SCARG(uap, shmflg) & ACCESSPERMS;
        if (SCARG(uap, shmflg) & _SHM_RMLINGER)
                mode |= SHMSEG_RMLINGER;

        SHMPRINTF(("shmget: key 0x%lx size 0x%zx shmflg 0x%x mode 0x%x\n",
            SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode));

        mutex_enter(&shm_lock);
        /* In case of reallocation, we will wait for completion */
        while (__predict_false(shm_realloc_state))
                cv_wait(&shm_realloc_cv, &shm_lock);

        if (key != IPC_PRIVATE) {
                error = shmget_existing(l, uap, mode, retval);
                if (error != -1) {
                        mutex_exit(&shm_lock);
                        return error;
                }
                if ((SCARG(uap, shmflg) & IPC_CREAT) == 0) {
                        mutex_exit(&shm_lock);
                        return ENOENT;
                }
        }
        error = 0;

        /*
         * Check against the limits.
         */
        size = SCARG(uap, size);
        if (size < shminfo.shmmin || size > shminfo.shmmax) {
                mutex_exit(&shm_lock);
                return EINVAL;
        }
        if (shm_nused >= shminfo.shmmni) {
                mutex_exit(&shm_lock);
                return ENOSPC;
        }
        size = (size + PGOFSET) & ~PGOFSET;
        if (shm_committed + btoc(size) > shminfo.shmall) {
                mutex_exit(&shm_lock);
                return ENOMEM;
        }

        /* Find the first available segment */
        if (shm_last_free < 0) {
                for (segnum = 0; segnum < shminfo.shmmni; segnum++)
                        if (shmsegs[segnum].shm_perm.mode & SHMSEG_FREE)
                                break;
                KASSERT(segnum < shminfo.shmmni);
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }

        /*
         * Initialize the segment.
         * We will drop the lock while allocating the memory, thus mark the
         * segment as present but removed, so that no other thread can take
         * it.  Also, disable reallocation while the lock is dropped.
         */
        shmseg = &shmsegs[segnum];
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shm_committed += btoc(size);
        shm_nused++;
        lockmem = shm_use_phys;
        shm_realloc_disable++;
        mutex_exit(&shm_lock);

        /* Allocate the memory object and lock it if needed */
        shmseg->_shm_internal = uao_create(size, 0);
        if (lockmem) {
                /* Wire the pages and tag it */
                error = uvm_obj_wirepages(shmseg->_shm_internal, 0, size, NULL);
                if (error) {
                        uao_detach(shmseg->_shm_internal);
                        mutex_enter(&shm_lock);
                        shm_free_segment(segnum);
                        shm_realloc_disable--;
                        mutex_exit(&shm_lock);
                        return error;
                }
        }

        /*
         * Note: while the segment is marked, there is no need to hold the
         * lock while initializing it (except for shm_perm.mode).
         */
        shmseg->shm_perm._key = SCARG(uap, key);
        shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
        *retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = kauth_cred_geteuid(cred);
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = kauth_cred_getegid(cred);
        shmseg->shm_segsz = SCARG(uap, size);
        shmseg->shm_cpid = l->l_proc->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;

        /*
         * Segment is initialized.
         * Enter the lock, mark as allocated, and notify waiters (if any).
         * Also, re-enable reallocation.
         */
        mutex_enter(&shm_lock);
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & (ACCESSPERMS | SHMSEG_RMLINGER)) |
            SHMSEG_ALLOCATED | (lockmem ? SHMSEG_WIRED : 0);
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                cv_broadcast(&shm_cv[segnum]);
        }
        shm_realloc_disable--;
        cv_broadcast(&shm_realloc_cv);
        mutex_exit(&shm_lock);

        return error;
}
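
/*
 * Note on the accounting above: the requested size is rounded up to a
 * page boundary before charging shm_committed, so even a one-byte
 * shmget() request consumes a full page of the shminfo.shmall quota.
 */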

void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;

        SHMPRINTF(("shmfork %p->%p\n", vm1, vm2));
        mutex_enter(&shm_lock);
        vm2->vm_shm = vm1->vm_shm;
        if (vm1->vm_shm) {
                shmmap_s = (struct shmmap_state *)vm1->vm_shm;
                SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
                        shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
                shmmap_s->nrefs++;
        }
        mutex_exit(&shm_lock);
}

void
shmexit(struct vmspace *vm)
{
        struct shmmap_state *shmmap_s;
        struct shmmap_entry *shmmap_se;

        mutex_enter(&shm_lock);
        shmmap_s = (struct shmmap_state *)vm->vm_shm;
        if (shmmap_s == NULL) {
                mutex_exit(&shm_lock);
                return;
        }
        vm->vm_shm = NULL;

        if (--shmmap_s->nrefs > 0) {
                SHMPRINTF(("shmexit: vm %p drop ref (%d entries), refs = %d\n",
                    vm, shmmap_s->nitems, shmmap_s->nrefs));
                SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
                        shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
                }
                mutex_exit(&shm_lock);
                return;
        }

        SHMPRINTF(("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems));
        if (shmmap_s->nitems == 0) {
                mutex_exit(&shm_lock);
                kmem_free(shmmap_s, sizeof(struct shmmap_state));
                return;
        }

        /*
         * Delete the entries from the shm map, one at a time; the lock is
         * dropped across uvm_deallocate(), which may sleep.
         */
        for (;;) {
                struct shmid_ds *shmseg;
                struct uvm_object *uobj;
                size_t sz;

                shmmap_se = SLIST_FIRST(&shmmap_s->entries);
                KASSERT(shmmap_se != NULL);

                shmseg = &shmsegs[IPCID_TO_IX(shmmap_se->shmid)];
                sz = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
                /* shm_delete_mapping() removes from the list. */
                uobj = shm_delete_mapping(shmmap_s, shmmap_se);
                mutex_exit(&shm_lock);

                uvm_deallocate(&vm->vm_map, shmmap_se->va, sz);
                if (uobj != NULL) {
                        uao_detach(uobj);
                }
                kmem_free(shmmap_se, sizeof(struct shmmap_entry));

                if (SLIST_EMPTY(&shmmap_s->entries)) {
                        break;
                }
                mutex_enter(&shm_lock);
                KASSERT(!SLIST_EMPTY(&shmmap_s->entries));
        }
        kmem_free(shmmap_s, sizeof(struct shmmap_state));
}

static int
shmrealloc(int newshmni)
{
        vaddr_t v;
        struct shmid_ds *oldshmsegs, *newshmsegs;
        kcondvar_t *newshm_cv, *oldshm_cv;
        size_t sz;
        int i, lsegid, oldshmni;

        if (newshmni < 1)
                return EINVAL;

        /* Allocate new memory area */
        sz = ALIGN(newshmni * sizeof(struct shmid_ds)) +
            ALIGN(newshmni * sizeof(kcondvar_t));
        sz = round_page(sz);
        v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
        if (v == 0)
                return ENOMEM;

        mutex_enter(&shm_lock);
        while (shm_realloc_state || shm_realloc_disable)
                cv_wait(&shm_realloc_cv, &shm_lock);

        /*
         * Find the index of the last used segment.  Fail if we are
         * trying to shrink below the segments currently in use.
         */
        lsegid = 0;
        for (i = 0; i < shminfo.shmmni; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_FREE) == 0)
                        lsegid = i;
        if (lsegid >= newshmni) {
                mutex_exit(&shm_lock);
                uvm_km_free(kernel_map, v, sz, UVM_KMF_WIRED);
                return EBUSY;
        }
        shm_realloc_state = true;

        newshmsegs = (void *)v;
        newshm_cv = (void *)((uintptr_t)newshmsegs +
            ALIGN(newshmni * sizeof(struct shmid_ds)));

        /* Copy the existing segments to the new area */
        for (i = 0; i < shm_nused; i++) {
                cv_init(&newshm_cv[i], "shmwait");
                (void)memcpy(&newshmsegs[i], &shmsegs[i],
                    sizeof(newshmsegs[0]));
        }

        /* Mark all remaining segments as free, if there are any */
        for (; i < newshmni; i++) {
                cv_init(&newshm_cv[i], "shmwait");
                newshmsegs[i].shm_perm.mode = SHMSEG_FREE;
                newshmsegs[i].shm_perm._seq = 0;
        }

        oldshmsegs = shmsegs;
        oldshmni = shminfo.shmmni;
        shminfo.shmmni = newshmni;
        shmsegs = newshmsegs;
        shm_cv = newshm_cv;

        /* Reallocation completed - notify all waiters, if any */
        shm_realloc_state = false;
        cv_broadcast(&shm_realloc_cv);
        mutex_exit(&shm_lock);

        /* Release now unused resources. */
        oldshm_cv = (void *)((uintptr_t)oldshmsegs +
            ALIGN(oldshmni * sizeof(struct shmid_ds)));
        for (i = 0; i < oldshmni; i++)
                cv_destroy(&oldshm_cv[i]);

        sz = ALIGN(oldshmni * sizeof(struct shmid_ds)) +
            ALIGN(oldshmni * sizeof(kcondvar_t));
        sz = round_page(sz);
        uvm_km_free(kernel_map, (vaddr_t)oldshmsegs, sz, UVM_KMF_WIRED);

        return 0;
}
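
/*
 * Note on the layout used by shmrealloc() and shminit(): shmsegs[] and
 * shm_cv[] live in a single wired allocation, with the condvar array
 * starting ALIGN(shmmni * sizeof(struct shmid_ds)) bytes past shmsegs.
 * The same arithmetic is used to locate the old condvars when freeing.
 */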

void
shminit(void)
{
        vaddr_t v;
        size_t sz;
        int i;

        mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&shm_realloc_cv, "shmrealc");

        /* Allocate the wired memory for our structures */
        sz = ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)) +
            ALIGN(shminfo.shmmni * sizeof(kcondvar_t));
        sz = round_page(sz);
        v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
        if (v == 0)
                panic("sysv_shm: cannot allocate memory");
        shmsegs = (void *)v;
        shm_cv = (void *)((uintptr_t)shmsegs +
            ALIGN(shminfo.shmmni * sizeof(struct shmid_ds)));
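
        /*
         * If shmmax was not set beforehand, default it to a quarter of
         * physical memory (but at least 1024 pages); shmall mirrors
         * shmmax in pages.
         */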
        if (shminfo.shmmax == 0)
                shminfo.shmmax = max(physmem / 4, 1024) * PAGE_SIZE;
        else
                shminfo.shmmax *= PAGE_SIZE;
        shminfo.shmall = shminfo.shmmax / PAGE_SIZE;

        for (i = 0; i < shminfo.shmmni; i++) {
                cv_init(&shm_cv[i], "shmwait");
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm._seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
        shm_realloc_disable = 0;
        shm_realloc_state = false;

        sysvipcinit();
}

static int
sysctl_ipc_shmmni(SYSCTLFN_ARGS)
{
        int newsize, error;
        struct sysctlnode node;
        node = *rnode;
        node.sysctl_data = &newsize;

        newsize = shminfo.shmmni;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;

        sysctl_unlock();
        error = shmrealloc(newsize);
        sysctl_relock();
        return error;
}

static int
sysctl_ipc_shmmaxpgs(SYSCTLFN_ARGS)
{
        uint32_t newsize;
        int error;
        struct sysctlnode node;
        node = *rnode;
        node.sysctl_data = &newsize;

        newsize = shminfo.shmall;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;

        if (newsize < 1)
                return EINVAL;

        shminfo.shmall = newsize;
        shminfo.shmmax = (uint64_t)shminfo.shmall * PAGE_SIZE;

        return 0;
}

static int
sysctl_ipc_shmmax(SYSCTLFN_ARGS)
{
        uint64_t newsize;
        int error;
        struct sysctlnode node;
        node = *rnode;
        node.sysctl_data = &newsize;

        newsize = shminfo.shmmax;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;

        if (newsize < PAGE_SIZE)
                return EINVAL;

        shminfo.shmmax = round_page(newsize);
        shminfo.shmall = shminfo.shmmax >> PAGE_SHIFT;

        return 0;
}

SYSCTL_SETUP(sysctl_ipc_shm_setup, "sysctl kern.ipc subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "ipc",
            SYSCTL_DESCR("SysV IPC options"),
            NULL, 0, NULL, 0,
            CTL_KERN, KERN_SYSVIPC, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
            CTLTYPE_QUAD, "shmmax",
            SYSCTL_DESCR("Max shared memory segment size in bytes"),
            sysctl_ipc_shmmax, 0, &shminfo.shmmax, 0,
            CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAX, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
            CTLTYPE_INT, "shmmni",
            SYSCTL_DESCR("Max number of shared memory identifiers"),
            sysctl_ipc_shmmni, 0, &shminfo.shmmni, 0,
            CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMNI, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
            CTLTYPE_INT, "shmseg",
            SYSCTL_DESCR("Max shared memory segments per process"),
            NULL, 0, &shminfo.shmseg, 0,
            CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMSEG, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
            CTLTYPE_INT, "shmmaxpgs",
            SYSCTL_DESCR("Max amount of shared memory in pages"),
            sysctl_ipc_shmmaxpgs, 0, &shminfo.shmall, 0,
            CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMMAXPGS, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
            CTLTYPE_INT, "shm_use_phys",
            SYSCTL_DESCR("Enable/disable locking of shared memory in "
                "physical memory"), NULL, 0, &shm_use_phys, 0,
            CTL_KERN, KERN_SYSVIPC, KERN_SYSVIPC_SHMUSEPHYS, CTL_EOL);
}
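
/*
 * Example (illustrative): the knobs registered above appear under
 * kern.ipc, e.g.:
 *
 *      sysctl -w kern.ipc.shmmni=2048          resizes via shmrealloc()
 *      sysctl -w kern.ipc.shm_use_phys=1       wire new segments
 */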