/*	$NetBSD: sysv_shm.c,v 1.85 2005/11/10 18:45:20 christos Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.85 2005/11/10 18:45:20 christos Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct shmid_ds *shm_find_segment_by_shmid(int);

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);					initialization
 * shmexit(struct vmspace *)				cleanup
 * shmfork(struct vmspace *, struct vmspace *)		fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */

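/*
 * For reference, the user-space view of these segments is the standard
 * System V shm API; a minimal, purely illustrative sequence (not part of
 * this file) is:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...use the memory at p...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */
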
int shm_nused;
struct shmid_ds *shmsegs;

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static int shm_last_free, shm_committed;

static POOL_INIT(shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
    "shmmp", &pool_allocator_nointr);

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

static int shm_find_segment_by_key(key_t);
static void shm_deallocate_segment(struct shmid_ds *);
static void shm_delete_mapping(struct vmspace *, struct shmmap_state *,
    struct shmmap_entry *);
static int shmget_existing(struct proc *, struct sys_shmget_args *,
    int, int, register_t *);
static int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
    int, register_t *);
static struct shmmap_state *shmmap_getprivate(struct proc *);
static struct shmmap_entry *shm_find_mapping(struct shmmap_state *, vaddr_t);

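/*
 * Linear scan of shmsegs[] for an allocated segment whose key matches;
 * returns the segment index, or -1 if no match is found.
 */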
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm._key == key)
			return i;
	return -1;
}

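/*
 * Translate a shmid into a pointer to its shmid_ds, validating the index,
 * the allocated/removed state and the sequence number; returns NULL if the
 * identifier no longer refers to a live segment.
 */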
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode & (SHMSEG_REMOVED|SHMSEG_RMLINGER))
	    == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

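/*
 * Release a segment: drop the reference on the backing uvm object, credit
 * its pages back against the committed-page count and mark the slot free.
 */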
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct uvm_object *uobj = shmseg->_shm_internal;
	size_t size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

#ifdef SHMDEBUG
	printf("shm freeing key 0x%lx seq 0x%x\n",
	    shmseg->shm_perm._key, shmseg->shm_perm._seq);
#endif

	(*uobj->pgops->pgo_detach)(uobj);
	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

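/*
 * Remove one attachment from a vmspace: unmap it, drop the mapping entry
 * and the attach count, and free the segment if it was marked removed and
 * this was the last attachment.
 */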
static void
shm_delete_mapping(vm, shmmap_s, shmmap_se)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
	if (segnum < 0 || segnum >= shminfo.shmmni)
		panic("shm_delete_mapping: vmspace %p state %p entry %p - "
		    "entry segment ID bad (%d)",
		    vm, shmmap_s, shmmap_se, segnum);
#endif
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	pool_put(&shmmap_entry_pool, shmmap_se);
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
}

/*
 * Get a non-shared shm map for that vmspace.
 * 3 cases:
 *   - no shm map present: create a fresh one
 *   - a shm map with refcount=1, just used by ourselves: fine
 *   - a shared shm map: copy to a fresh one and adjust refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return (oshmmap_s);

	shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
	memset(shmmap_s, 0, sizeof(struct shmmap_state));
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

	if (!oshmmap_s)
		return (shmmap_s);

#ifdef SHMDEBUG
	printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	    p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;
	return (shmmap_s);
}

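/* Look up the mapping entry attached at the given virtual address. */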
static struct shmmap_entry *
shm_find_mapping(map, va)
	struct shmmap_state *map;
	vaddr_t va;
{
	struct shmmap_entry *shmmap_se;

	SLIST_FOREACH(shmmap_se, &map->entries, next) {
		if (shmmap_se->va == va)
			return shmmap_se;
	}
	return 0;
}

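/*
 * shmdt(2): detach the mapping at shmaddr from the calling process,
 * operating on a private copy of the per-vmspace shm map if it is shared.
 */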
int
sys_shmdt(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s, *shmmap_s1;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
	if (!shmmap_se)
		return EINVAL;

	shmmap_s1 = shmmap_getprivate(p);
	if (shmmap_s1 != shmmap_s) {
		/* map has been copied, lookup entry in new map */
		shmmap_se = shm_find_mapping(shmmap_s1,
		    (vaddr_t)SCARG(uap, shmaddr));
		KASSERT(shmmap_se != NULL);
	}
#ifdef SHMDEBUG
	printf("shmdt: vm %p: remove %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
	shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
	return 0;
}

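/*
 * shmat(2): attach the segment into the caller's address space, honouring
 * SHM_RDONLY/SHM_RND, and record the attachment in the per-vmspace shm map.
 */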
int
sys_shmat(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, flags;
	struct proc *p = l->l_proc;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct uvm_object *uobj;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	struct shmmap_entry *shmmap_se;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
		return EMFILE;

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to uvm_mmap() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);
	}
	uobj = shmseg->_shm_internal;
	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		(*uobj->pgops->pgo_detach)(uobj);
		return error;
	}
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
	printf("shmat: vm %p: add %d @%lx\n",
	    p->p_vmspace, shmmap_se->shmid, attach_va);
#endif
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;

	retval[0] = attach_va;
	return 0;
}

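/*
 * shmctl(2): copy the user's shmid_ds in for IPC_SET, call shmctl1() and
 * copy the result back out for IPC_STAT.
 */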
int
sys___shmctl13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys___shmctl13_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return (error);
	}

	error = shmctl1(p, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return (error);
}

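/*
 * Kernel implementation of the shmctl operations; SHM_LOCK and SHM_UNLOCK
 * are not supported and fail with EINVAL.
 */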
int
shmctl1(p, shmid, cmd, shmbuf)
	struct proc *p;
	int shmid;
	int cmd;
	struct shmid_ds *shmbuf;
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	int error = 0;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

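/*
 * shmget() found an existing segment for the key: wait out a concurrent
 * allocation, check permissions, size and IPC_CREAT|IPC_EXCL, and return
 * its identifier.
 */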
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

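/*
 * shmget() must create a new segment: pick a free slot, create the
 * anonymous uvm object backing it and initialize the shmid_ds.
 */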
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->_shm_internal = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS|SHMSEG_RMLINGER)) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

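/*
 * shmget(2): look up the key (unless IPC_PRIVATE) and either return the
 * existing segment or allocate a new one if IPC_CREAT is set.
 */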
int
sys_shmget(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(int) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

#ifdef SHMDEBUG
	printf("shmget: key 0x%lx size 0x%x shmflg 0x%x mode 0x%x\n",
	    SCARG(uap, key), SCARG(uap, size), SCARG(uap, shmflg), mode);
#endif

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

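/*
 * Called at fork time: the child shares the parent's shm map, so bump the
 * map's reference count and each mapped segment's attach count.
 */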
void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	vm2->vm_shm = vm1->vm_shm;

	if (vm1->vm_shm == NULL)
		return;

#ifdef SHMDEBUG
	printf("shmfork %p->%p\n", vm1, vm2);
#endif

	shmmap_s = (struct shmmap_state *)vm1->vm_shm;

	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
		shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
	shmmap_s->nrefs++;
}

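/*
 * Called when a vmspace goes away: drop our reference on the shm map and,
 * if it was the last one, detach every remaining mapping.
 */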
void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
		printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
		    vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		return;
	}

#ifdef SHMDEBUG
	printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shm_delete_mapping(vm, shmmap_s, shmmap_se);
	}
	KASSERT(shmmap_s->nitems == 0);
	free(shmmap_s, M_SHM);
}

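/*
 * Called once at initialization to allocate and set up the shmsegs[] array
 * and to convert shminfo.shmmax from pages to bytes.
 */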
void
shminit()
{
	int i, sz;
	vaddr_t v;

	/* Allocate kernel wired memory for our structures */
	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0, UVM_KMF_WIRED);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}