/*	$NetBSD: sysv_shm.c,v 1.84 2005/04/01 11:59:37 yamt Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.84 2005/04/01 11:59:37 yamt Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct shmid_ds *shm_find_segment_by_shmid(int);

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)                                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per-vmspace 'struct shmmap_state' holding a list of 'struct shmmap_entry'
 */

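/*
 * For illustration only, a minimal userland use of the interface this
 * file implements (error handling omitted; the key is arbitrary):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);      -> sys_shmat() below
 *	p[0] = 'x';                        visible to all attachers
 *	(void)shmdt(p);                    -> sys_shmdt() below
 *	(void)shmctl(id, IPC_RMID, NULL);  -> shmctl1() below
 */

/*
 * Per-segment state bits, kept in shm_perm.mode above the ACCESSPERMS
 * bits.  SHMSEG_RMLINGER marks a segment that remains attachable after
 * IPC_RMID (requested with _SHM_RMLINGER in shmget()).
 */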
#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000
#define	SHMSEG_RMLINGER		0x2000

static int	shm_last_free, shm_nused, shm_committed;
struct	shmid_ds *shmsegs;

struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static POOL_INIT(shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
    "shmmp", &pool_allocator_nointr);

struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};
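
/*
 * A shmmap_state hangs off vmspace.vm_shm and may be shared between
 * parent and child after fork() (shmfork() below bumps nrefs).  Any
 * path that modifies the entry list first takes a private copy via
 * shmmap_getprivate(), giving copy-on-write semantics per process.
 */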

static int shm_find_segment_by_key(key_t);
static void shm_deallocate_segment(struct shmid_ds *);
static void shm_delete_mapping(struct vmspace *, struct shmmap_state *,
			       struct shmmap_entry *);
static int shmget_existing(struct proc *, struct sys_shmget_args *,
			   int, int, register_t *);
static int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				   int, register_t *);
static struct shmmap_state *shmmap_getprivate(struct proc *);
static struct shmmap_entry *shm_find_mapping(struct shmmap_state *, vaddr_t);

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm._key == key)
			return i;
	return -1;
}

struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode & (SHMSEG_REMOVED|SHMSEG_RMLINGER)) ==
	    SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct uvm_object *uobj = shmseg->_shm_internal;
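	/*
	 * Round the segment size up to a whole number of pages: with
	 * PAGE_SIZE 4096 (PGOFSET 0xfff), a 5000-byte segment is backed
	 * by 8192 bytes, i.e. btoc(size) == 2 pages of commitment.
	 */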
	size_t size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

	(*uobj->pgops->pgo_detach)(uobj);
	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

static void
shm_delete_mapping(vm, shmmap_s, shmmap_se)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
	if (segnum < 0 || segnum >= shminfo.shmmni)
		panic("shm_delete_mapping: vmspace %p state %p entry %p - "
		    "entry segment ID bad (%d)",
		    vm, shmmap_s, shmmap_se, segnum);
#endif
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	pool_put(&shmmap_entry_pool, shmmap_se);
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
}

/*
 * Get a non-shared shm map for that vmspace.
 * 3 cases:
 *   - no shm map present: create a fresh one
 *   - a shm map with refcount=1, just used by ourselves: fine
 *   - a shared shm map: copy to a fresh one and adjust refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return (oshmmap_s);

	shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
	memset(shmmap_s, 0, sizeof(struct shmmap_state));
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

	if (!oshmmap_s)
		return (shmmap_s);

#ifdef SHMDEBUG
	printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	       p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;
	return (shmmap_s);
}

static struct shmmap_entry *
shm_find_mapping(map, va)
	struct shmmap_state *map;
	vaddr_t va;
{
	struct shmmap_entry *shmmap_se;

	SLIST_FOREACH(shmmap_se, &map->entries, next) {
		if (shmmap_se->va == va)
			return shmmap_se;
	}
	return NULL;
}

int
sys_shmdt(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s, *shmmap_s1;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
	if (!shmmap_se)
		return EINVAL;

	shmmap_s1 = shmmap_getprivate(p);
	if (shmmap_s1 != shmmap_s) {
		/* map has been copied, lookup entry in new map */
		shmmap_se = shm_find_mapping(shmmap_s1,
					     (vaddr_t)SCARG(uap, shmaddr));
		KASSERT(shmmap_se != NULL);
	}
#ifdef SHMDEBUG
	printf("shmdt: vm %p: remove %d @%lx\n",
	       p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
	shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
	return 0;
}

int
sys_shmat(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, flags;
	struct proc *p = l->l_proc;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct uvm_object *uobj;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	struct shmmap_entry *shmmap_se;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
		return EMFILE;

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
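	/*
	 * With SHM_RND a caller-supplied address is rounded down to an
	 * SHMLBA boundary: e.g. SHMLBA 0x2000 and shmaddr 0x12345 give
	 * attach_va 0x12000.  An unaligned address without SHM_RND is
	 * rejected with EINVAL.
	 */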
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to uvm_mmap() about where to put it. */
		attach_va = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);
	}
	uobj = shmseg->_shm_internal;
	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		(*uobj->pgops->pgo_detach)(uobj);
		return error;
	}
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
	printf("shmat: vm %p: add %d @%lx\n",
	       p->p_vmspace, SCARG(uap, shmid), attach_va);
#endif
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;

	retval[0] = attach_va;
	return 0;
}

int
sys___shmctl13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys___shmctl13_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return (error);
	}

	error = shmctl1(p, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return (error);
}

int
shmctl1(p, shmid, cmd, shmbuf)
	struct proc *p;
	int shmid;
	int cmd;
	struct shmid_ds *shmbuf;
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	int error = 0;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
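	/*
	 * SHM_LOCK and SHM_UNLOCK are not implemented here; they fall
	 * through to the default case and fail with EINVAL.
	 */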
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
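	/*
	 * Prefer the slot recorded by the last deallocation; otherwise
	 * scan the whole array for a free one.
	 */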
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in uao_create(), mark the segment present but
	 * deleted so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
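	/*
	 * The identifier handed to userland encodes both the slot number
	 * and the 15-bit sequence number, so a stale id naming a recycled
	 * slot is caught by the _seq check in shm_find_segment_by_shmid().
	 */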
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shmseg->_shm_internal = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS|SHMSEG_RMLINGER)) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

int
sys_shmget(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	vm2->vm_shm = vm1->vm_shm;

	if (vm1->vm_shm == NULL)
		return;

#ifdef SHMDEBUG
	printf("shmfork %p->%p\n", vm1, vm2);
#endif

	shmmap_s = (struct shmmap_state *)vm1->vm_shm;

	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
		shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
	shmmap_s->nrefs++;
}

void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
		printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
		       vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		return;
	}

#ifdef SHMDEBUG
	printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shm_delete_mapping(vm, shmmap_s, shmmap_se);
	}
	KASSERT(shmmap_s->nitems == 0);
	free(shmmap_s, M_SHM);
}

void
shminit()
{
	int i, sz;
	vaddr_t v;

	/* Allocate wired kernel memory for the segment array */
	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	v = uvm_km_alloc(kernel_map, round_page(sz), 0, UVM_KMF_WIRED);
	if (v == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;

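	/* shminfo.shmmax is configured in pages; convert it to bytes once. */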
	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
    694