/*	$NetBSD: sysv_shm.c,v 1.76.2.1 2004/10/04 05:19:09 jmc Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.76.2.1 2004/10/04 05:19:09 jmc Exp $");

#define SYSVSHM

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

struct shmid_ds *shm_find_segment_by_shmid(int);

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void)					initialization
 * shmexit(struct vmspace *)				cleanup
 * shmfork(struct vmspace *, struct vmspace *)		fork handling
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * a per-process 'struct shmmap_state' holding the list of attached segments
 */
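
/*
 * Illustrative sketch (added commentary; not compiled): the userland
 * lifecycle that exercises these entry points.  Error handling is elided
 * and the size/permission values are arbitrary examples.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	(attach; kernel picks the address)
 *	p[0] = 1;			(use the shared page)
 *	shmdt(p);			(detach the mapping)
 *	shmctl(id, IPC_RMID, NULL);	(mark the segment for removal)
 */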

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000
#define	SHMSEG_RMLINGER		0x2000

static int	shm_last_free, shm_nused, shm_committed;
struct	shmid_ds *shmsegs;

/* Kernel-private handle for a segment: the UVM object backing it. */
struct shm_handle {
	struct uvm_object *shm_object;
};

/* One attachment in a process: maps a virtual address to its shmid. */
struct shmmap_entry {
	SLIST_ENTRY(shmmap_entry) next;
	vaddr_t va;
	int shmid;
};

static struct pool shmmap_entry_pool;

/*
 * Per-vmspace attach state; shared across fork() and only copied
 * (via shmmap_getprivate()) when a sharing process modifies it.
 */
struct shmmap_state {
	unsigned int nitems;
	unsigned int nrefs;
	SLIST_HEAD(, shmmap_entry) entries;
};

static int shm_find_segment_by_key(key_t);
static void shm_deallocate_segment(struct shmid_ds *);
static void shm_delete_mapping(struct vmspace *, struct shmmap_state *,
			       struct shmmap_entry *);
static int shmget_existing(struct proc *, struct sys_shmget_args *,
			   int, int, register_t *);
static int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				   int, register_t *);
static struct shmmap_state *shmmap_getprivate(struct proc *);
static struct shmmap_entry *shm_find_mapping(struct shmmap_state *, vaddr_t);

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm._key == key)
			return i;
	return -1;
}

struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0)
		return NULL;
	if ((shmseg->shm_perm.mode & (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return NULL;
	if (shmseg->shm_perm._seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}
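
/*
 * Added commentary: a shmid handed out to userland packs the shmsegs[]
 * index together with a per-slot sequence number, so a stale id is
 * rejected above once its slot has been recycled.  Conceptually, per the
 * IPC id macros in <sys/ipc.h>:
 *
 *	shmid = (shm_perm._seq << 16) | segnum
 *	IPCID_TO_IX(shmid)  == segnum
 *	IPCID_TO_SEQ(shmid) == shm_perm._seq
 */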

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle = shmseg->_shm_internal;
	struct uvm_object *uobj = shm_handle->shm_object;
	size_t size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;

	(*uobj->pgops->pgo_detach)(uobj);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->_shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

static void
shm_delete_mapping(vm, shmmap_s, shmmap_se)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_se->shmid);
#ifdef DEBUG
	if (segnum < 0 || segnum >= shminfo.shmmni)
		panic("shm_delete_mapping: vmspace %p state %p entry %p - "
		    "entry segment ID bad (%d)",
		    vm, shmmap_s, shmmap_se, segnum);
#endif
	shmseg = &shmsegs[segnum];
	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	uvm_deallocate(&vm->vm_map, shmmap_se->va, size);
	SLIST_REMOVE(&shmmap_s->entries, shmmap_se, shmmap_entry, next);
	shmmap_s->nitems--;
	pool_put(&shmmap_entry_pool, shmmap_se);
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
}

/*
 * Get a non-shared shm map for this vmspace.  Three cases:
 *   - no shm map present: create a fresh one
 *   - a shm map with refcount == 1, used only by ourselves: use it as is
 *   - a shared shm map: copy to a fresh one and adjust the refcounts
 */
static struct shmmap_state *
shmmap_getprivate(struct proc *p)
{
	struct shmmap_state *oshmmap_s, *shmmap_s;
	struct shmmap_entry *oshmmap_se, *shmmap_se;

	oshmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (oshmmap_s && oshmmap_s->nrefs == 1)
		return (oshmmap_s);

	shmmap_s = malloc(sizeof(struct shmmap_state), M_SHM, M_WAITOK);
	memset(shmmap_s, 0, sizeof(struct shmmap_state));
	shmmap_s->nrefs = 1;
	SLIST_INIT(&shmmap_s->entries);
	p->p_vmspace->vm_shm = (caddr_t)shmmap_s;

	if (!oshmmap_s)
		return (shmmap_s);

#ifdef SHMDEBUG
	printf("shmmap_getprivate: vm %p split (%d entries), was used by %d\n",
	       p->p_vmspace, oshmmap_s->nitems, oshmmap_s->nrefs);
#endif
	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
		shmmap_se->va = oshmmap_se->va;
		shmmap_se->shmid = oshmmap_se->shmid;
		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	}
	shmmap_s->nitems = oshmmap_s->nitems;
	oshmmap_s->nrefs--;
	return (shmmap_s);
}
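
/*
 * Added commentary, by example: after fork() parent and child share one
 * shmmap_state with nrefs == 2.  If the child then calls shmdt(2), the
 * split above gives it a private copy to modify, leaving the parent's
 * attach list untouched.
 */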

static struct shmmap_entry *
shm_find_mapping(map, va)
	struct shmmap_state *map;
	vaddr_t va;
{
	struct shmmap_entry *shmmap_se;

	SLIST_FOREACH(shmmap_se, &map->entries, next) {
		if (shmmap_se->va == va)
			return shmmap_se;
	}
	return NULL;
}

int
sys_shmdt(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmmap_state *shmmap_s, *shmmap_s1;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	shmmap_se = shm_find_mapping(shmmap_s, (vaddr_t)SCARG(uap, shmaddr));
	if (!shmmap_se)
		return EINVAL;

	shmmap_s1 = shmmap_getprivate(p);
	if (shmmap_s1 != shmmap_s) {
		/* map has been copied, look up the entry in the new map */
		shmmap_se = shm_find_mapping(shmmap_s1,
					     (vaddr_t)SCARG(uap, shmaddr));
		KASSERT(shmmap_se != NULL);
	}
#ifdef SHMDEBUG
	printf("shmdt: vm %p: remove %d @%lx\n",
	       p->p_vmspace, shmmap_se->shmid, shmmap_se->va);
#endif
	shm_delete_mapping(p->p_vmspace, shmmap_s1, shmmap_se);
	return 0;
}

int
sys_shmat(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, flags;
	struct proc *p = l->l_proc;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s;
	struct uvm_object *uobj;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;
	struct shmmap_entry *shmmap_se;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s && shmmap_s->nitems >= shminfo.shmseg)
		return EMFILE;

	size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to uvm_mmap() about where to put it. */
		attach_va = VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, size);
	}
	uobj = ((struct shm_handle *)shmseg->_shm_internal)->shm_object;
	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    uobj, 0, 0,
	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		(*uobj->pgops->pgo_detach)(uobj);
		return error;
	}
	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	shmmap_se->va = attach_va;
	shmmap_se->shmid = SCARG(uap, shmid);
	shmmap_s = shmmap_getprivate(p);
#ifdef SHMDEBUG
	printf("shmat: vm %p: add %d @%lx\n",
	       p->p_vmspace, shmmap_se->shmid, attach_va);
#endif
	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
	shmmap_s->nitems++;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;

	retval[0] = attach_va;
	return 0;
}
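
/*
 * Added commentary, a worked example of the attach-address logic above
 * (assuming SHMLBA equals a 4096-byte page): shmat(id, (void *)0x20001234,
 * SHM_RND) attaches at 0x20001000, the address rounded down to an SHMLBA
 * boundary; the same call without SHM_RND fails with EINVAL because
 * 0x20001234 is not SHMLBA-aligned.
 */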

int
sys___shmctl13(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys___shmctl13_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct shmid_ds shmbuf;
	int cmd, error;

	cmd = SCARG(uap, cmd);

	if (cmd == IPC_SET) {
		error = copyin(SCARG(uap, buf), &shmbuf, sizeof(shmbuf));
		if (error)
			return (error);
	}

	error = shmctl1(p, SCARG(uap, shmid), cmd,
	    (cmd == IPC_SET || cmd == IPC_STAT) ? &shmbuf : NULL);

	if (error == 0 && cmd == IPC_STAT)
		error = copyout(&shmbuf, SCARG(uap, buf), sizeof(shmbuf));

	return (error);
}

int
shmctl1(p, shmid, cmd, shmbuf)
	struct proc *p;
	int shmid;
	int cmd;
	struct shmid_ds *shmbuf;
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	int error = 0;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		memcpy(shmbuf, shmseg, sizeof(struct shmid_ds));
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.uid = shmbuf->shm_perm.uid;
		shmseg->shm_perm.gid = shmbuf->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmbuf->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm._key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}
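
/*
 * Usage sketch (added commentary; illustrative userland calls):
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	(copy segment status out)
 *	ds.shm_perm.mode = 0640;
 *	shmctl(id, IPC_SET, &ds);	(update owner/permissions)
 *	shmctl(id, IPC_RMID, NULL);	(immediate removal if unattached,
 *					 otherwise deferred to last detach)
 */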

static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = (SCARG(uap, size) + PGOFSET) & ~PGOFSET;
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm._key = SCARG(uap, key);
	shmseg->shm_perm._seq = (shmseg->shm_perm._seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shm_handle->shm_object = uao_create(size, 0);

	shmseg->_shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & (ACCESSPERMS|SHMSEG_RMLINGER)) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}

int
sys_shmget(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}
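
/*
 * Usage sketch (added commentary): how the flag combinations above behave.
 *
 *	shmget(IPC_PRIVATE, len, IPC_CREAT | 0600)	always allocates
 *	shmget(key, len, 0600)				finds an existing key,
 *							else fails with ENOENT
 *	shmget(key, len, IPC_CREAT | IPC_EXCL | 0600)	fails with EEXIST if
 *							the key already exists
 */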

void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	vm2->vm_shm = vm1->vm_shm;

	if (vm1->vm_shm == NULL)
		return;

#ifdef SHMDEBUG
	printf("shmfork %p->%p\n", vm1, vm2);
#endif

	shmmap_s = (struct shmmap_state *)vm1->vm_shm;

	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
		shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch++;
	shmmap_s->nrefs++;
}

void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	struct shmmap_entry *shmmap_se;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;

	vm->vm_shm = NULL;

	if (--shmmap_s->nrefs > 0) {
#ifdef SHMDEBUG
		printf("shmexit: vm %p drop ref (%d entries), now used by %d\n",
		       vm, shmmap_s->nitems, shmmap_s->nrefs);
#endif
		SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next)
			shmsegs[IPCID_TO_IX(shmmap_se->shmid)].shm_nattch--;
		return;
	}

#ifdef SHMDEBUG
	printf("shmexit: vm %p cleanup (%d entries)\n", vm, shmmap_s->nitems);
#endif
	while (!SLIST_EMPTY(&shmmap_s->entries)) {
		shmmap_se = SLIST_FIRST(&shmmap_s->entries);
		shm_delete_mapping(vm, shmmap_s, shmmap_se);
	}
	KASSERT(shmmap_s->nitems == 0);
	free(shmmap_s, M_SHM);
}

void
shminit()
{
	int i, sz;
	vaddr_t v;

	/* Allocate pageable memory for our structures */
	sz = shminfo.shmmni * sizeof(struct shmid_ds);
	if ((v = uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("sysv_shm: cannot allocate memory");
	shmsegs = (void *)v;

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm._seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;

	pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
		  "shmmp", 0);
}
    704